From aecf01f9d226045b190a44f8e2efc564737dd487 Mon Sep 17 00:00:00 2001
From: Bernard Knueven
Date: Wed, 24 Apr 2024 13:48:43 -0600
Subject: [PATCH 01/31] add subgradient option to lagrangian

---
 mpisppy/cylinders/lagrangian_bounder.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/mpisppy/cylinders/lagrangian_bounder.py b/mpisppy/cylinders/lagrangian_bounder.py
index 30ebfebd9..620c1ad93 100644
--- a/mpisppy/cylinders/lagrangian_bounder.py
+++ b/mpisppy/cylinders/lagrangian_bounder.py
@@ -58,6 +58,7 @@ def main(self):
         if hasattr(self.opt, 'rho_setter'):
             rho_setter = self.opt.rho_setter
         extensions = self.opt.extensions is not None
+        verbose = self.opt.options['verbose']

         self.lagrangian_prep()

@@ -86,3 +87,10 @@ def main(self):
                 if extensions:
                     self.opt.extobject.enditer_after_sync()
                 self.dk_iter += 1
+            elif self.opt.options.get("subgradient_while_waiting", True):
+                # compute a subgradient step
+                self.opt.Compute_Xbar(verbose)
+                self.opt.Update_W(verbose)
+                bound = self.lagrangian()
+                if bound is not None:
+                    self.bound = bound

From ea492e765bd6830f7620256c01802747c80d6bbf Mon Sep 17 00:00:00 2001
From: Bernard Knueven
Date: Mon, 29 Apr 2024 15:36:30 -0600
Subject: [PATCH 02/31] add a blurb in secretmenu

---
 doc/src/secretmenu.rst | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/doc/src/secretmenu.rst b/doc/src/secretmenu.rst
index fae91205d..fcad0e260 100644
--- a/doc/src/secretmenu.rst
+++ b/doc/src/secretmenu.rst
@@ -48,3 +48,11 @@ hub definition dictionary is called ``hub_dict`` you would add
    hub_dict["opt_kwargs"]["PHoptions"]["initial_proximal_cut_count"] = 4

 before passing ``hub_dict`` to ``spin_the_wheel``.
+
+
+subgradient_while_waiting
+-------------------------
+
+The Lagrangian spoke has an additional option, ``subgradient_while_waiting``,
+which will compute subgradient steps while the spoke is waiting for new Ws
+from the hub.
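+
+A minimal sketch of how you might enable it, mirroring the ``hub_dict``
+example above (``lagrangian_spoke`` is a hypothetical name for your spoke
+definition dictionary, and the exact options-dictionary key may differ in
+your version of mpi-sppy):
+
+::
+
+   lagrangian_spoke["opt_kwargs"]["PHoptions"]["subgradient_while_waiting"] = True
+
+before passing the spoke dictionary to ``spin_the_wheel``.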
From 13f0b779901eb6e55c4ab4ab3507af91e07b6da5 Mon Sep 17 00:00:00 2001
From: Bernard Knueven
Date: Mon, 29 Apr 2024 15:36:52 -0600
Subject: [PATCH 03/31] set default to False

---
 mpisppy/cylinders/lagrangian_bounder.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mpisppy/cylinders/lagrangian_bounder.py b/mpisppy/cylinders/lagrangian_bounder.py
index 620c1ad93..6766d95e9 100644
--- a/mpisppy/cylinders/lagrangian_bounder.py
+++ b/mpisppy/cylinders/lagrangian_bounder.py
@@ -87,7 +87,7 @@ def main(self):
                 if extensions:
                     self.opt.extobject.enditer_after_sync()
                 self.dk_iter += 1
-            elif self.opt.options.get("subgradient_while_waiting", True):
+            elif self.opt.options.get("subgradient_while_waiting", False):
                 # compute a subgradient step
                 self.opt.Compute_Xbar(verbose)
                 self.opt.Update_W(verbose)

From ea4e1e33f7defc84540ee5fb60fcc2ca89c7839d Mon Sep 17 00:00:00 2001
From: Aymeric Legros
Date: Wed, 8 May 2024 14:25:45 -0700
Subject: [PATCH 04/31] Inserting the work done in DLW's admm_ph into mpisppy

---
 .github/workflows/testadmm_ph.yml      |  42 +++
 doc/src/admm_ph.rst                    | 189 ++++++++++++
 doc/src/index.rst                      |   1 +
 examples/distr/distr.py                | 402 +++++++++++++++++++++++++
 examples/distr/distr_admm_cylinders.py | 151 ++++++++++
 examples/distr/distr_ef.py             |  79 +++++
 examples/distr/globalmodel.py          | 134 +++++++++
 mpisppy/tests/examples/distr.py        | 402 +++++++++++++++++++++++++
 mpisppy/tests/test_admm_ph.py          |  75 +++++
 mpisppy/utils/admm_ph.py               | 162 ++++++++++
 10 files changed, 1637 insertions(+)
 create mode 100644 .github/workflows/testadmm_ph.yml
 create mode 100644 doc/src/admm_ph.rst
 create mode 100644 examples/distr/distr.py
 create mode 100644 examples/distr/distr_admm_cylinders.py
 create mode 100644 examples/distr/distr_ef.py
 create mode 100644 examples/distr/globalmodel.py
 create mode 100644 mpisppy/tests/examples/distr.py
 create mode 100644 mpisppy/tests/test_admm_ph.py
 create mode 100644 mpisppy/utils/admm_ph.py

diff --git a/.github/workflows/testadmm_ph.yml b/.github/workflows/testadmm_ph.yml
new file mode 100644
index 000000000..d0e148791
--- /dev/null
+++ b/.github/workflows/testadmm_ph.yml
@@ -0,0 +1,42 @@
+# admm_ph (pyomo released)
+
+name: admm_ph tests
+
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+
+defaults:
+  run:
+    shell: bash -l {0}
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v3
+    - uses: conda-incubator/setup-miniconda@v2
+      with:
+        activate-environment: test_env
+        python-version: 3.9
+        auto-activate-base: false
+    - name: Install dependencies
+      run: |
+        conda install mpi4py pandas setuptools
+        pip install pyomo xpress cplex
+        pip install numpy
+
+    - name: setup the program
+      run: |
+        python setup.py develop
+
+    - name: run tests
+      timeout-minutes: 100
+      run: |
+        cd mpisppy/tests
+        # envall does nothing
+        python test_admm_ph.py
diff --git a/doc/src/admm_ph.rst b/doc/src/admm_ph.rst
new file mode 100644
index 000000000..916e8f739
--- /dev/null
+++ b/doc/src/admm_ph.rst
@@ -0,0 +1,189 @@
+.. _admm_ph:
+
+ADMM PH
+=======
+
+**ADMM_PH** uses progressive hedging implemented in mpi-sppy
+to solve a non-stochastic problem by breaking it into subproblems.
+
+An example of usage is given below.
+
+Usage
+-----
+
+The driver (in the example ``distr_admm_cylinders.py``) calls ``admm_ph.py``,
+thanks to the formatted data provided by the model (in the example ``distr.py``).
+The file ``admm_ph.py`` returns variable probabilities that can be used in the driver to create the PH (or APH)
+object, which will solve the subproblems in parallel, ensuring that merging conditions are respected.
+
+Data needed in the driver
++++++++++++++++++++++++++
+
+The driver file requires a function which creates the model for each scenario.
+
+.. py:function:: scenario_creator(scenario_name)
+
+    Creates the model, which should include the consensus variables.
+    However, this function should not create any tree.
+
+    Args:
+        scenario_name (str): the name of the scenario that will be created.
+
+    Returns:
+        Pyomo ConcreteModel: the instantiated model
+
+The driver file also requires helper arguments that are used in mpi-sppy. Here is a summary:
+
+.. TBD add hyperlink on the previous line
+
+* ``scenario_creator_kwargs`` (dict[str]): keyword arguments needed in ``scenario_creator``
+
+* ``all_scenario_names`` (list of str): the subproblem names
+
+* A function that prints the results once the problem is solved
+
+    .. py:function:: scenario_denouement
+
+        Args:
+            rank (int): rank in the cylinder
+
+            scenario_name (str): name of the scenario
+
+            scenario (Pyomo ConcreteModel): the instantiated model
+
+
+.. note::
+
+    Subproblems will be represented in mpi-sppy by scenarios. Consensus variables ensure that the problem constraints
+    are respected; they are represented in mpi-sppy by non-anticipative variables.
+    The following terms are associated: subproblems = scenarios (= regions in the example),
+    and nonants = consensus-vars (= dummy nodes in the example)
+
+
+The driver also needs global information to link the subproblems.
+It should be provided in ``consensus_vars``.
+
+``consensus_vars`` (dictionary):
+    * Keys are the subproblems
+    * Values are the list of consensus variables
+
+.. note::
+
+    Every variable in ``consensus_vars[subproblem]`` should also appear as a variable of the subproblem.
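+
+For instance, in the two-region version of the distribution example described
+below, ``consensus_vars_creator`` produces a dictionary of the following shape,
+where ``y[DC1DC2]`` is the slack of the single dummy node shared by both regions::
+
+    consensus_vars = {"Region1": ["y[DC1DC2]"],
+                      "Region2": ["y[DC1DC2]"]}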
+
+Solving methods in config
+++++++++++++++++++++++++++++++
+
+In addition to the previously presented data, the driver also requires arguments to
+create the PH model and solve it. These arguments are obtained from the user via config.
+
+.. TBD add external link on the previous line
+
+The user must specify the solver name with ``--solver-name (str)``.
+
+The number of cylinders used for progressive hedging must be specified with ``-np (int)``.
+
+If the config argument ``run_async`` is used, asynchronous projective hedging is used instead of PH.
+
+.. In the following line add link to config page
+
+The other arguments are detailed in config. They include:
+
+* ``config.popular_args()``: the popular arguments; note that ``default_rho`` is needed
+* ``xhatxbar``, ``lagrangian``, ``ph_ob``, ``fwph``: each of these creates a new cylinder
+* ``two_sided_args``: these optional arguments give stopping conditions: ``rel_gap`` and ``abs_gap`` for the
+  relative or absolute termination gap, and ``max_stalled_iters`` for the maximum number of iterations with no
+  reduction in gap
+* ``ph_args`` and ``aph_args``: specific arguments required when using PH or APH
+
+Direct solver of the extensive form
++++++++++++++++++++++++++++++++++++
+``distr_ef.py`` can be used as a verification or debugging tool for small instances.
+It directly solves the extensive form using the wrapper ``scenario_creator`` from ``admm_ph``.
+It has the advantage of requiring the same arguments as ``distr_admm_cylinders`` because both
+build the subproblems from the same model.
+
+This method offers a verification for small instances, but is slower than ``admm_ph``
+as it doesn't decompose the problem.
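+
+A hypothetical command line for the distribution example described below
+(a sketch only; ``cplex`` is a stand-in for whatever solver is installed)::
+
+    python distr_ef.py --num-scens 3 --solver-name cplex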
+
+.. note::
+
+    ``admm_ph`` doesn't collect instantiation time.
+
+Distribution example
+--------------------
+``distr.py`` is an example of a model creator for admm_ph: a (linear) inter-region minimal cost distribution problem.
+``distr_admm_cylinders.py`` is the driver.
+
+Original data dictionaries
++++++++++++++++++++++++++++
+
+In the example, ``inter_region_dict_creator`` creates the inter-region information.
+
+.. autofunction:: distr.inter_region_dict_creator
+
+The ``region_dict_creator`` creates the information specific to a region, regardless of the other regions.
+
+.. autofunction:: distr.region_dict_creator
+
+Adapting the data to create the model
++++++++++++++++++++++++++++++++++++++
+
+To solve the regions independently, we must make sure that the constraints (in our example, flow balance) would still be respected if the
+models were merged. To impose this, dummy nodes are introduced.
+
+In our example, a consensus variable is the slack variable on a dummy node.
+If ``inter-region-dict`` indicates that an arc exists from the node DC1 (distribution center in Region 1) to DC2, then we create a dummy
+node DC1DC2 stored both in Region1's ``local_dict["dummy nodes source"]`` and in Region2's ``local_dict["dummy nodes target"]``, with a slack whose
+only constraint is to be lower than the flow possible from DC1 to DC2. PH then ensures that the slacks are equal.
+
+The purpose of ``distr.dummy_nodes_generator`` is to do that.
+
+.. autofunction:: distr.dummy_nodes_generator
+
+.. note::
+
+    In our example, we have chosen to split the cost of transport
+    from a region source to a region target equally among the regions.
+
+    We here represent the flow problem with a directed graph. If a flow from DC2 to DC1 were to be authorized, we would create
+    a dummy node DC2DC1.
+
+Once the local_dict is created, the Pyomo model can be created thanks to ``min_cost_distr_problem``.
+
+.. autofunction:: distr.min_cost_distr_problem
+
+Transforming data for the driver
+++++++++++++++++++++++++++++++++
+The driver requires three essential elements: ``all_scenario_names``, ``scenario_creator`` and ``consensus_vars``.
+
+``all_scenario_names`` (see above) is given by ``scenario_names_creator``.
+
+``scenario_creator`` is created thanks to the previous functions.
+
+.. autofunction:: distr.scenario_creator
+
+The function ``consensus_vars_creator`` creates the required ``consensus_vars`` dictionary.
+
+.. autofunction:: distr.consensus_vars_creator
+
+The dictionary ``scenario_creator_kwargs`` is created with
+
+.. autofunction:: distr.kw_creator
+
+The function ``inparser_adder`` requires the user to give ``num_scens`` via the config.
+
+
+
+Non local solvers
++++++++++++++++++
+
+The files ``globalmodel.py`` and ``distr_ef.py`` are used for debugging. They don't rely on PH or ADMM; they simply solve the
+problem without decomposition.
+
+* ``globalmodel.py``
+
+    In ``globalmodel.py``, ``global_dict_creator`` merges the data into a global dictionary
+    thanks to the inter-region dictionary, without creating dummy nodes. It then creates the model (as if there were
+    only one region without dummy nodes) and solves it.
+
+    However, this file depends strongly on the structure of the problem and doesn't decompose it. It may be difficult
+    to adapt and inefficient.
+
+* ``distr_ef.py``
+
+   As presented previously, it solves the extensive form.
+   The arguments are the same as for ``distr_admm_cylinders.py``; the method doesn't need to be adapted to the model.
+
diff --git a/doc/src/index.rst b/doc/src/index.rst
index 79f2f711a..7011e7a7b 100644
--- a/doc/src/index.rst
+++ b/doc/src/index.rst
@@ -31,6 +31,7 @@ MPI is used.
    pickledbundles.rst
    grad_rho.rst
    w_rho.rst
+   admm_ph.rst
    contributors.rst
    internals.rst
    api.rst
diff --git a/examples/distr/distr.py b/examples/distr/distr.py
new file mode 100644
index 000000000..c708849c7
--- /dev/null
+++ b/examples/distr/distr.py
@@ -0,0 +1,402 @@
+# Network Flow - various formulations
+import pyomo.environ as pyo
+import mpisppy.utils.sputils as sputils
+
+# In this file, we create a (linear) inter-region minimal cost distribution problem.
+# Our data gives the constraints inside each region in region_dict_creator
+# and amongst the different regions in inter_region_dict_creator.
+# The data differs slightly depending on the number of regions (num_scens), which can be 2, 3 or 4.
+
+# Note: regions in our model will be represented in mpi-sppy by scenarios and, to ensure the inter-region constraints,
+# we will use dummy-nodes, which will be represented in mpi-sppy by non-anticipative variables.
+# The following associations of terms are made: regions = scenarios, and dummy-nodes = nonants = consensus-vars
+
+### The following functions create the data
+
+def inter_region_dict_creator(num_scens):
+    """Creates the oriented arcs between the regions, with their capacities and costs. \n
+    This dictionary represents the inter-region constraints and flows. It indicates where to add dummy nodes.
+
+    Args:
+        num_scens (int): select the number of scenarios (regions) wanted
+
+    Returns:
+        dict:
+            Each arc is presented as a pair (source, target) with source and target containing (scenario_name, node_name) \n
+            The arcs are used as keys for the dictionaries of costs and capacities
+    """
+    inter_region_dict={}
+
+    if num_scens == 2:
+        inter_region_dict["arcs"]=[(("Region1","DC1"),("Region2","DC2"))]
+        inter_region_dict["costs"]={(("Region1","DC1"),("Region2","DC2")): 100}
+        inter_region_dict["capacities"]={(("Region1","DC1"),("Region2","DC2")): 70}
+
+    elif num_scens == 3:
+        inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\
+                                     (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\
+                                     (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\
+                                     ]
+        inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\
+                                      (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\
+                                      (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\
+                                      }
+        inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\
+                                           (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\
+                                           (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\
+                                           }
+
+    elif num_scens == 4:
+        inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\
+                                     (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\
+                                     (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\
+                                     (("Region1","DC1"),("Region4","DC4")),(("Region4","DC4"),("Region1","DC1")),\
+                                     (("Region4","DC4"),("Region2","DC2")),(("Region2","DC2"),("Region4","DC4")),\
+                                     ]
+        inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\
+                                      (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\
+                                      (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\
+                                      (("Region1","DC1"),("Region4","DC4")): 30, (("Region4","DC4"),("Region1","DC1")): 50,\
+                                      (("Region4","DC4"),("Region2","DC2")): 100, (("Region2","DC2"),("Region4","DC4")): 70,\
+                                      }
+        inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\
+                                           (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\
+                                           (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\
+                                           (("Region1","DC1"),("Region4","DC4")): 100, (("Region4","DC4"),("Region1","DC1")): 60,\
+                                           (("Region4","DC4"),("Region2","DC2")): 20, (("Region2","DC2"),("Region4","DC4")): 40,\
+                                           }
+
+    return inter_region_dict
+
+
+def dummy_nodes_generator(region_dict, inter_region_dict):
+    """This function creates a new dictionary ``local_dict``, similar to ``region_dict``, with the dummy nodes and their constraints
+
+    Args:
+        region_dict (dict): dictionary for the current scenario \n
+        inter_region_dict (dict): dictionary of the inter-region relations
+
+    Returns:
+        local_dict (dict):
+            This dictionary copies region_dict, completes the already existing fields of
+            region_dict to represent the dummy nodes, and adds the following keys:\n
+            dummy nodes source (resp. target): the list of dummy nodes for which the source (resp. target)
+            is in the considered region. \n
+            dummy nodes source (resp. target) slack bounds: dictionary on dummy nodes source (resp.
target) + """ + ### Note: The cost of the arc is here chosen to be split equally between the source region and target region + + # region_dict is renamed as local_dict because it no longer contains only information about the region + # but also contains local information that can be linked to the other scenarios (with dummy nodes) + local_dict = region_dict + local_dict["dummy nodes source"] = list() + local_dict["dummy nodes target"] = list() + local_dict["dummy nodes source slack bounds"] = {} + local_dict["dummy nodes target slack bounds"] = {} + for arc in inter_region_dict["arcs"]: + source,target = arc + region_source,node_source = source + region_target,node_target = target + + if region_source == region_dict["name"]: + dummy_node=node_source+node_target + local_dict["nodes"].append(dummy_node) + local_dict["supply"][dummy_node] = 0 + local_dict["dummy nodes source"].append(dummy_node) + local_dict["arcs"].append((node_source, dummy_node)) + local_dict["flow costs"][(node_source, dummy_node)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model + local_dict["flow capacities"][(node_source, dummy_node)] = inter_region_dict["capacities"][(source, target)] + local_dict["dummy nodes source slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] + + if region_target == local_dict["name"]: + dummy_node = node_source + node_target + local_dict["nodes"].append(dummy_node) + local_dict["supply"][dummy_node] = 0 + local_dict["dummy nodes target"].append(dummy_node) + local_dict["arcs"].append((dummy_node, node_target)) + local_dict["flow costs"][(dummy_node, node_target)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model + local_dict["flow capacities"][(dummy_node, node_target)] = inter_region_dict["capacities"][(source, target)] + local_dict["dummy nodes target slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] + return local_dict + +def _is_partition(L, *lists): + # Step 1: Verify that the union of all sublists contains all elements of L + if set(L) != set().union(*lists): + return False + + # Step 2: Ensure each element in L appears in exactly one sublist + for item in L: + count = 0 + for sublist in lists: + if item in sublist: + count += 1 + if count != 1: + return False + + return True + +def region_dict_creator(scenario_name): + """ Create a scenario for the inter-region max profit distribution example. + + The convention for node names is: + Symbol + number of the region (+ _ + number of the example if needed), \n + with symbols DC for distribution centers, F for factory nodes, B for buyer nodes. \n + For instance: F3_1 is the 1st factory node of region 3. \n + + Args: + scenario_name (str): + Name of the scenario to construct. + + Returns: + region_dict (dict): contains all the information in the given region to create the model. It is composed of:\n + "nodes" (list of str): all the nodes. 
The following subsets are also nodes: \n + "factory nodes", "buyer nodes", "distribution center nodes", \n + "arcs" (list of 2 tuples of str) : (node, node) pairs\n + "supply" (dict[n] of float): supply; keys are nodes (negative for demand)\n + "production costs" (dict of float): at each factory node\n + "revenues" (dict of float): at each buyer node \n + "flow costs" (dict[a] of float) : costs per unit flow on each arc \n + "flow capacities" (dict[a] of floats) : upper bound capacities of each arc \n + """ + if scenario_name == "Region1" : + # Creates data for Region1 + region_dict={"name": "Region1"} + region_dict["nodes"] = ["F1_1", "F1_2", "DC1", "B1_1", "B1_2"] + region_dict["factory nodes"] = ["F1_1","F1_2"] + region_dict["buyer nodes"] = ["B1_1","B1_2"] + region_dict["distribution center nodes"]= ["DC1"] + region_dict["supply"] = {"F1_1": 80, "F1_2": 70, "B1_1": -60, "B1_2": -90, "DC1": 0} + region_dict["arcs"] = [("F1_1","DC1"), ("F1_2","DC1"), ("DC1","B1_1"), + ("DC1","B1_2"), ("F1_1","B1_1"), ("F1_2","B1_2")] + + region_dict["production costs"] = {"F1_1": 50, "F1_2": 80} + region_dict["revenues"] = {"B1_1": 800, "B1_2": 900} + # most capacities are 50, so start there and then override + region_dict["flow capacities"] = {a: 50 for a in region_dict["arcs"]} + region_dict["flow capacities"][("F1_1","B1_1")] = None + region_dict["flow capacities"][("F1_2","B1_2")] = None + region_dict["flow costs"] = {("F1_1","DC1"): 300, ("F1_2","DC1"): 500, ("DC1","B1_1"): 200, + ("DC1","B1_2"): 400, ("F1_1","B1_1"): 700, ("F1_2","B1_2"): 1000} + + elif scenario_name=="Region2": + # Creates data for Region2 + region_dict={"name": "Region2"} + region_dict["nodes"] = ["DC2", "B2_1", "B2_2", "B2_3"] + region_dict["factory nodes"] = list() + region_dict["buyer nodes"] = ["B2_1","B2_2","B2_3"] + region_dict["distribution center nodes"]= ["DC2"] + region_dict["supply"] = {"B2_1": -200, "B2_2": -150, "B2_3": -100, "DC2": 0} + region_dict["arcs"] = [("DC2","B2_1"), ("DC2","B2_2"), ("DC2","B2_3")] + + region_dict["production costs"] = {} + region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3":1200} + region_dict["flow capacities"] = {("DC2","B2_1"): 200, ("DC2","B2_2"): 150, ("DC2","B2_3"): 100} + region_dict["flow costs"] = {("DC2","B2_1"): 100, ("DC2","B2_2"): 200, ("DC2","B2_3"): 300} + + elif scenario_name == "Region3" : + # Creates data for Region3 + region_dict={"name": "Region3"} + region_dict["nodes"] = ["F3_1", "F3_2", "DC3_1", "DC3_2", "B3_1", "B3_2"] + region_dict["factory nodes"] = ["F3_1","F3_2"] + region_dict["buyer nodes"] = ["B3_1","B3_2"] + region_dict["distribution center nodes"]= ["DC3_1","DC3_2"] + region_dict["supply"] = {"F3_1": 80, "F3_2": 60, "B3_1": -100, "B3_2": -100, "DC3_1": 0, "DC3_2": 0} + region_dict["arcs"] = [("F3_1","DC3_1"), ("F3_2","DC3_2"), ("DC3_1","B3_1"), + ("DC3_2","B3_2"), ("DC3_1","DC3_2"), ("DC3_2","DC3_1")] + + region_dict["production costs"] = {"F3_1": 50, "F3_2": 50} + region_dict["revenues"] = {"B3_1": 900, "B3_2": 700} + region_dict["flow capacities"] = {("F3_1","DC3_1"): 80, ("F3_2","DC3_2"): 60, ("DC3_1","B3_1"): 100, + ("DC3_2","B3_2"): 100, ("DC3_1","DC3_2"): 70, ("DC3_2","DC3_1"): 50} + region_dict["flow costs"] = {("F3_1","DC3_1"): 100, ("F3_2","DC3_2"): 100, ("DC3_1","B3_1"): 201, + ("DC3_2","B3_2"): 200, ("DC3_1","DC3_2"): 100, ("DC3_2","DC3_1"): 100} + + elif scenario_name == "Region4": + # Creates data for Region4 + region_dict={"name": "Region4"} + region_dict["nodes"] = ["F4_1", "F4_2", "DC4", "B4_1", "B4_2"] + region_dict["factory nodes"] = 
["F4_1","F4_2"] + region_dict["buyer nodes"] = ["B4_1","B4_2"] + region_dict["distribution center nodes"] = ["DC4"] + region_dict["supply"] = {"F4_1": 200, "F4_2": 30, "B4_1": -100, "B4_2": -100, "DC4": 0} + region_dict["arcs"] = [("F4_1","DC4"), ("F4_2","DC4"), ("DC4","B4_1"), ("DC4","B4_2")] + + region_dict["production costs"] = {"F4_1": 50, "F4_2": 50} + region_dict["revenues"] = {"B4_1": 900, "B4_2": 700} + region_dict["flow capacities"] = {("F4_1","DC4"): 80, ("F4_2","DC4"): 60, ("DC4","B4_1"): 100, ("DC4","B4_2"): 100} + region_dict["flow costs"] = {("F4_1","DC4"): 100, ("F4_2","DC4"): 80, ("DC4","B4_1"): 90, ("DC4","B4_2"): 70} + + else: + raise RuntimeError (f"unknown Region name {scenario_name}") + + assert _is_partition(region_dict["nodes"], region_dict["factory nodes"], region_dict["buyer nodes"], region_dict["distribution center nodes"]) + + return region_dict + + +###Creates the model when local_dict is given +def min_cost_distr_problem(local_dict, sense=pyo.minimize): + """ Create an arcs formulation of network flow + + Args: + local_dict (dict): dictionary representing a region including the dummy nodes \n + sense (=pyo.minimize): we aim to minimize the cost, this should always be minimize + + Returns: + model (Pyomo ConcreteModel) : the instantiated model + """ + # Assert sense == pyo.minimize, "sense should be equal to pyo.minimize" + # First, make the special In, Out arc lists for each node + arcsout = {n: list() for n in local_dict["nodes"]} + arcsin = {n: list() for n in local_dict["nodes"]} + for a in local_dict["arcs"]: + arcsout[a[0]].append(a) + arcsin[a[1]].append(a) + + model = pyo.ConcreteModel(name='MinCostFlowArcs') + def flowBounds_rule(model, i,j): + return (0, local_dict["flow capacities"][(i,j)]) + model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x + + def slackBounds_rule(model, n): + if n in local_dict["factory nodes"]: + return (0, local_dict["supply"][n]) + elif n in local_dict["buyer nodes"]: + return (local_dict["supply"][n], 0) + elif n in local_dict["dummy nodes source"]: + return (0,local_dict["dummy nodes source slack bounds"][n]) + elif n in local_dict["dummy nodes target"]: #this slack will respect the opposite flow balance rule + return (0,local_dict["dummy nodes target slack bounds"][n]) + elif n in local_dict["distribution center nodes"]: + return (0,0) + else: + raise ValueError(f"unknown node type for node {n}") + + model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule) + + model.MinCost = pyo.Objective(expr=\ + sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ + + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \ + + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) , + sense=sense) + + def FlowBalance_rule(m, n): + #we change the definition of the slack for target dummy nodes so that we have the slack from the source and from the target equal + if n in local_dict["dummy nodes target"]: + return sum(m.flow[a] for a in arcsout[n])\ + - sum(m.flow[a] for a in arcsin[n])\ + - m.y[n] == local_dict["supply"][n] + else: + return sum(m.flow[a] for a in arcsout[n])\ + - sum(m.flow[a] for a in arcsin[n])\ + + m.y[n] == local_dict["supply"][n] + model.FlowBalance= pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule) + + return model + + +###Creates the scenario +def scenario_creator(scenario_name, num_scens=None): + """Creates the model, which should include the consensus 
variables. \n + However, this function shouldn't attach the consensus variables to root nodes, as it is done in admm_ph. + + Args: + scenario_name (str): the name of the scenario that will be created. Here is of the shape f"Region{i}" with 1<=i<=num_scens \n + num_scens (int): number of scenarios (regions). Useful to create the corresponding inter-region dictionary + + Returns: + Pyomo ConcreteModel: the instantiated model + """ + assert (num_scens is not None) + inter_region_dict = inter_region_dict_creator(num_scens) + region_dict = region_dict_creator(scenario_name) + # Adding dummy nodes and associated features + local_dict = dummy_nodes_generator(region_dict, inter_region_dict) + # Generating the model + model = min_cost_distr_problem(local_dict) + + varlist = list() + sputils.attach_root_node(model, model.MinCost, varlist) + + return model + + +###Functions required in other files, which constructions are specific to the problem + +def scenario_denouement(rank, scenario_name, scenario): + """for each scenario prints its name and the final variable values + + Args: + rank (int): not used here, but could be helpful to know the location + scenario_name (str): name of the scenario + scenario (Pyomo ConcreteModel): the instantiated model + """ + print(f"flow values for {scenario_name}") + scenario.flow.pprint() + print(f"slack values for {scenario_name}") + scenario.y.pprint() + + +def consensus_vars_creator(num_scens): + """The following function creates the consensus_vars dictionary thanks to the inter-region dictionary. \n + This dictionary has redundant information, but is useful for admm_ph. + + Args: + num_scens (int): select the number of scenarios (regions) wanted + + Returns: + dict: dictionary which keys are the regions and values are the list of consensus variables + present in the region + """ + # Due to the small size of inter_region_dict, it is not given as argument but rather created. + inter_region_dict = inter_region_dict_creator(num_scens) + consensus_vars = {} + for arc in inter_region_dict["arcs"]: + source,target = arc + region_source,node_source = source + region_target,node_target = target + dummy_node = node_source + node_target + vstr = f"y[{dummy_node}]" #variable name as string, y is the slack + + #adds dummy_node in the source region + if not region_source in consensus_vars: #initiates consensus_vars[region_source] + consensus_vars[region_source] = list() + consensus_vars[region_source].append(vstr) + + #adds dummy_node in the target region + if not region_target in consensus_vars: #initiates consensus_vars[region_target] + consensus_vars[region_target] = list() + consensus_vars[region_target].append(vstr) + return consensus_vars + + +def scenario_names_creator(num_scens): + """Creates the name of every scenario. + + Args: + num_scens (int): number of scenarios + + Returns: + list (str): the list of names + """ + return [f"Region{i+1}" for i in range(num_scens)] + + +def kw_creator(cfg): + """ + Args: + cfg (config): specifications for the problem. 
We only look at the number of scenarios
+
+    Returns:
+        dict (str): the kwargs that are used in distr.scenario_creator, here {"num_scens": num_scens}
+    """
+    kwargs = {"num_scens" : cfg.get('num_scens', None),
+              }
+    if kwargs["num_scens"] not in [2, 3, 4]:
+        raise RuntimeError (f"unexpected number of regions {cfg.num_scens}, should be in [2, 3, 4]")
+    return kwargs
+
+
+def inparser_adder(cfg):
+    # requires the user to give the number of scenarios
+    cfg.num_scens_required()
\ No newline at end of file
diff --git a/examples/distr/distr_admm_cylinders.py b/examples/distr/distr_admm_cylinders.py
new file mode 100644
index 000000000..9ff51a6aa
--- /dev/null
+++ b/examples/distr/distr_admm_cylinders.py
@@ -0,0 +1,151 @@
+# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
+# This software is distributed under the 3-clause BSD License.
+# general example driver for distr with cylinders
+import mpisppy.utils.admm_ph as admm_ph
+import distr
+import mpisppy.cylinders
+
+from mpisppy.spin_the_wheel import WheelSpinner
+import mpisppy.utils.sputils as sputils
+from mpisppy.utils import config
+import mpisppy.utils.cfg_vanilla as vanilla
+from mpisppy import MPI
+global_rank = MPI.COMM_WORLD.Get_rank()
+
+
+write_solution = True
+
+def _parse_args():
+    # create a config object and parse
+    cfg = config.Config()
+    distr.inparser_adder(cfg)
+    cfg.popular_args()
+    cfg.two_sided_args()
+    cfg.ph_args()
+    cfg.aph_args()
+    cfg.xhatxbar_args()
+    cfg.fwph_args()
+    cfg.lagrangian_args()
+    cfg.ph_ob_args()
+    cfg.wxbar_read_write_args()
+    cfg.tracking_args()
+    cfg.add_to_config("run_async",
+                      description="Run with async projective hedging instead of progressive hedging",
+                      domain=bool,
+                      default=False)
+
+    cfg.parse_command_line("distr_admm_cylinders")
+    return cfg
+
+
+def _count_cylinders(cfg):
+    count = 1
+    cfglist = ["xhatxbar", "lagrangian", "ph_ob", "fwph"]  # all the cfg arguments that create a new cylinder
+    for cylname in cfglist:
+        if cfg[cylname]:
+            count += 1
+    return count
+
+
+def main():
+
+    cfg = _parse_args()
+
+    if cfg.default_rho is None:  # and rho_setter is None
+        raise RuntimeError("No rho_setter so a default must be specified via --default-rho")
+
+    ph_converger = None
+
+    options = {}
+    all_scenario_names = distr.scenario_names_creator(num_scens=cfg.num_scens)
+    scenario_creator = distr.scenario_creator
+    scenario_creator_kwargs = distr.kw_creator(cfg)
+    consensus_vars = distr.consensus_vars_creator(cfg.num_scens)
+    n_cylinders = _count_cylinders(cfg)
+    admm = admm_ph.ADMM_PH(options,
+                           all_scenario_names,
+                           scenario_creator,
+                           consensus_vars,
+                           n_cylinders=n_cylinders,
+                           mpicomm=MPI.COMM_WORLD,
+                           scenario_creator_kwargs=scenario_creator_kwargs,
+                           )
+
+    # Things needed for vanilla cylinders
+    scenario_creator = admm.admm_ph_scenario_creator  ## change needed because of the wrapper
+    scenario_creator_kwargs = None
+    scenario_denouement = distr.scenario_denouement
+    # note that the admm_ph scenario_creator wrapper doesn't take any arguments
+    variable_probability = admm.var_prob_list_fct
+
+    beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names)
+
+    if cfg.run_async:
+        # Vanilla APH hub
+        hub_dict = vanilla.aph_hub(*beans,
+                                   scenario_creator_kwargs=scenario_creator_kwargs,
+                                   ph_extensions=None,
+                                   rho_setter = None,
+                                   variable_probability=variable_probability)
+
+    else:
+        # Vanilla PH hub
+        hub_dict = vanilla.ph_hub(*beans,
+                                  scenario_creator_kwargs=scenario_creator_kwargs,
+                                  ph_extensions=None,
+                                  ph_converger=ph_converger,
+                                  rho_setter=None,
+                                  
variable_probability=variable_probability) + + + # FWPH spoke + if cfg.fwph: + fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + + # Standard Lagrangian bound spoke + if cfg.lagrangian: + lagrangian_spoke = vanilla.lagrangian_spoke(*beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter = None, + variable_probability=variable_probability) + + + # ph outer bounder spoke + if cfg.ph_ob: + ph_ob_spoke = vanilla.ph_ob_spoke(*beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter = None, + variable_probability=variable_probability) + + # xhat looper bound spoke + if cfg.xhatxbar: + xhatxbar_spoke = vanilla.xhatxbar_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + + list_of_spoke_dict = list() + if cfg.fwph: + list_of_spoke_dict.append(fw_spoke) + if cfg.lagrangian: + list_of_spoke_dict.append(lagrangian_spoke) + if cfg.ph_ob: + list_of_spoke_dict.append(ph_ob_spoke) + if cfg.xhatxbar: + list_of_spoke_dict.append(xhatxbar_spoke) + + assert n_cylinders == 1 + len(list_of_spoke_dict), f"n_cylinders = {n_cylinders}, len(list_of_spoke_dict) = {len(list_of_spoke_dict)}" + + wheel = WheelSpinner(hub_dict, list_of_spoke_dict) + wheel.spin() + + if write_solution: + wheel.write_first_stage_solution('distr_soln.csv') + wheel.write_first_stage_solution('distr_cyl_nonants.npy', + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) + wheel.write_tree_solution('distr_full_solution') + + if global_rank == 0: + best_objective = wheel.spcomm.BestInnerBound * len(all_scenario_names) + print(f"{best_objective=}") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/examples/distr/distr_ef.py b/examples/distr/distr_ef.py new file mode 100644 index 000000000..e2cd3b7d7 --- /dev/null +++ b/examples/distr/distr_ef.py @@ -0,0 +1,79 @@ +# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff +# This software is distributed under the 3-clause BSD License. 
+# general example driver for distr with cylinders +import mpisppy.utils.admm_ph as admm_ph +import distr +import mpisppy.cylinders +import pyomo.environ as pyo + +import mpisppy.utils.sputils as sputils +from mpisppy.utils import config +from mpisppy import MPI +global_rank = MPI.COMM_WORLD.Get_rank() + + +write_solution = True + +def _parse_args(): + # create a config object and parse + cfg = config.Config() + distr.inparser_adder(cfg) + cfg.add_to_config("solver_name", + description="Choice of the solver", + domain=str, + default=None) + + cfg.parse_command_line("distr_ef2") + return cfg + + +def solve_EF_directly(admm,solver_name): + scenario_names = admm.local_scenario_names + scenario_creator = admm.admm_ph_scenario_creator + + ef = sputils.create_EF( + scenario_names, + scenario_creator, + nonant_for_fixed_vars=False, + ) + solver = pyo.SolverFactory(solver_name) + if 'persistent' in solver_name: + solver.set_instance(ef, symbolic_solver_labels=True) + solver.solve(tee=True) + else: + solver.solve(ef, tee=True, symbolic_solver_labels=True,) + return ef + +def main(): + + cfg = _parse_args() + + options = {} + all_scenario_names = distr.scenario_names_creator(num_scens=cfg.num_scens) + scenario_creator = distr.scenario_creator + scenario_creator_kwargs = distr.kw_creator(cfg) + consensus_vars = distr.consensus_vars_creator(cfg.num_scens) + + n_cylinders = 1 + admm = admm_ph.ADMM_PH(options, + all_scenario_names, + scenario_creator, + consensus_vars, + n_cylinders=n_cylinders, + mpicomm=MPI.COMM_WORLD, + scenario_creator_kwargs=scenario_creator_kwargs, + ) + + solved_ef = solve_EF_directly(admm, cfg.solver_name) + with open("ef.txt", "w") as f: + solved_ef.pprint(f) + print ("******* model written to ef.txt *******") + solution_file_name = "solution_distr.txt" + sputils.write_ef_first_stage_solution(solved_ef, + solution_file_name,) + print(f"ef solution written to {solution_file_name}") + print(f"EF objective: {pyo.value(solved_ef.EF_Obj)}") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/examples/distr/globalmodel.py b/examples/distr/globalmodel.py new file mode 100644 index 000000000..2b4caf5a9 --- /dev/null +++ b/examples/distr/globalmodel.py @@ -0,0 +1,134 @@ +import pyomo.environ as pyo +import distr +from mpisppy.utils import config + +def _parse_args(): + # create a config object and parse + cfg = config.Config() + distr.inparser_adder(cfg) + cfg.add_to_config("solver_name", + description="which solver", + domain=str, + default=None, + argparse_args = {"required": True}, + ) + cfg.parse_command_line("globalmodel") + + return cfg + + +##we need to create a new model solver, to avoid dummy nodes +def min_cost_distr_problem(region_dict, sense=pyo.minimize): + """ Create an arcs formulation of network flow, the function is the simplification without dummy nodes of this function in distr + + Arg: + region_dict (dict): given by region_dict_creator with test_without_dummy=True so that there is no dummy node + + Returns: + model (Pyomo ConcreteModel) : the instantiated model + """ + # First, make the special In, Out arc lists for each node + arcsout = {n: list() for n in region_dict["nodes"]} + arcsin = {n: list() for n in region_dict["nodes"]} + for a in region_dict["arcs"]: + arcsout[a[0]].append(a) + arcsin[a[1]].append(a) + + model = pyo.ConcreteModel(name='MinCostFlowArcs') + def flowBounds_rule(model, i,j): + return (0, region_dict["flow capacities"][(i,j)]) + model.flow = pyo.Var(region_dict["arcs"], bounds=flowBounds_rule) # x + + def 
slackBounds_rule(model, n): + if n in region_dict["factory nodes"]: + return (0, region_dict["supply"][n]) + elif n in region_dict["buyer nodes"]: + return (region_dict["supply"][n], 0) + elif n in region_dict["distribution center nodes"]: + return (0,0) + else: + raise ValueError(f"unknown node type for node {n}") + + model.y = pyo.Var(region_dict["nodes"], bounds=slackBounds_rule) + + model.MinCost = pyo.Objective(expr=\ + sum(region_dict["flow costs"][a]*model.flow[a] for a in region_dict["arcs"]) \ + + sum(region_dict["production costs"][n]*(region_dict["supply"][n]-model.y[n]) for n in region_dict["factory nodes"]) \ + + sum(region_dict["revenues"][n]*(region_dict["supply"][n]-model.y[n])for n in region_dict["buyer nodes"]) , + sense=sense) + + def FlowBalance_rule(m, n): + return sum(m.flow[a] for a in arcsout[n])\ + - sum(m.flow[a] for a in arcsin[n])\ + + m.y[n] == region_dict["supply"][n] + model.FlowBalance= pyo.Constraint(region_dict["nodes"], rule=FlowBalance_rule) + + return model + + +def global_dict_creator(num_scens, start=0): + """Merges the different region_dict thanks to the inter_region_dict in distr to create a global dictionary + + Args: + num_scens (int): number of regions wanted + start (int, optional): unuseful here. Defaults to 0. + + Returns: + dict: dictionary with the information to create a min cost distribution problem + """ + inter_region_dict = distr.inter_region_dict_creator(num_scens) + global_dict={} + for i in range(start,start+num_scens): + scenario_name=f"Region{i+1}" + region_dict = distr.region_dict_creator(scenario_name) + for key in region_dict: + if key in ["nodes","factory nodes", "buyer nodes", "distribution center nodes", "arcs"]: + if i == start: + global_dict[key]=[] + for x in region_dict[key]: + global_dict[key].append(x) + if key in ["supply", "production costs","revenues", "flow capacities", "flow costs"]: + if i == start: + global_dict[key]={} + for key2 in region_dict[key]: + global_dict[key][key2] = region_dict[key][key2] + + def _extract_arc(a): + source, target = a + node_source = source[1] + node_target = target[1] + return (node_source, node_target) + + for a in inter_region_dict["arcs"]: + global_dict["arcs"].append(_extract_arc(a)) + for a in inter_region_dict["costs"]: + global_dict["flow costs"][_extract_arc(a)] = inter_region_dict["costs"][a] + for a in inter_region_dict["capacities"]: + global_dict["flow capacities"][_extract_arc(a)] = inter_region_dict["capacities"][a] + return global_dict + +def main(): + """ + do all the work + """ + cfg = _parse_args() + model = min_cost_distr_problem(global_dict_creator(num_scens=cfg.num_scens)) + + solver_name = cfg.solver_name + opt = pyo.SolverFactory(solver_name) + results = opt.solve(model) # solves and updates model + pyo.assert_optimal_termination(results) + model.pprint() + + # Grabs the objective function + objectives = model.component_objects(pyo.Objective, active=True) + count = 0 + for obj in objectives: + objective_value = pyo.value(obj) + count += 1 + assert count == 1, f"only one objective function is authorized, there are {count}" + print(f"Objective '{obj}' value: {objective_value}") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/mpisppy/tests/examples/distr.py b/mpisppy/tests/examples/distr.py new file mode 100644 index 000000000..c708849c7 --- /dev/null +++ b/mpisppy/tests/examples/distr.py @@ -0,0 +1,402 @@ +# Network Flow - various formulations +import pyomo.environ as pyo +import mpisppy.utils.sputils as sputils + +# In this file, 
we create a (linear) inter-region minimal cost distribution problem.
+# Our data gives the constraints inside each region in region_dict_creator
+# and amongst the different regions in inter_region_dict_creator.
+# The data differs slightly depending on the number of regions (num_scens), which can be 2, 3 or 4.
+
+# Note: regions in our model will be represented in mpi-sppy by scenarios and, to ensure the inter-region constraints,
+# we will use dummy-nodes, which will be represented in mpi-sppy by non-anticipative variables.
+# The following associations of terms are made: regions = scenarios, and dummy-nodes = nonants = consensus-vars
+
+### The following functions create the data
+
+def inter_region_dict_creator(num_scens):
+    """Creates the oriented arcs between the regions, with their capacities and costs. \n
+    This dictionary represents the inter-region constraints and flows. It indicates where to add dummy nodes.
+
+    Args:
+        num_scens (int): select the number of scenarios (regions) wanted
+
+    Returns:
+        dict:
+            Each arc is presented as a pair (source, target) with source and target containing (scenario_name, node_name) \n
+            The arcs are used as keys for the dictionaries of costs and capacities
+    """
+    inter_region_dict={}
+
+    if num_scens == 2:
+        inter_region_dict["arcs"]=[(("Region1","DC1"),("Region2","DC2"))]
+        inter_region_dict["costs"]={(("Region1","DC1"),("Region2","DC2")): 100}
+        inter_region_dict["capacities"]={(("Region1","DC1"),("Region2","DC2")): 70}
+
+    elif num_scens == 3:
+        inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\
+                                     (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\
+                                     (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\
+                                     ]
+        inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\
+                                      (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\
+                                      (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\
+                                      }
+        inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\
+                                           (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\
+                                           (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\
+                                           }
+
+    elif num_scens == 4:
+        inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\
+                                     (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\
+                                     (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\
+                                     (("Region1","DC1"),("Region4","DC4")),(("Region4","DC4"),("Region1","DC1")),\
+                                     (("Region4","DC4"),("Region2","DC2")),(("Region2","DC2"),("Region4","DC4")),\
+                                     ]
+        inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\
+                                      (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\
+                                      (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\
+                                      (("Region1","DC1"),("Region4","DC4")): 30, (("Region4","DC4"),("Region1","DC1")): 50,\
+                                      (("Region4","DC4"),("Region2","DC2")): 100, (("Region2","DC2"),("Region4","DC4")): 70,\
+                                      }
+        inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\
+                                           (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\
+                                           (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\
+                                           (("Region1","DC1"),("Region4","DC4")): 100, (("Region4","DC4"),("Region1","DC1")): 60,\
+                                           (("Region4","DC4"),("Region2","DC2")): 20, (("Region2","DC2"),("Region4","DC4")): 40,\
+                                           }
+
+    return inter_region_dict
+
+
+def dummy_nodes_generator(region_dict, inter_region_dict):
+    """This function creates a new dictionary ``local_dict``, similar to ``region_dict``, with the dummy nodes and their constraints
+
+    Args:
+        region_dict (dict): dictionary for the current scenario \n
+        inter_region_dict (dict): dictionary of the inter-region relations
+
+    Returns:
+        local_dict (dict):
+            This dictionary copies region_dict, completes the already existing fields of
+            region_dict to represent the dummy nodes, and adds the following keys:\n
+            dummy nodes source (resp. target): the list of dummy nodes for which the source (resp. target)
+            is in the considered region. \n
+            dummy nodes source (resp. target) slack bounds: dictionary on dummy nodes source (resp. target)
+    """
+    ### Note: The cost of the arc is here chosen to be split equally between the source region and target region
+
+    # region_dict is renamed as local_dict because it no longer contains only information about the region
+    # but also contains local information that can be linked to the other scenarios (with dummy nodes)
+    local_dict = region_dict
+    local_dict["dummy nodes source"] = list()
+    local_dict["dummy nodes target"] = list()
+    local_dict["dummy nodes source slack bounds"] = {}
+    local_dict["dummy nodes target slack bounds"] = {}
+    for arc in inter_region_dict["arcs"]:
+        source,target = arc
+        region_source,node_source = source
+        region_target,node_target = target
+
+        if region_source == region_dict["name"]:
+            dummy_node=node_source+node_target
+            local_dict["nodes"].append(dummy_node)
+            local_dict["supply"][dummy_node] = 0
+            local_dict["dummy nodes source"].append(dummy_node)
+            local_dict["arcs"].append((node_source, dummy_node))
+            local_dict["flow costs"][(node_source, dummy_node)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model
+            local_dict["flow capacities"][(node_source, dummy_node)] = inter_region_dict["capacities"][(source, target)]
+            local_dict["dummy nodes source slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)]
+
+        if region_target == local_dict["name"]:
+            dummy_node = node_source + node_target
+            local_dict["nodes"].append(dummy_node)
+            local_dict["supply"][dummy_node] = 0
+            local_dict["dummy nodes target"].append(dummy_node)
+            local_dict["arcs"].append((dummy_node, node_target))
+            local_dict["flow costs"][(dummy_node, node_target)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model
+            local_dict["flow capacities"][(dummy_node, node_target)] = inter_region_dict["capacities"][(source, target)]
+            local_dict["dummy nodes target slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)]
+    return local_dict
+
+def _is_partition(L, *lists):
+    # Step 1: Verify that the union of all sublists contains all elements of L
+    if set(L) != set().union(*lists):
+        return False
+
+    # Step 2: Ensure each element in L appears in exactly one sublist
+    for item in L:
+        count = 0
+        for sublist in lists:
+            if item in sublist:
+                count += 1
+        if count != 1:
+            return False
+
+    return True
+
+def region_dict_creator(scenario_name):
+    """ Create a scenario for the
inter-region max profit distribution example. + + The convention for node names is: + Symbol + number of the region (+ _ + number of the example if needed), \n + with symbols DC for distribution centers, F for factory nodes, B for buyer nodes. \n + For instance: F3_1 is the 1st factory node of region 3. \n + + Args: + scenario_name (str): + Name of the scenario to construct. + + Returns: + region_dict (dict): contains all the information in the given region to create the model. It is composed of:\n + "nodes" (list of str): all the nodes. The following subsets are also nodes: \n + "factory nodes", "buyer nodes", "distribution center nodes", \n + "arcs" (list of 2 tuples of str) : (node, node) pairs\n + "supply" (dict[n] of float): supply; keys are nodes (negative for demand)\n + "production costs" (dict of float): at each factory node\n + "revenues" (dict of float): at each buyer node \n + "flow costs" (dict[a] of float) : costs per unit flow on each arc \n + "flow capacities" (dict[a] of floats) : upper bound capacities of each arc \n + """ + if scenario_name == "Region1" : + # Creates data for Region1 + region_dict={"name": "Region1"} + region_dict["nodes"] = ["F1_1", "F1_2", "DC1", "B1_1", "B1_2"] + region_dict["factory nodes"] = ["F1_1","F1_2"] + region_dict["buyer nodes"] = ["B1_1","B1_2"] + region_dict["distribution center nodes"]= ["DC1"] + region_dict["supply"] = {"F1_1": 80, "F1_2": 70, "B1_1": -60, "B1_2": -90, "DC1": 0} + region_dict["arcs"] = [("F1_1","DC1"), ("F1_2","DC1"), ("DC1","B1_1"), + ("DC1","B1_2"), ("F1_1","B1_1"), ("F1_2","B1_2")] + + region_dict["production costs"] = {"F1_1": 50, "F1_2": 80} + region_dict["revenues"] = {"B1_1": 800, "B1_2": 900} + # most capacities are 50, so start there and then override + region_dict["flow capacities"] = {a: 50 for a in region_dict["arcs"]} + region_dict["flow capacities"][("F1_1","B1_1")] = None + region_dict["flow capacities"][("F1_2","B1_2")] = None + region_dict["flow costs"] = {("F1_1","DC1"): 300, ("F1_2","DC1"): 500, ("DC1","B1_1"): 200, + ("DC1","B1_2"): 400, ("F1_1","B1_1"): 700, ("F1_2","B1_2"): 1000} + + elif scenario_name=="Region2": + # Creates data for Region2 + region_dict={"name": "Region2"} + region_dict["nodes"] = ["DC2", "B2_1", "B2_2", "B2_3"] + region_dict["factory nodes"] = list() + region_dict["buyer nodes"] = ["B2_1","B2_2","B2_3"] + region_dict["distribution center nodes"]= ["DC2"] + region_dict["supply"] = {"B2_1": -200, "B2_2": -150, "B2_3": -100, "DC2": 0} + region_dict["arcs"] = [("DC2","B2_1"), ("DC2","B2_2"), ("DC2","B2_3")] + + region_dict["production costs"] = {} + region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3":1200} + region_dict["flow capacities"] = {("DC2","B2_1"): 200, ("DC2","B2_2"): 150, ("DC2","B2_3"): 100} + region_dict["flow costs"] = {("DC2","B2_1"): 100, ("DC2","B2_2"): 200, ("DC2","B2_3"): 300} + + elif scenario_name == "Region3" : + # Creates data for Region3 + region_dict={"name": "Region3"} + region_dict["nodes"] = ["F3_1", "F3_2", "DC3_1", "DC3_2", "B3_1", "B3_2"] + region_dict["factory nodes"] = ["F3_1","F3_2"] + region_dict["buyer nodes"] = ["B3_1","B3_2"] + region_dict["distribution center nodes"]= ["DC3_1","DC3_2"] + region_dict["supply"] = {"F3_1": 80, "F3_2": 60, "B3_1": -100, "B3_2": -100, "DC3_1": 0, "DC3_2": 0} + region_dict["arcs"] = [("F3_1","DC3_1"), ("F3_2","DC3_2"), ("DC3_1","B3_1"), + ("DC3_2","B3_2"), ("DC3_1","DC3_2"), ("DC3_2","DC3_1")] + + region_dict["production costs"] = {"F3_1": 50, "F3_2": 50} + region_dict["revenues"] = {"B3_1": 900, "B3_2": 700} 
+ region_dict["flow capacities"] = {("F3_1","DC3_1"): 80, ("F3_2","DC3_2"): 60, ("DC3_1","B3_1"): 100, + ("DC3_2","B3_2"): 100, ("DC3_1","DC3_2"): 70, ("DC3_2","DC3_1"): 50} + region_dict["flow costs"] = {("F3_1","DC3_1"): 100, ("F3_2","DC3_2"): 100, ("DC3_1","B3_1"): 201, + ("DC3_2","B3_2"): 200, ("DC3_1","DC3_2"): 100, ("DC3_2","DC3_1"): 100} + + elif scenario_name == "Region4": + # Creates data for Region4 + region_dict={"name": "Region4"} + region_dict["nodes"] = ["F4_1", "F4_2", "DC4", "B4_1", "B4_2"] + region_dict["factory nodes"] = ["F4_1","F4_2"] + region_dict["buyer nodes"] = ["B4_1","B4_2"] + region_dict["distribution center nodes"] = ["DC4"] + region_dict["supply"] = {"F4_1": 200, "F4_2": 30, "B4_1": -100, "B4_2": -100, "DC4": 0} + region_dict["arcs"] = [("F4_1","DC4"), ("F4_2","DC4"), ("DC4","B4_1"), ("DC4","B4_2")] + + region_dict["production costs"] = {"F4_1": 50, "F4_2": 50} + region_dict["revenues"] = {"B4_1": 900, "B4_2": 700} + region_dict["flow capacities"] = {("F4_1","DC4"): 80, ("F4_2","DC4"): 60, ("DC4","B4_1"): 100, ("DC4","B4_2"): 100} + region_dict["flow costs"] = {("F4_1","DC4"): 100, ("F4_2","DC4"): 80, ("DC4","B4_1"): 90, ("DC4","B4_2"): 70} + + else: + raise RuntimeError (f"unknown Region name {scenario_name}") + + assert _is_partition(region_dict["nodes"], region_dict["factory nodes"], region_dict["buyer nodes"], region_dict["distribution center nodes"]) + + return region_dict + + +###Creates the model when local_dict is given +def min_cost_distr_problem(local_dict, sense=pyo.minimize): + """ Create an arcs formulation of network flow + + Args: + local_dict (dict): dictionary representing a region including the dummy nodes \n + sense (=pyo.minimize): we aim to minimize the cost, this should always be minimize + + Returns: + model (Pyomo ConcreteModel) : the instantiated model + """ + # Assert sense == pyo.minimize, "sense should be equal to pyo.minimize" + # First, make the special In, Out arc lists for each node + arcsout = {n: list() for n in local_dict["nodes"]} + arcsin = {n: list() for n in local_dict["nodes"]} + for a in local_dict["arcs"]: + arcsout[a[0]].append(a) + arcsin[a[1]].append(a) + + model = pyo.ConcreteModel(name='MinCostFlowArcs') + def flowBounds_rule(model, i,j): + return (0, local_dict["flow capacities"][(i,j)]) + model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x + + def slackBounds_rule(model, n): + if n in local_dict["factory nodes"]: + return (0, local_dict["supply"][n]) + elif n in local_dict["buyer nodes"]: + return (local_dict["supply"][n], 0) + elif n in local_dict["dummy nodes source"]: + return (0,local_dict["dummy nodes source slack bounds"][n]) + elif n in local_dict["dummy nodes target"]: #this slack will respect the opposite flow balance rule + return (0,local_dict["dummy nodes target slack bounds"][n]) + elif n in local_dict["distribution center nodes"]: + return (0,0) + else: + raise ValueError(f"unknown node type for node {n}") + + model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule) + + model.MinCost = pyo.Objective(expr=\ + sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ + + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \ + + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) , + sense=sense) + + def FlowBalance_rule(m, n): + #we change the definition of the slack for target dummy nodes so that we have the slack from the source and from the 
target equal + if n in local_dict["dummy nodes target"]: + return sum(m.flow[a] for a in arcsout[n])\ + - sum(m.flow[a] for a in arcsin[n])\ + - m.y[n] == local_dict["supply"][n] + else: + return sum(m.flow[a] for a in arcsout[n])\ + - sum(m.flow[a] for a in arcsin[n])\ + + m.y[n] == local_dict["supply"][n] + model.FlowBalance= pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule) + + return model + + +###Creates the scenario +def scenario_creator(scenario_name, num_scens=None): + """Creates the model, which should include the consensus variables. \n + However, this function shouldn't attach the consensus variables to root nodes, as it is done in admm_ph. + + Args: + scenario_name (str): the name of the scenario that will be created. Here is of the shape f"Region{i}" with 1<=i<=num_scens \n + num_scens (int): number of scenarios (regions). Useful to create the corresponding inter-region dictionary + + Returns: + Pyomo ConcreteModel: the instantiated model + """ + assert (num_scens is not None) + inter_region_dict = inter_region_dict_creator(num_scens) + region_dict = region_dict_creator(scenario_name) + # Adding dummy nodes and associated features + local_dict = dummy_nodes_generator(region_dict, inter_region_dict) + # Generating the model + model = min_cost_distr_problem(local_dict) + + varlist = list() + sputils.attach_root_node(model, model.MinCost, varlist) + + return model + + +###Functions required in other files, which constructions are specific to the problem + +def scenario_denouement(rank, scenario_name, scenario): + """for each scenario prints its name and the final variable values + + Args: + rank (int): not used here, but could be helpful to know the location + scenario_name (str): name of the scenario + scenario (Pyomo ConcreteModel): the instantiated model + """ + print(f"flow values for {scenario_name}") + scenario.flow.pprint() + print(f"slack values for {scenario_name}") + scenario.y.pprint() + + +def consensus_vars_creator(num_scens): + """The following function creates the consensus_vars dictionary thanks to the inter-region dictionary. \n + This dictionary has redundant information, but is useful for admm_ph. + + Args: + num_scens (int): select the number of scenarios (regions) wanted + + Returns: + dict: dictionary which keys are the regions and values are the list of consensus variables + present in the region + """ + # Due to the small size of inter_region_dict, it is not given as argument but rather created. + inter_region_dict = inter_region_dict_creator(num_scens) + consensus_vars = {} + for arc in inter_region_dict["arcs"]: + source,target = arc + region_source,node_source = source + region_target,node_target = target + dummy_node = node_source + node_target + vstr = f"y[{dummy_node}]" #variable name as string, y is the slack + + #adds dummy_node in the source region + if not region_source in consensus_vars: #initiates consensus_vars[region_source] + consensus_vars[region_source] = list() + consensus_vars[region_source].append(vstr) + + #adds dummy_node in the target region + if not region_target in consensus_vars: #initiates consensus_vars[region_target] + consensus_vars[region_target] = list() + consensus_vars[region_target].append(vstr) + return consensus_vars + + +def scenario_names_creator(num_scens): + """Creates the name of every scenario. 
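# --- illustrative sketch, not part of the example model ---------------------
# For num_scens=3, consensus_vars_creator (above) yields one slack name per
# dummy node, listed in both regions that share the arc.  Assuming the
# inter-region dict links DC1->DC2, DC2->DC3 and DC3_1->DC1, the result is:
consensus_vars_example = {
    "Region1": ["y[DC1DC2]", "y[DC3_1DC1]"],
    "Region2": ["y[DC1DC2]", "y[DC2DC3]"],
    "Region3": ["y[DC2DC3]", "y[DC3_1DC1]"],
}
# -----------------------------------------------------------------------------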
+ + Args: + num_scens (int): number of scenarios + + Returns: + list (str): the list of names + """ + return [f"Region{i+1}" for i in range(num_scens)] + + +def kw_creator(cfg): + """ + Args: + cfg (config): specifications for the problem. We only look at the number of scenarios + + Returns: + dict (str): the kwargs that are used in distr.scenario_creator, here {"num_scens": num_scens} + """ + kwargs = {"num_scens" : cfg.get('num_scens', None), + } + if not kwargs["num_scens"] in [2, 3, 4]: + RuntimeError (f"unexpected number of regions {cfg.num_scens}, whould be in [2, 3, 4]") + return kwargs + + +def inparser_adder(cfg): + #requires the user to give the number of scenarios + cfg.num_scens_required() \ No newline at end of file diff --git a/mpisppy/tests/test_admm_ph.py b/mpisppy/tests/test_admm_ph.py new file mode 100644 index 000000000..613828543 --- /dev/null +++ b/mpisppy/tests/test_admm_ph.py @@ -0,0 +1,75 @@ +import unittest +import mpisppy.utils.admm_ph as admm_ph +import examples.distr as distr +from mpisppy.utils import config +from mpisppy import MPI + +class TestADMMPH(unittest.TestCase): + def setUp(self): + pass + + def tearDown(self): + pass + + def _cfg_creator(self, num_scens): + cfg = config.Config() + + cfg.num_scens_required() + cfg.num_scens = num_scens + return cfg + + + def _make_admm(self, num_scens,verbose=False): + cfg = self._cfg_creator(num_scens) + options = {} + all_scenario_names = distr.scenario_names_creator(num_scens=cfg.num_scens) + scenario_creator = distr.scenario_creator + scenario_creator_kwargs = distr.kw_creator(cfg) + consensus_vars = distr.consensus_vars_creator(cfg.num_scens) + n_cylinders = 1 #distr_admm_cylinders._count_cylinders(cfg) + return admm_ph.ADMM_PH(options, + all_scenario_names, + scenario_creator, + consensus_vars, + n_cylinders=n_cylinders, + mpicomm=MPI.COMM_WORLD, + scenario_creator_kwargs=scenario_creator_kwargs, + verbose=verbose, + ) + + def test_constructor(self): + self._make_admm(2,verbose=True) + for i in range(3,5): + self._make_admm(i) + + def test_variable_probability(self): + admm = self._make_admm(3) + q = dict() + for sname, s in admm.local_scenarios.items(): + q[sname] = admm.var_prob_list_fct(s) + self.assertEqual(q["Region1"][0][1], 0.5) + self.assertEqual(q["Region3"][0][1], 0) + + def test_admm_ph_scenario_creator(self): + admm = self._make_admm(3) + sname = "Region3" + q = admm.admm_ph_scenario_creator(sname) + self.assertTrue(q.y__DC1DC2__.is_fixed()) + self.assertFalse(q.y["DC3_1DC1"].is_fixed()) + + def _slack_name(self, dummy_node): + return f"y[{dummy_node}]" + + def test_assign_variable_probs_error1(self): + admm = self._make_admm(3) + admm.consensus_vars["Region1"].append(self._slack_name("DC2DC3")) + self.assertRaises(RuntimeError, admm.assign_variable_probs, admm) + + def test_assign_variable_probs_error2(self): + admm = self._make_admm(3) + admm.consensus_vars["Region1"].remove(self._slack_name("DC3_1DC1")) + self.assertRaises(RuntimeError, admm.assign_variable_probs, admm) + + +if __name__ == '__main__': + unittest.main() diff --git a/mpisppy/utils/admm_ph.py b/mpisppy/utils/admm_ph.py new file mode 100644 index 000000000..2174a02ac --- /dev/null +++ b/mpisppy/utils/admm_ph.py @@ -0,0 +1,162 @@ +#creating the class admm_ph +import mpisppy.utils.sputils as sputils +import pyomo.environ as pyo +import mpisppy + + +def _consensus_vars_number_creator(consensus_vars): + """associates to each consensus vars the number of time it appears + + Args: + consensus_vars (dict): dictionary which keys are the 
subproblems and values are the list of consensus variables + present in the subproblem + + Returns: + consensus_vars_number (dict): dictionary whose keys are the consensus variables + and values are the number of subproblems the variable is linked to. + """ + consensus_vars_number={} + for subproblem in consensus_vars: + for var in consensus_vars[subproblem]: + if not var in consensus_vars_number: # instanciates consensus_vars_number[var] + consensus_vars_number[var] = 0 + consensus_vars_number[var] += 1 + for var in consensus_vars_number: + if consensus_vars_number[var] == 1: + print(f"The consensus variable {var} appears in a single subproblem") + return consensus_vars_number + +class ADMM_PH(): + """ Defines an interface to all strata (hubs and spokes) + + Args: + options (dict): options + all_scenario_names (list): all scenario names + scenario_creator (fct): returns a concrete model with special things + consensus_vars (dict): dictionary which keys are the subproblems and values are the list of consensus variables + present in the subproblem + n_cylinder (int): number of cylinders that will ultimately be used + mpicomm (MPI comm): creates communication + scenario_creator_kwargs (dict): kwargs passed directly to scenario_creator. + verbose (boolean): if True gives extra debugging information + + Attributes: + local_scenarios (dict of scenario objects): concrete models with + extra data, key is name + local_scenario_names (list): names of locals + """ + def __init__(self, + options, + all_scenario_names, + scenario_creator, #supplied by the user/ modeller, used only here + consensus_vars, + n_cylinders, + mpicomm, + scenario_creator_kwargs=None, + verbose=None, + ): + assert len(options) == 0, "no options supported by admm_ph" + # We need local_scenarios + self.local_scenarios = {} + scenario_tree = sputils._ScenTree(["ROOT"], all_scenario_names) + assert mpicomm.Get_size() % n_cylinders == 0, \ + f"{mpicomm.Get_size()=} and {n_cylinders=}, but {mpicomm.Get_size() % n_cylinders=} should be 0" + ranks_per_cylinder = mpicomm.Get_size() // n_cylinders + + scenario_names_to_rank, _rank_slices, _scenario_slices =\ + scenario_tree.scen_names_to_ranks(ranks_per_cylinder) + + cylinder_rank = mpicomm.Get_rank() % ranks_per_cylinder + + #taken from spbase + self.local_scenario_names = [ + all_scenario_names[i] for i in _rank_slices[cylinder_rank] + ] + for sname in self.local_scenario_names: + s = scenario_creator(sname, **scenario_creator_kwargs) + self.local_scenarios[sname] = s + #we are not collecting instantiation time + + self.consensus_vars = consensus_vars + self.verbose = verbose + self.consensus_vars_number = _consensus_vars_number_creator(consensus_vars) + #check_consensus_vars(consensus_vars) + self.assign_variable_probs(verbose=self.verbose) + self.number_of_scenario = len(all_scenario_names) + + def var_prob_list_fct(self, s): + """Associates probabilities to variables and raises exceptions if the model doesn't match the dictionary consensus_vars + + Args: + s (Pyomo ConcreteModel): scenario + + Returns: + list: list of pairs (variables id (int), probabilities (float)). The order of variables is invariant with the scenarios. + If the consensus variable is present in the scenario it is associated with a probability 1/#subproblem + where it is present. Otherwise it has a probability 0. 
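# --- illustrative sketch, not part of the wrapper ----------------------------
# The rule described above in numbers: a consensus variable shared by k
# subproblems gets probability 1/k wherever it is a real variable, and 0 in
# subproblems where it only exists as a fixed placeholder (counts assumed).
consensus_vars_number = {"y[DC1DC2]": 2, "y[DC3_1DC1]": 2}
prob_in_region1 = 1 / consensus_vars_number["y[DC1DC2]"]   # 0.5
prob_in_region3 = 0   # "y[DC1DC2]" is only a fixed placeholder in Region3
# ------------------------------------------------------------------------------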
+ """ + return self.varprob_dict[s] + + def assign_variable_probs(self, verbose=False): + self.varprob_dict = {} + + #we collect the consensus variables + allvars_list = list() + for sname,s in self.local_scenarios.items(): + for var in self.consensus_vars[sname]: + if not var in allvars_list: + allvars_list.append(var) + error_list1 = [] + error_list2 = [] + for sname,s in self.local_scenarios.items(): + if verbose: + print(f"admm_ph.assign_variable_probs is processing scenario: {sname}") + varlist = list() + self.varprob_dict[s] = list() + for vstr in allvars_list: + v = s.find_component(vstr) + if vstr in self.consensus_vars[sname]: + if v is not None: + #variables that should be on the model + self.varprob_dict[s].append((id(v),1/(self.consensus_vars_number[vstr]))) + else: + error_list1.append((sname,vstr)) + else: + if v is None: + # This var will not be indexed but that might not matter?? + #Going to replace the brackets + v2str = vstr.replace("[","__").replace("]","__") # + v = pyo.Var() + setattr(s, v2str, v) + v.fix(0) + self.varprob_dict[s].append((id(v),0)) + else: + error_list2.append((sname,vstr)) + if v is not None: #if None the error is trapped earlier + varlist.append(v) + + objfunc = sputils.find_active_objective(s) + #this will overwrite the nonants already there + sputils.attach_root_node(s, objfunc, varlist) + + if len(error_list1) + len(error_list2) > 0: + raise RuntimeError (f"for each pair (scenario, variable) of the following list, the variable appears" + f"in consensus_vars, but not in the model:\n {error_list1} \n" + f"for each pair (scenario, variable) of the following list, the variable appears " + f"in the model, but not in consensus var: \n {error_list2}") + + + def admm_ph_scenario_creator(self, sname): + #this is the function the user will supply for all cylinders + scenario = self.local_scenarios[sname] + + # Grabs the objective function and multiplies its value by the number of scenarios to compensate for the probabilities + objectives = scenario.component_objects(pyo.Objective, active=True) + count = 0 + for obj in objectives: + count += 1 + assert count == 1, f"only one objective function is authorized, there are {count}" + obj.expr = obj.expr * self.number_of_scenario + + return scenario + \ No newline at end of file From 7cb04c6ede4c49ac78c6d85259b1531609412573 Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Thu, 9 May 2024 09:18:19 -0700 Subject: [PATCH 05/31] modifying documentation --- doc/src/admm_ph.rst | 27 +++++++++++----------- doc/src/helper_functions.rst | 45 ++++++++++++++++++++++++++++++++++++ doc/src/index.rst | 1 + examples/distr/distr.py | 6 ++--- 4 files changed, 63 insertions(+), 16 deletions(-) create mode 100644 doc/src/helper_functions.rst diff --git a/doc/src/admm_ph.rst b/doc/src/admm_ph.rst index 916e8f739..729fc621a 100644 --- a/doc/src/admm_ph.rst +++ b/doc/src/admm_ph.rst @@ -19,7 +19,7 @@ object which will solve the subproblems in a parallel way, insuring that merging Data needed in the driver +++++++++++++++++++++++++ -The driver file requires a function which creates the model for each scenario. +The driver file requires the `scenario_creator function `_ which creates the model for each scenario. .. py:function:: scenario_creator(scenario_name) @@ -32,14 +32,14 @@ The driver file requires a function which creates the model for each scenario. Returns: Pyomo ConcreteModel: the instantiated model -The driver file also requires helper arguments that are used in mpi-sppy. Here is a summary: -.. 
TBD add hyperlink on precedent line +The driver file also requires helper arguments that are used in mpi-sppy. They are detailed `in helper_functions `_. +Here is a summary: * ``scenario_creator_kwargs``(dict[str]): key words arguments needed in ``scenario_creator`` * ``all_scenario_names`` (list of str): the subproblem names -* A function that prints the results once the problem is solved +* A function that is called at termination in some modules (e.g. PH) .. py:function:: scenario_denouement Args: @@ -109,7 +109,7 @@ Distribution example ``distr.py`` is an example of model creator in admm_ph for a (linear) inter-region minimal cost distribution problem. ``distr_admm_cylinders.py`` is the driver. -Original data dictionnaries +Original data dictionaries +++++++++++++++++++++++++++ In the example the ``inter_region_dict_creator`` creates the inter-region information. @@ -137,8 +137,7 @@ The purpose of ``distr.dummy_nodes_generator`` is to do that. .. note:: - In our example we have chosen to split equally among the regions - the cost of transport from a region source to a region target equally. + In the example the cost of transport is chosen to be split equally in the region source and the region target. We here represent the flow problem with a directed graph. If a flow from DC2 to DC1 were to be authorized we would create a dummy node DC2DC1. @@ -149,7 +148,8 @@ Once the local_dict is created, the Pyomo model can be created thanks to ``min_c Transforming data for the driver ++++++++++++++++++++++++++++++++ -The driver requires three essential elements: ``all_scenario_names``, ``scenario_creator`` and ``consensus_vars``. +The driver requires five elements given by the model: ``all_scenario_names``, ``scenario_creator``, ``scenario_creator_kwargs``, +``inparser_adder`` and ``consensus_vars``. ``all_scenario_names`` (see above) is given by ``scenario_names_creator`` @@ -157,16 +157,17 @@ The driver requires three essential elements: ``all_scenario_names``, ``scenario .. autofunction:: distr.scenario_creator -The function ``consensus_vars_creator`` creates the required ``consensus_vars`` dictionary. - -.. autofunction:: distr.consensus_vars_creator - The dictionary ``scenario_creator_kwargs`` is created with .. autofunction:: distr.kw_creator -The function ``inparser_adder`` requires the user to give ``num_scens`` during the config. +The function ``inparser_adder`` requires the user to give ``num_scens`` (the number of regions) during the configuration. +.. autofunction:: distr.inparser_adder +Contrary to the other helper functions, ``consensus_vars_creator`` is specific to admm_ph. +The function ``consensus_vars_creator`` creates the required ``consensus_vars`` dictionary. + +.. autofunction:: distr.consensus_vars_creator Non local solvers diff --git a/doc/src/helper_functions.rst b/doc/src/helper_functions.rst new file mode 100644 index 000000000..af0d3896b --- /dev/null +++ b/doc/src/helper_functions.rst @@ -0,0 +1,45 @@ +.. _helper_functions: + +Helper functions in the model file +================================== + +The `scenario_creator function `_ is required but some modules also need helper functions in the model file. + +All these functions can be found in the example ``examples.distr.distr.py`` and are documented in `the admm ph documentation `_ + +.. py:function:: kw_creator(cfg) + + Args: + cfg (config object): specifications for the problem + + Returns: + dict (str): the dictionary of key word arguments that is used in ``scenario_creator`` + + +.. 
py:function:: scenario_names_creator(num_scens, start=0) + + Creates the name of the ``num_scens`` scenarios starting from ``start`` + + Args: + num_scens (int): number of scenarios + start (int): starting index for the names + + Returns: + list (str): the list of names + +.. py:function:: inparser_adder(cfg) + + Adds arguments to the config object which are specific to the problem + + Args: + cfg (config object): specifications for the problem given in the command line + +Some modules (e.g. PH) call this function at termination +.. py:function:: scenario_denouement + + Args: + rank (int): rank in the cylinder + + scenario_name (str): name of the scenario + + scenario (Pyomo ConcreteModel): the instantiated model diff --git a/doc/src/index.rst b/doc/src/index.rst index 7011e7a7b..31595de87 100644 --- a/doc/src/index.rst +++ b/doc/src/index.rst @@ -32,6 +32,7 @@ MPI is used. grad_rho.rst w_rho.rst admm_ph.rst + helper_functions.rst contributors.rst internals.rst api.rst diff --git a/examples/distr/distr.py b/examples/distr/distr.py index c708849c7..c9733d808 100644 --- a/examples/distr/distr.py +++ b/examples/distr/distr.py @@ -139,7 +139,7 @@ def region_dict_creator(scenario_name): """ Create a scenario for the inter-region max profit distribution example. The convention for node names is: - Symbol + number of the region (+ _ + number of the example if needed), \n + Symbol + number of the region (+ _ + number of the example if needed), with symbols DC for distribution centers, F for factory nodes, B for buyer nodes. \n For instance: F3_1 is the 1st factory node of region 3. \n @@ -149,7 +149,7 @@ def region_dict_creator(scenario_name): Returns: region_dict (dict): contains all the information in the given region to create the model. It is composed of:\n - "nodes" (list of str): all the nodes. The following subsets are also nodes: \n + "nodes" (list of str): all the nodes. The following subsets are also nodes: "factory nodes", "buyer nodes", "distribution center nodes", \n "arcs" (list of 2 tuples of str) : (node, node) pairs\n "supply" (dict[n] of float): supply; keys are nodes (negative for demand)\n @@ -397,6 +397,6 @@ def kw_creator(cfg): return kwargs -def inparser_adder(cfg): +def rser_adder(cfg): #requires the user to give the number of scenarios cfg.num_scens_required() \ No newline at end of file From 6a8bb54a7836bd02fafdecc8a449f7a23e0e2f65 Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Thu, 9 May 2024 09:37:50 -0700 Subject: [PATCH 06/31] modifying documentation --- doc/src/admm_ph.rst | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/doc/src/admm_ph.rst b/doc/src/admm_ph.rst index 729fc621a..80af02108 100644 --- a/doc/src/admm_ph.rst +++ b/doc/src/admm_ph.rst @@ -69,26 +69,13 @@ It should be provided in ``consensus_vars``. Every variable in ``consensus_vars[subproblem]`` should also appear as a variable of the subproblem. -Solving methods in config -++++++++++++++++++++++++++++++ +Using the config system ++++++++++++++++++++++++ In addition to the previously presented data, the driver also requires arguments to create the PH Model and solve it. These arguments are asked to the user via config. .. TBD add external link on precedent line -The user must specify the solver name with ``--solver-name (str)``. - -The number of cylinders used for progressive hedging must be specified with ``-np (int)``. - -If the config argument ``run_async`` is used, the method used will be asynchronous projective hedging instead of ph. - -.. 
In the following line add link to config page -The other arguments are detailed in config. They include: -* ``config.popular_args()``: the popular arguments, note that ``default_rho`` is needed -* ``xhatxbar``, ``lagrangian``, ``ph_ob``, ``fwph`` each of them create a new cylinder -* ``two_sided_args`` these optionnal arguments include give stopping conditions: ``rel_gap``, ``abs_gap`` for -relative or absolute termination gap and ``max_stalled_iters`` for maximum iterations with no reduction in gap -* ``ph_args`` and ``aph_args`` which are specific arguments required when using PH or APH Direct solver of the extensive form +++++++++++++++++++++++++++++++++++ @@ -169,6 +156,18 @@ The function ``consensus_vars_creator`` creates the required ``consensus_vars`` .. autofunction:: distr.consensus_vars_creator +Constructing the driver ++++++++++++++++++++++++ + +If the config argument ``run_async`` is used, the method used will be asynchronous projective hedging instead of ph. + +.. In the following line add link to config page +The other arguments are detailed in config. They include: +* ``config.popular_args()``: the popular arguments, note that ``default_rho`` is needed +* ``xhatxbar``, ``lagrangian``, ``ph_ob``, ``fwph`` each of them create a new cylinder +* ``two_sided_args`` these optionnal arguments include give stopping conditions: ``rel_gap``, ``abs_gap`` for +relative or absolute termination gap and ``max_stalled_iters`` for maximum iterations with no reduction in gap +* ``ph_args`` and ``aph_args`` which are specific arguments required when using PH or APH Non local solvers +++++++++++++++++ From a2e2cec7a73a868421bdb5f3a1a91e3dc6b8ff5d Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Thu, 9 May 2024 10:49:55 -0700 Subject: [PATCH 07/31] mainly modifying documentation --- doc/src/admm_ph.rst | 15 +++++++++++++-- doc/src/helper_functions.rst | 3 ++- examples/distr/distr.py | 4 ++-- examples/distr/distr_admm_cylinders.py | 1 - mpisppy/tests/test_admm_ph.py | 6 ++++++ 5 files changed, 23 insertions(+), 6 deletions(-) diff --git a/doc/src/admm_ph.rst b/doc/src/admm_ph.rst index 80af02108..de1d25c1d 100644 --- a/doc/src/admm_ph.rst +++ b/doc/src/admm_ph.rst @@ -133,6 +133,7 @@ Once the local_dict is created, the Pyomo model can be created thanks to ``min_c .. autofunction:: distr.min_cost_distr_problem +.. _sectiondatafordriver: Transforming data for the driver ++++++++++++++++++++++++++++++++ The driver requires five elements given by the model: ``all_scenario_names``, ``scenario_creator``, ``scenario_creator_kwargs``, @@ -159,6 +160,7 @@ The function ``consensus_vars_creator`` creates the required ``consensus_vars`` Constructing the driver +++++++++++++++++++++++ + If the config argument ``run_async`` is used, the method used will be asynchronous projective hedging instead of ph. .. In the following line add link to config page @@ -168,6 +170,13 @@ The other arguments are detailed in config. They include: * ``two_sided_args`` these optionnal arguments include give stopping conditions: ``rel_gap``, ``abs_gap`` for relative or absolute termination gap and ``max_stalled_iters`` for maximum iterations with no reduction in gap * ``ph_args`` and ``aph_args`` which are specific arguments required when using PH or APH +* ``tracking_args``: if required, prints additional information from PH on the screen + +.. 
note:: + + Some methods, such as ``run_async`` and all the methods creating new cylinders + not only need to be added in the ``_parse_args()`` method, but also need to be called later in the driver. + Non local solvers +++++++++++++++++ @@ -176,14 +185,16 @@ The file ``globalmodel.py`` and ``distr_ef.py`` are used for debugging. They don problem without decomposition. * ``globalmodel.py`` + In ``globalmodel.py``, ``global_dict_creator`` merges the data into a global dictionary - thanks to the inter-region dictionary, without creating dummy nodes. It then creates the model (as if there was - only one region without dummy nodes) and solves it. + thanks to the inter-region dictionary, without creating dummy nodes. Then model is created (as if there was + only one region without dummy nodes) and solved. However this file strongly depends on the structure of the problem and doesn't decompose the problems. It may be difficultly adpated and inefficient. * ``distr_ef.py`` + As presented previously solves the extensive form. The arguments are the same as ``distr_admm_cylinders.py``, the method doesn't need to be adapted with the model. diff --git a/doc/src/helper_functions.rst b/doc/src/helper_functions.rst index af0d3896b..56c9ff299 100644 --- a/doc/src/helper_functions.rst +++ b/doc/src/helper_functions.rst @@ -5,7 +5,8 @@ Helper functions in the model file The `scenario_creator function `_ is required but some modules also need helper functions in the model file. -All these functions can be found in the example ``examples.distr.distr.py`` and are documented in `the admm ph documentation `_ +All these functions can be found in the example ``examples.distr.distr.py`` and +are documented in `the admm ph documentation `_ .. py:function:: kw_creator(cfg) diff --git a/examples/distr/distr.py b/examples/distr/distr.py index c9733d808..699919a48 100644 --- a/examples/distr/distr.py +++ b/examples/distr/distr.py @@ -393,10 +393,10 @@ def kw_creator(cfg): kwargs = {"num_scens" : cfg.get('num_scens', None), } if not kwargs["num_scens"] in [2, 3, 4]: - RuntimeError (f"unexpected number of regions {cfg.num_scens}, whould be in [2, 3, 4]") + RuntimeError (f"unexpected number of regions {cfg.num_scens}, should be in [2, 3, 4]") return kwargs -def rser_adder(cfg): +def inparser_adder(cfg): #requires the user to give the number of scenarios cfg.num_scens_required() \ No newline at end of file diff --git a/examples/distr/distr_admm_cylinders.py b/examples/distr/distr_admm_cylinders.py index 9ff51a6aa..9a41f1f18 100644 --- a/examples/distr/distr_admm_cylinders.py +++ b/examples/distr/distr_admm_cylinders.py @@ -27,7 +27,6 @@ def _parse_args(): cfg.fwph_args() cfg.lagrangian_args() cfg.ph_ob_args() - cfg.wxbar_read_write_args() cfg.tracking_args() cfg.add_to_config("run_async", description="Run with async projective hedging instead of progressive hedging", diff --git a/mpisppy/tests/test_admm_ph.py b/mpisppy/tests/test_admm_ph.py index 613828543..5d3230a16 100644 --- a/mpisppy/tests/test_admm_ph.py +++ b/mpisppy/tests/test_admm_ph.py @@ -61,11 +61,17 @@ def _slack_name(self, dummy_node): return f"y[{dummy_node}]" def test_assign_variable_probs_error1(self): + print ("**************** test_assign_variable_probs_error1 ********************") + print("Pyomo warning is expected") + admm = self._make_admm(3) admm.consensus_vars["Region1"].append(self._slack_name("DC2DC3")) self.assertRaises(RuntimeError, admm.assign_variable_probs, admm) def test_assign_variable_probs_error2(self): + print ("**************** 
test_assign_variable_probs_error2 ********************") + print("Pyomo warning is expected") + admm = self._make_admm(3) admm.consensus_vars["Region1"].remove(self._slack_name("DC3_1DC1")) self.assertRaises(RuntimeError, admm.assign_variable_probs, admm) From 3b38a0c730a3991aad1b5d95ef87a181faf452d1 Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Thu, 9 May 2024 15:02:28 -0700 Subject: [PATCH 08/31] avoiding the warning --- mpisppy/tests/test_admm_ph.py | 12 +++--------- mpisppy/utils/admm_ph.py | 13 ++++++++----- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/mpisppy/tests/test_admm_ph.py b/mpisppy/tests/test_admm_ph.py index 5d3230a16..aa16a337b 100644 --- a/mpisppy/tests/test_admm_ph.py +++ b/mpisppy/tests/test_admm_ph.py @@ -42,7 +42,7 @@ def test_constructor(self): for i in range(3,5): self._make_admm(i) - def test_variable_probability(self): + def test_variable_probability(self): admm = self._make_admm(3) q = dict() for sname, s in admm.local_scenarios.items(): @@ -61,20 +61,14 @@ def _slack_name(self, dummy_node): return f"y[{dummy_node}]" def test_assign_variable_probs_error1(self): - print ("**************** test_assign_variable_probs_error1 ********************") - print("Pyomo warning is expected") - admm = self._make_admm(3) admm.consensus_vars["Region1"].append(self._slack_name("DC2DC3")) - self.assertRaises(RuntimeError, admm.assign_variable_probs, admm) + self.assertRaises(RuntimeError, admm.assign_variable_probs) def test_assign_variable_probs_error2(self): - print ("**************** test_assign_variable_probs_error2 ********************") - print("Pyomo warning is expected") - admm = self._make_admm(3) admm.consensus_vars["Region1"].remove(self._slack_name("DC3_1DC1")) - self.assertRaises(RuntimeError, admm.assign_variable_probs, admm) + self.assertRaises(RuntimeError, admm.assign_variable_probs) if __name__ == '__main__': diff --git a/mpisppy/utils/admm_ph.py b/mpisppy/utils/admm_ph.py index 2174a02ac..7f1ebbc2c 100644 --- a/mpisppy/utils/admm_ph.py +++ b/mpisppy/utils/admm_ph.py @@ -2,7 +2,7 @@ import mpisppy.utils.sputils as sputils import pyomo.environ as pyo import mpisppy - +import warnings def _consensus_vars_number_creator(consensus_vars): """associates to each consensus vars the number of time it appears @@ -124,17 +124,20 @@ def assign_variable_probs(self, verbose=False): else: if v is None: # This var will not be indexed but that might not matter?? 
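# --- illustrative sketch, not part of the patch -------------------------------
# What the replacement below does for a missing consensus variable: create a
# scalar Var under a bracket-free name and fix it at 0, so the subproblem still
# carries a component for every consensus variable.
import pyomo.environ as pyo
m = pyo.ConcreteModel()
v2str = "y[DC1DC2]".replace("[", "__").replace("]", "__")   # -> "y__DC1DC2__"
v = pyo.Var()
m.add_component(v2str, v)
v.fix(0)   # the fixed placeholder carries probability 0
# -------------------------------------------------------------------------------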
- #Going to replace the brackets - v2str = vstr.replace("[","__").replace("]","__") # + # Going to replace the brackets + v2str = vstr.replace("[","__").replace("]","__") # To distinguish the consensus_vars fixed at 0 v = pyo.Var() - setattr(s, v2str, v) + + ### Lines equivalent to setattr(s, v2str, v) without warning + s.del_component(v2str) + s.add_component(v2str, v) + v.fix(0) self.varprob_dict[s].append((id(v),0)) else: error_list2.append((sname,vstr)) if v is not None: #if None the error is trapped earlier varlist.append(v) - objfunc = sputils.find_active_objective(s) #this will overwrite the nonants already there sputils.attach_root_node(s, objfunc, varlist) From 82c2f09531d7ad556a40f7c7024d1c90b63e1527 Mon Sep 17 00:00:00 2001 From: David L Woodruff Date: Thu, 16 May 2024 14:50:29 -0700 Subject: [PATCH 09/31] Add a comment to globalmodel.py --- examples/distr/globalmodel.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/distr/globalmodel.py b/examples/distr/globalmodel.py index 2b4caf5a9..e85a675ad 100644 --- a/examples/distr/globalmodel.py +++ b/examples/distr/globalmodel.py @@ -1,3 +1,4 @@ +# Distribution example without decomposition import pyomo.environ as pyo import distr from mpisppy.utils import config @@ -131,4 +132,4 @@ def main(): if __name__ == "__main__": - main() \ No newline at end of file + main() From e0eeb20f14706326ef5c5b1eb9a19cfd6ac9df13 Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Thu, 16 May 2024 16:03:26 -0700 Subject: [PATCH 10/31] correcting varlist creation --- mpisppy/utils/admm_ph.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/mpisppy/utils/admm_ph.py b/mpisppy/utils/admm_ph.py index 7f1ebbc2c..d483a4e67 100644 --- a/mpisppy/utils/admm_ph.py +++ b/mpisppy/utils/admm_ph.py @@ -2,7 +2,6 @@ import mpisppy.utils.sputils as sputils import pyomo.environ as pyo import mpisppy -import warnings def _consensus_vars_number_creator(consensus_vars): """associates to each consensus vars the number of time it appears @@ -66,11 +65,11 @@ def __init__(self, scenario_names_to_rank, _rank_slices, _scenario_slices =\ scenario_tree.scen_names_to_ranks(ranks_per_cylinder) - cylinder_rank = mpicomm.Get_rank() % ranks_per_cylinder - + self.cylinder_rank = mpicomm.Get_rank() % ranks_per_cylinder + self.all_scenario_names = all_scenario_names #taken from spbase self.local_scenario_names = [ - all_scenario_names[i] for i in _rank_slices[cylinder_rank] + all_scenario_names[i] for i in _rank_slices[self.cylinder_rank] ] for sname in self.local_scenario_names: s = scenario_creator(sname, **scenario_creator_kwargs) @@ -102,7 +101,7 @@ def assign_variable_probs(self, verbose=False): #we collect the consensus variables allvars_list = list() - for sname,s in self.local_scenarios.items(): + for sname in self.all_scenario_names: for var in self.consensus_vars[sname]: if not var in allvars_list: allvars_list.append(var) From d82f7652e283c08e2cb0c6cb426c32d60a8d089b Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Fri, 17 May 2024 10:23:19 -0700 Subject: [PATCH 11/31] Replacing the formula calculating cylinder ranks. 
Seems to work with several ranks per cylinder and multiple cylinders --- mpisppy/utils/admm_ph.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/mpisppy/utils/admm_ph.py b/mpisppy/utils/admm_ph.py index d483a4e67..f207ea2e3 100644 --- a/mpisppy/utils/admm_ph.py +++ b/mpisppy/utils/admm_ph.py @@ -2,6 +2,8 @@ import mpisppy.utils.sputils as sputils import pyomo.environ as pyo import mpisppy +from mpisppy import MPI +global_rank = MPI.COMM_WORLD.Get_rank() def _consensus_vars_number_creator(consensus_vars): """associates to each consensus vars the number of time it appears @@ -65,7 +67,8 @@ def __init__(self, scenario_names_to_rank, _rank_slices, _scenario_slices =\ scenario_tree.scen_names_to_ranks(ranks_per_cylinder) - self.cylinder_rank = mpicomm.Get_rank() % ranks_per_cylinder + self.cylinder_rank = mpicomm.Get_rank() // n_cylinders + #self.cylinder_rank = mpicomm.Get_rank() % ranks_per_cylinder self.all_scenario_names = all_scenario_names #taken from spbase self.local_scenario_names = [ @@ -149,7 +152,9 @@ def assign_variable_probs(self, verbose=False): def admm_ph_scenario_creator(self, sname): - #this is the function the user will supply for all cylinders + #this is the function the user will supply for all cylinders + assert sname in self.local_scenario_names, f"{global_rank=} {sname=} \n {self.local_scenario_names=}" + #should probably be deleted as it takes time scenario = self.local_scenarios[sname] # Grabs the objective function and multiplies its value by the number of scenarios to compensate for the probabilities From 45b4ed53b373ff6460f0e96504d55cbb74ed51ca Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Mon, 20 May 2024 16:53:34 -0700 Subject: [PATCH 12/31] Allowing the spokes xhatxbar and ph_ob to use variable probability and deleting it in the other spokes --- examples/distr/distr_admm_cylinders.py | 8 +++----- mpisppy/utils/cfg_vanilla.py | 10 +++++++--- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/examples/distr/distr_admm_cylinders.py b/examples/distr/distr_admm_cylinders.py index 9a41f1f18..5dfdae840 100644 --- a/examples/distr/distr_admm_cylinders.py +++ b/examples/distr/distr_admm_cylinders.py @@ -1,5 +1,3 @@ -# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff -# This software is distributed under the 3-clause BSD License. 
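# --- illustrative aside, not part of the patch ---------------------------------
# The two cylinder-rank formulas compared in the patch above assume different
# rank layouts.  With 6 global ranks and 3 cylinders (2 ranks per cylinder):
for rank in range(6):
    print(rank, rank // 3, rank % 2)
# rank // n_cylinders       -> 0 0 0 1 1 1  (round-robin layout: ranks 0,1,2 are
#                                            rank 0 of cylinders 0,1,2)
# rank % ranks_per_cylinder -> 0 1 0 1 0 1  (block layout)
# Which formula is right depends on how mpi-sppy assigns ranks to strata; the
# commit message itself only claims the new one "seems to work".
# --------------------------------------------------------------------------------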
# general example driver for distr with cylinders import mpisppy.utils.admm_ph as admm_ph import distr @@ -99,14 +97,14 @@ def main(): # FWPH spoke if cfg.fwph: + print("May not give the expected result due to variable probability") fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) # Standard Lagrangian bound spoke if cfg.lagrangian: lagrangian_spoke = vanilla.lagrangian_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None, - variable_probability=variable_probability) + rho_setter = None) # ph outer bounder spoke @@ -142,7 +140,7 @@ def main(): wheel.write_tree_solution('distr_full_solution') if global_rank == 0: - best_objective = wheel.spcomm.BestInnerBound * len(all_scenario_names) + best_objective = wheel.spcomm.BestInnerBound print(f"{best_objective=}") diff --git a/mpisppy/utils/cfg_vanilla.py b/mpisppy/utils/cfg_vanilla.py index cb88bf74d..8db9496bf 100644 --- a/mpisppy/utils/cfg_vanilla.py +++ b/mpisppy/utils/cfg_vanilla.py @@ -512,7 +512,8 @@ def xhatxbar_spoke( all_scenario_names, scenario_creator_kwargs=None, variable_probability=None, - ph_extensions=None + ph_extensions=None, + all_nodenames=None, ): xhatxbar_dict = _Xhat_Eval_spoke_foundation( XhatXbarInnerBound, @@ -522,6 +523,7 @@ def xhatxbar_spoke( all_scenario_names, scenario_creator_kwargs=scenario_creator_kwargs, ph_extensions=ph_extensions, + all_nodenames=all_nodenames, ) xhatxbar_dict["opt_kwargs"]["options"]['bundles_per_rank'] = 0 # no bundles for xhat @@ -702,7 +704,8 @@ def ph_ob_spoke( all_scenario_names, scenario_creator_kwargs=None, rho_setter=None, - all_nodenames = None, + all_nodenames=None, + variable_probability=None, ): shoptions = shared_options(cfg) ph_ob_spoke = { @@ -715,7 +718,8 @@ def ph_ob_spoke( "scenario_creator_kwargs": scenario_creator_kwargs, 'scenario_denouement': scenario_denouement, "rho_setter": rho_setter, - "all_nodenames": all_nodenames + "all_nodenames": all_nodenames, + "variable_probability": variable_probability, } } if cfg.ph_ob_rho_rescale_factors_json is not None: From 553c9695165c01c78027a788f96b9839178f8723 Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Wed, 22 May 2024 15:49:00 -0700 Subject: [PATCH 13/31] deleting fwph from the driver and changing the doc for the driver --- doc/src/admm_ph.rst | 23 ++++++++--------------- examples/distr/distr_admm_cylinders.py | 11 ++--------- 2 files changed, 10 insertions(+), 24 deletions(-) diff --git a/doc/src/admm_ph.rst b/doc/src/admm_ph.rst index de1d25c1d..3828ebae6 100644 --- a/doc/src/admm_ph.rst +++ b/doc/src/admm_ph.rst @@ -72,8 +72,8 @@ It should be provided in ``consensus_vars``. Using the config system +++++++++++++++++++++++ -In addition to the previously presented data, the driver also requires arguments to -create the PH Model and solve it. These arguments are asked to the user via config. +In addition to the previously presented data, the driver also requires arguments tocreate the PH Model and solve it. +Some arguments may be passed to the user via config, but the cylinders need to be added. .. TBD add external link on precedent line @@ -157,24 +157,17 @@ The function ``consensus_vars_creator`` creates the required ``consensus_vars`` .. 
autofunction:: distr.consensus_vars_creator -Constructing the driver -+++++++++++++++++++++++ - +Understanding the driver +++++++++++++++++++++++++ +In the example the driver gets argument from the command line through the function ``_parse_args`` -If the config argument ``run_async`` is used, the method used will be asynchronous projective hedging instead of ph. +.. py:function:: _parse_args() + Gets argument from the command line and add them to a config argument. Some arguments are required. -.. In the following line add link to config page -The other arguments are detailed in config. They include: -* ``config.popular_args()``: the popular arguments, note that ``default_rho`` is needed -* ``xhatxbar``, ``lagrangian``, ``ph_ob``, ``fwph`` each of them create a new cylinder -* ``two_sided_args`` these optionnal arguments include give stopping conditions: ``rel_gap``, ``abs_gap`` for -relative or absolute termination gap and ``max_stalled_iters`` for maximum iterations with no reduction in gap -* ``ph_args`` and ``aph_args`` which are specific arguments required when using PH or APH -* ``tracking_args``: if required, prints additional information from PH on the screen .. note:: - Some methods, such as ``run_async`` and all the methods creating new cylinders + Some arguments, such as ``cfg.run_async`` and all the methods creating new cylinders not only need to be added in the ``_parse_args()`` method, but also need to be called later in the driver. diff --git a/examples/distr/distr_admm_cylinders.py b/examples/distr/distr_admm_cylinders.py index 5dfdae840..bb7017006 100644 --- a/examples/distr/distr_admm_cylinders.py +++ b/examples/distr/distr_admm_cylinders.py @@ -22,7 +22,6 @@ def _parse_args(): cfg.ph_args() cfg.aph_args() cfg.xhatxbar_args() - cfg.fwph_args() cfg.lagrangian_args() cfg.ph_ob_args() cfg.tracking_args() @@ -37,7 +36,7 @@ def _parse_args(): def _count_cylinders(cfg): count = 1 - cfglist = ["xhatxbar", "lagrangian", "ph_ob", "fwph"] #all the cfg arguments that create a new cylinders + cfglist = ["xhatxbar", "lagrangian", "ph_ob"] #all the cfg arguments that create a new cylinders for cylname in cfglist: if cfg[cylname]: count += 1 @@ -94,11 +93,7 @@ def main(): rho_setter=None, variable_probability=variable_probability) - - # FWPH spoke - if cfg.fwph: - print("May not give the expected result due to variable probability") - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + # FWPH spoke DOES NOT WORK with variable probability # Standard Lagrangian bound spoke if cfg.lagrangian: @@ -119,8 +114,6 @@ def main(): xhatxbar_spoke = vanilla.xhatxbar_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) list_of_spoke_dict = list() - if cfg.fwph: - list_of_spoke_dict.append(fw_spoke) if cfg.lagrangian: list_of_spoke_dict.append(lagrangian_spoke) if cfg.ph_ob: From 34ad32e803249a714506cf5771d812f77157df10 Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Wed, 22 May 2024 15:57:11 -0700 Subject: [PATCH 14/31] indicating that fwph does not support variable probability --- mpisppy/fwph/fwph.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mpisppy/fwph/fwph.py b/mpisppy/fwph/fwph.py index cf29fbd75..08c5236bc 100644 --- a/mpisppy/fwph/fwph.py +++ b/mpisppy/fwph/fwph.py @@ -64,6 +64,7 @@ def __init__( scenario_creator_kwargs=None, ph_converger=None, rho_setter=None, + variable_probability=None, ): super().__init__( PH_options, @@ -77,8 +78,8 @@ def __init__( extension_kwargs=None, ph_converger=ph_converger, 
rho_setter=rho_setter, - ) - + ) + assert (variable_probability == None), "variable probability is not allowed with fwph" self._init(FW_options) def _init(self, FW_options): From 00a1c4df38da7d2c588b0cda004c854256aae1a4 Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Fri, 24 May 2024 14:54:02 -0700 Subject: [PATCH 15/31] modifying what had to be modified (including the name in every file) --- .../{testadmm_ph.yml => testadmmWrapper.yml} | 4 +-- doc/src/{admm_ph.rst => admmWrapper.rst} | 20 ++++++------- doc/src/helper_functions.rst | 2 +- doc/src/index.rst | 2 +- examples/distr/distr.py | 4 +-- examples/distr/distr_admm_cylinders.py | 16 +++++----- examples/distr/distr_ef.py | 6 ++-- mpisppy/tests/examples/distr.py | 7 +++-- .../{test_admm_ph.py => test_admmWrapper.py} | 12 ++++---- mpisppy/utils/{admm_ph.py => admmWrapper.py} | 30 ++++++++----------- 10 files changed, 51 insertions(+), 52 deletions(-) rename .github/workflows/{testadmm_ph.yml => testadmmWrapper.yml} (92%) rename doc/src/{admm_ph.rst => admmWrapper.rst} (93%) rename mpisppy/tests/{test_admm_ph.py => test_admmWrapper.py} (89%) rename mpisppy/utils/{admm_ph.py => admmWrapper.py} (89%) diff --git a/.github/workflows/testadmm_ph.yml b/.github/workflows/testadmmWrapper.yml similarity index 92% rename from .github/workflows/testadmm_ph.yml rename to .github/workflows/testadmmWrapper.yml index d0e148791..008cc0138 100644 --- a/.github/workflows/testadmm_ph.yml +++ b/.github/workflows/testadmmWrapper.yml @@ -1,6 +1,6 @@ # aph (pyomo released) -name: admm_ph tests +name: admmWrapper tests on: push: @@ -39,4 +39,4 @@ jobs: run: | cd mpisppy/tests # envall does nothing - python test_admm_ph.py + python test_admmWrapper.py diff --git a/doc/src/admm_ph.rst b/doc/src/admmWrapper.rst similarity index 93% rename from doc/src/admm_ph.rst rename to doc/src/admmWrapper.rst index 3828ebae6..78b249e65 100644 --- a/doc/src/admm_ph.rst +++ b/doc/src/admmWrapper.rst @@ -1,7 +1,7 @@ -.. _admm_ph: +.. _admmWrapper: -ADMM PH -======= +AdmmWrapper +=========== **ADMM_PH** uses progressive hedging implemented in mpi-sppy to solve a non-stochastic problem by breaking them into subproblems. @@ -11,9 +11,9 @@ An example of usage is given below. Usage ----- -The driver (in the example ``distr_admm_cylinders.py``) calls ``admm_ph.py``, +The driver (in the example ``distr_admm_cylinders.py``) calls ``admmWrapper.py``, thanks to the formated data provided by the model (in the example ``distr.py``). -The file ``admm_ph.py`` returns variable probabilities that can be used in the driver to create the PH (or APH) +The file ``admmWrapper.py`` returns variable probabilities that can be used in the driver to create the PH (or APH) object which will solve the subproblems in a parallel way, insuring that merging conditions are respected. Data needed in the driver @@ -80,20 +80,20 @@ Some arguments may be passed to the user via config, but the cylinders need to b Direct solver of the extensive form +++++++++++++++++++++++++++++++++++ ``distr_ef.py`` can be used as a verification or debugging tool for small instances. -It directly solves the extensive form using the wrapper ``scenario_creator`` from ``admm_ph``. +It directly solves the extensive form using the wrapper ``scenario_creator`` from ``admmWrapper``. It has the advantage of requiring the same arguments as ``distr_admm_cylinders`` because both solve the extensive form. 
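A sketch of that direct solve, mirroring ``distr_ef.py`` from this series (the
``admm`` object is the wrapper built in the driver; the solver name is an
assumption)::

    import pyomo.environ as pyo
    import mpisppy.utils.sputils as sputils

    ef = sputils.create_EF(admm.local_scenario_names,
                           admm.admmWrapper_scenario_creator,
                           nonant_for_fixed_vars=False)
    pyo.SolverFactory("cplex").solve(ef)
    print(pyo.value(ef.EF_Obj))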
-This method offers a verification for small instances, but is slower than ``admm_ph`` +This method offers a verification for small instances, but is slower than ``admmWrapper`` as it doesn't decompose the problem. .. note:: - ``admm_ph`` doesn't collect instanciation time. + ``admmWrapper`` doesn't collect instanciation time. Distribution example -------------------- -``distr.py`` is an example of model creator in admm_ph for a (linear) inter-region minimal cost distribution problem. +``distr.py`` is an example of model creator in admmWrapper for a (linear) inter-region minimal cost distribution problem. ``distr_admm_cylinders.py`` is the driver. Original data dictionaries @@ -152,7 +152,7 @@ The dictionary ``scenario_creator_kwargs`` is created with The function ``inparser_adder`` requires the user to give ``num_scens`` (the number of regions) during the configuration. .. autofunction:: distr.inparser_adder -Contrary to the other helper functions, ``consensus_vars_creator`` is specific to admm_ph. +Contrary to the other helper functions, ``consensus_vars_creator`` is specific to admmWrapper. The function ``consensus_vars_creator`` creates the required ``consensus_vars`` dictionary. .. autofunction:: distr.consensus_vars_creator diff --git a/doc/src/helper_functions.rst b/doc/src/helper_functions.rst index 56c9ff299..38aeeddfa 100644 --- a/doc/src/helper_functions.rst +++ b/doc/src/helper_functions.rst @@ -6,7 +6,7 @@ Helper functions in the model file The `scenario_creator function `_ is required but some modules also need helper functions in the model file. All these functions can be found in the example ``examples.distr.distr.py`` and -are documented in `the admm ph documentation `_ +are documented in `the admm wrapper documentation `_ .. py:function:: kw_creator(cfg) diff --git a/doc/src/index.rst b/doc/src/index.rst index 31595de87..9dfe8c71c 100644 --- a/doc/src/index.rst +++ b/doc/src/index.rst @@ -31,7 +31,7 @@ MPI is used. pickledbundles.rst grad_rho.rst w_rho.rst - admm_ph.rst + admmWrapper.rst helper_functions.rst contributors.rst internals.rst diff --git a/examples/distr/distr.py b/examples/distr/distr.py index 699919a48..a16d3d5dd 100644 --- a/examples/distr/distr.py +++ b/examples/distr/distr.py @@ -298,7 +298,7 @@ def FlowBalance_rule(m, n): ###Creates the scenario def scenario_creator(scenario_name, num_scens=None): """Creates the model, which should include the consensus variables. \n - However, this function shouldn't attach the consensus variables to root nodes, as it is done in admm_ph. + However, this function shouldn't attach the consensus variables to root nodes, as it is done in admmWrapper. Args: scenario_name (str): the name of the scenario that will be created. Here is of the shape f"Region{i}" with 1<=i<=num_scens \n @@ -339,7 +339,7 @@ def scenario_denouement(rank, scenario_name, scenario): def consensus_vars_creator(num_scens): """The following function creates the consensus_vars dictionary thanks to the inter-region dictionary. \n - This dictionary has redundant information, but is useful for admm_ph. + This dictionary has redundant information, but is useful for admmWrapper. 
Args: num_scens (int): select the number of scenarios (regions) wanted diff --git a/examples/distr/distr_admm_cylinders.py b/examples/distr/distr_admm_cylinders.py index bb7017006..44491a632 100644 --- a/examples/distr/distr_admm_cylinders.py +++ b/examples/distr/distr_admm_cylinders.py @@ -1,5 +1,5 @@ # general example driver for distr with cylinders -import mpisppy.utils.admm_ph as admm_ph +import mpisppy.utils.admmWrapper as admmWrapper import distr import mpisppy.cylinders @@ -34,9 +34,11 @@ def _parse_args(): return cfg -def _count_cylinders(cfg): +# This need to be executed long before the cylinders are created +def _count_cylinders(cfg): count = 1 - cfglist = ["xhatxbar", "lagrangian", "ph_ob"] #all the cfg arguments that create a new cylinders + cfglist = ["xhatxbar", "lagrangian", "ph_ob"] # All the cfg arguments that create a new cylinders + # Add to this list any cfg attribute that would create a spoke for cylname in cfglist: if cfg[cylname]: count += 1 @@ -58,7 +60,7 @@ def main(): scenario_creator_kwargs = distr.kw_creator(cfg) consensus_vars = distr.consensus_vars_creator(cfg.num_scens) n_cylinders = _count_cylinders(cfg) - admm = admm_ph.ADMM_PH(options, + admm = admmWrapper.AdmmWrapper(options, all_scenario_names, scenario_creator, consensus_vars, @@ -68,11 +70,11 @@ def main(): ) # Things needed for vanilla cylinders - scenario_creator = admm.admm_ph_scenario_creator ##change needed because of the wrapper + scenario_creator = admm.admmWrapper_scenario_creator ##change needed because of the wrapper scenario_creator_kwargs = None scenario_denouement = distr.scenario_denouement - #note that the admm_ph scenario_creator wrapper doesn't take any arguments - variable_probability = admm.var_prob_list_fct + #note that the admmWrapper scenario_creator wrapper doesn't take any arguments + variable_probability = admm.var_prob_list beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) diff --git a/examples/distr/distr_ef.py b/examples/distr/distr_ef.py index e2cd3b7d7..34ccf0748 100644 --- a/examples/distr/distr_ef.py +++ b/examples/distr/distr_ef.py @@ -1,7 +1,7 @@ # Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff # This software is distributed under the 3-clause BSD License. # general example driver for distr with cylinders -import mpisppy.utils.admm_ph as admm_ph +import mpisppy.utils.admmWrapper as admmWrapper import distr import mpisppy.cylinders import pyomo.environ as pyo @@ -29,7 +29,7 @@ def _parse_args(): def solve_EF_directly(admm,solver_name): scenario_names = admm.local_scenario_names - scenario_creator = admm.admm_ph_scenario_creator + scenario_creator = admm.admmWrapper_scenario_creator ef = sputils.create_EF( scenario_names, @@ -55,7 +55,7 @@ def main(): consensus_vars = distr.consensus_vars_creator(cfg.num_scens) n_cylinders = 1 - admm = admm_ph.ADMM_PH(options, + admm = admmWrapper.AdmmWrapper(options, all_scenario_names, scenario_creator, consensus_vars, diff --git a/mpisppy/tests/examples/distr.py b/mpisppy/tests/examples/distr.py index c708849c7..699d50ba0 100644 --- a/mpisppy/tests/examples/distr.py +++ b/mpisppy/tests/examples/distr.py @@ -1,4 +1,5 @@ -# Network Flow - various formulations +# This file is used in the tests and should not be modified! + import pyomo.environ as pyo import mpisppy.utils.sputils as sputils @@ -298,7 +299,7 @@ def FlowBalance_rule(m, n): ###Creates the scenario def scenario_creator(scenario_name, num_scens=None): """Creates the model, which should include the consensus variables. 
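# --- illustrative sketch, not part of the test fixture --------------------------
# How the wrapper is wired into a driver, mirroring distr_admm_cylinders.py
# above (a single cylinder and three regions are assumed here):
import mpisppy.utils.admmWrapper as admmWrapper
from mpisppy.utils import config
from mpisppy import MPI
import distr   # the model file from examples/distr

cfg = config.Config()
cfg.num_scens_required()
cfg.num_scens = 3
admm = admmWrapper.AdmmWrapper({},   # no options are supported
                               distr.scenario_names_creator(cfg.num_scens),
                               distr.scenario_creator,
                               distr.consensus_vars_creator(cfg.num_scens),
                               n_cylinders=1,
                               mpicomm=MPI.COMM_WORLD,
                               scenario_creator_kwargs=distr.kw_creator(cfg))
scenario_creator = admm.admmWrapper_scenario_creator   # wrapper takes only sname
variable_probability = admm.var_prob_list              # hand this to the PH hub
# ---------------------------------------------------------------------------------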
\n - However, this function shouldn't attach the consensus variables to root nodes, as it is done in admm_ph. + However, this function shouldn't attach the consensus variables to root nodes, as it is done in admmWrapper. Args: scenario_name (str): the name of the scenario that will be created. Here is of the shape f"Region{i}" with 1<=i<=num_scens \n @@ -339,7 +340,7 @@ def scenario_denouement(rank, scenario_name, scenario): def consensus_vars_creator(num_scens): """The following function creates the consensus_vars dictionary thanks to the inter-region dictionary. \n - This dictionary has redundant information, but is useful for admm_ph. + This dictionary has redundant information, but is useful for admmWrapper. Args: num_scens (int): select the number of scenarios (regions) wanted diff --git a/mpisppy/tests/test_admm_ph.py b/mpisppy/tests/test_admmWrapper.py similarity index 89% rename from mpisppy/tests/test_admm_ph.py rename to mpisppy/tests/test_admmWrapper.py index aa16a337b..426c2fa9e 100644 --- a/mpisppy/tests/test_admm_ph.py +++ b/mpisppy/tests/test_admmWrapper.py @@ -1,10 +1,10 @@ import unittest -import mpisppy.utils.admm_ph as admm_ph +import mpisppy.utils.admmWrapper as admmWrapper import examples.distr as distr from mpisppy.utils import config from mpisppy import MPI -class TestADMMPH(unittest.TestCase): +class TestAdmmWrapper(unittest.TestCase): def setUp(self): pass @@ -27,7 +27,7 @@ def _make_admm(self, num_scens,verbose=False): scenario_creator_kwargs = distr.kw_creator(cfg) consensus_vars = distr.consensus_vars_creator(cfg.num_scens) n_cylinders = 1 #distr_admm_cylinders._count_cylinders(cfg) - return admm_ph.ADMM_PH(options, + return admmWrapper.AdmmWrapper(options, all_scenario_names, scenario_creator, consensus_vars, @@ -46,14 +46,14 @@ def test_variable_probability(self): admm = self._make_admm(3) q = dict() for sname, s in admm.local_scenarios.items(): - q[sname] = admm.var_prob_list_fct(s) + q[sname] = admm.var_prob_list(s) self.assertEqual(q["Region1"][0][1], 0.5) self.assertEqual(q["Region3"][0][1], 0) - def test_admm_ph_scenario_creator(self): + def test_admmWrapper_scenario_creator(self): admm = self._make_admm(3) sname = "Region3" - q = admm.admm_ph_scenario_creator(sname) + q = admm.admmWrapper_scenario_creator(sname) self.assertTrue(q.y__DC1DC2__.is_fixed()) self.assertFalse(q.y["DC3_1DC1"].is_fixed()) diff --git a/mpisppy/utils/admm_ph.py b/mpisppy/utils/admmWrapper.py similarity index 89% rename from mpisppy/utils/admm_ph.py rename to mpisppy/utils/admmWrapper.py index f207ea2e3..2cb0ddbe1 100644 --- a/mpisppy/utils/admm_ph.py +++ b/mpisppy/utils/admmWrapper.py @@ -1,7 +1,8 @@ -#creating the class admm_ph +#creating the class AdmmWrapper import mpisppy.utils.sputils as sputils import pyomo.environ as pyo import mpisppy +from collections import OrderedDict from mpisppy import MPI global_rank = MPI.COMM_WORLD.Get_rank() @@ -27,8 +28,8 @@ def _consensus_vars_number_creator(consensus_vars): print(f"The consensus variable {var} appears in a single subproblem") return consensus_vars_number -class ADMM_PH(): - """ Defines an interface to all strata (hubs and spokes) +class AdmmWrapper(): + """ This class assigns variable probabilities and creates wrapper for the scenario creator Args: options (dict): options @@ -56,7 +57,7 @@ def __init__(self, scenario_creator_kwargs=None, verbose=None, ): - assert len(options) == 0, "no options supported by admm_ph" + assert len(options) == 0, "no options supported by AdmmWrapper" # We need local_scenarios self.local_scenarios = 
{} scenario_tree = sputils._ScenTree(["ROOT"], all_scenario_names) @@ -86,7 +87,7 @@ def __init__(self, self.assign_variable_probs(verbose=self.verbose) self.number_of_scenario = len(all_scenario_names) - def var_prob_list_fct(self, s): + def var_prob_list(self, s): """Associates probabilities to variables and raises exceptions if the model doesn't match the dictionary consensus_vars Args: @@ -103,19 +104,18 @@ def assign_variable_probs(self, verbose=False): self.varprob_dict = {} #we collect the consensus variables - allvars_list = list() + allvars_dict = OrderedDict() for sname in self.all_scenario_names: - for var in self.consensus_vars[sname]: - if not var in allvars_list: - allvars_list.append(var) + for var_name in self.consensus_vars[sname]: + allvars_dict[var_name] = None error_list1 = [] error_list2 = [] for sname,s in self.local_scenarios.items(): if verbose: - print(f"admm_ph.assign_variable_probs is processing scenario: {sname}") + print(f"AdmmWrapper.assign_variable_probs is processing scenario: {sname}") varlist = list() self.varprob_dict[s] = list() - for vstr in allvars_list: + for vstr in allvars_dict.keys(): v = s.find_component(vstr) if vstr in self.consensus_vars[sname]: if v is not None: @@ -151,18 +151,14 @@ def assign_variable_probs(self, verbose=False): f"in the model, but not in consensus var: \n {error_list2}") - def admm_ph_scenario_creator(self, sname): + def admmWrapper_scenario_creator(self, sname): #this is the function the user will supply for all cylinders assert sname in self.local_scenario_names, f"{global_rank=} {sname=} \n {self.local_scenario_names=}" #should probably be deleted as it takes time scenario = self.local_scenarios[sname] # Grabs the objective function and multiplies its value by the number of scenarios to compensate for the probabilities - objectives = scenario.component_objects(pyo.Objective, active=True) - count = 0 - for obj in objectives: - count += 1 - assert count == 1, f"only one objective function is authorized, there are {count}" + obj = sputils.find_active_objective(scenario) obj.expr = obj.expr * self.number_of_scenario return scenario From 03a0862b985e8d4e498d38e8bf08ae7c22cc6105 Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Fri, 24 May 2024 14:55:25 -0700 Subject: [PATCH 16/31] nothing to deal with ADMM, correcting the name definition of a node in hydro --- examples/hydro/hydro.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/hydro/hydro.py b/examples/hydro/hydro.py index 77b7d146d..000837a12 100644 --- a/examples/hydro/hydro.py +++ b/examples/hydro/hydro.py @@ -184,7 +184,8 @@ def MakeNodesforScen(model, BFs, scennum): Args: BFs (list of int): branching factors """ - ndn = "ROOT_"+str((scennum-1) // BFs[0]) # scennum is one-based + # In general divide by the product of the branching factors that come after the node (here prod(BFs[1:])=BFs[1]) + ndn = "ROOT_"+str((scennum-1) // BFs[1]) # scennum is one-based retval = [scenario_tree.ScenarioNode("ROOT", 1.0, 1, From 2fffec9bb122c584ccf4d27301647e85d7703e6a Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Tue, 28 May 2024 10:32:45 -0700 Subject: [PATCH 17/31] correcting wrong directions in the documentation and avoiding writing files in distr_admm_cylinders --- doc/src/admmWrapper.rst | 28 +++++++++++++++----------- examples/distr/distr_admm_cylinders.py | 2 +- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/doc/src/admmWrapper.rst b/doc/src/admmWrapper.rst index 78b249e65..dfc64116b 100644 --- a/doc/src/admmWrapper.rst +++ 
From 2fffec9bb122c584ccf4d27301647e85d7703e6a Mon Sep 17 00:00:00 2001
From: Aymeric Legros
Date: Tue, 28 May 2024 10:32:45 -0700
Subject: [PATCH 17/31] correcting wrong directions in the documentation and avoiding writing files in distr_admm_cylinders

---
 doc/src/admmWrapper.rst                | 28 +++++++++++++++-----------
 examples/distr/distr_admm_cylinders.py |  2 +-
 2 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/doc/src/admmWrapper.rst b/doc/src/admmWrapper.rst
index 78b249e65..dfc64116b 100644
--- a/doc/src/admmWrapper.rst
+++ b/doc/src/admmWrapper.rst
@@ -12,7 +12,7 @@ Usage
 -----
 
 The driver (in the example ``distr_admm_cylinders.py``) calls ``admmWrapper.py``,
-thanks to the formated data provided by the model (in the example ``distr.py``).
+thanks to the formatted data provided by the model (in the example ``examples.distr.distr.py``).
 The file ``admmWrapper.py`` returns variable probabilities that can be used in the driver
 to create the PH (or APH) object which will solve the subproblems in a parallel way,
 insuring that merging conditions are respected.
@@ -32,7 +32,8 @@ The driver file requires the `scenario_creator function <scenario_creator>`_ whi
     Returns:
         Pyomo ConcreteModel: the instantiated model
 
-The driver file also requires helper arguments that are used in mpi-sppy. They are detailed `in helper_functions <helper_functions>`_.
+The driver file also requires helper arguments that are used in mpi-sppy. They are detailed `in helper_functions <helper_functions>`_
+and in the example below.
 Here is a summary:
 
 * ``scenario_creator_kwargs``(dict[str]): key words arguments needed in ``scenario_creator``
@@ -93,7 +94,7 @@ as it doesn't decompose the problem.
 Distribution example
 --------------------
 
-``distr.py`` is an example of model creator in admmWrapper for a (linear) inter-region minimal cost distribution problem.
+``examples.distr.distr.py`` is an example of a model creator in admmWrapper for a (linear) inter-region minimal cost distribution problem.
 ``distr_admm_cylinders.py`` is the driver.
 
 Original data dictionaries
 ++++++++++++++++++++++++++
 
 In the example the ``inter_region_dict_creator`` creates the inter-region information.
 
-.. autofunction:: distr.inter_region_dict_creator
+.. autofunction:: examples.distr.distr.inter_region_dict_creator
 
 The ``region_dict_creator`` creates the information specific to a region regardless of the other regions.
 
-.. autofunction:: distr.region_dict_creator
+.. autofunction:: examples.distr.distr.region_dict_creator
 
 Adapting the data to create the model
 +++++++++++++++++++++++++++++++++++++
 
 If ``inter-region-dict`` indicates that an arc exists from the node DC1 (distrub
 node DC1DC2 stored both in Region1's local_dict["dummy node source"] and in Region2's local_dict["dummy node target"],
 with a slack whose only constraint is to be lower than the flow possible from DC1 to DC2. PH then insures that the slacks are equal.
 
-The purpose of ``distr.dummy_nodes_generator`` is to do that.
+The purpose of ``examples.distr.distr.dummy_nodes_generator`` is to do that.
 
-.. autofunction:: distr.dummy_nodes_generator
+.. autofunction:: examples.distr.distr.dummy_nodes_generator
 
 .. note::
 
 Once the local_dict is created, the Pyomo model can be created thanks to ``min_cost_distr_problem``.
 
-.. autofunction:: distr.min_cost_distr_problem
+.. autofunction:: examples.distr.distr.min_cost_distr_problem
 
 .. _sectiondatafordriver:
+
 Transforming data for the driver
 ++++++++++++++++++++++++++++++++
+
 The driver requires five elements given by the model: ``all_scenario_names``, ``scenario_creator``,
 ``scenario_creator_kwargs``, ``inparser_adder`` and ``consensus_vars``.
 
 ``scenario_creator`` is created thanks to the previous functions.
 
-.. autofunction:: distr.scenario_creator
+.. autofunction:: examples.distr.distr.scenario_creator
 
 The dictionary ``scenario_creator_kwargs`` is created with
 
-.. autofunction:: distr.kw_creator
+.. autofunction:: examples.distr.distr.kw_creator
 
 The function ``inparser_adder`` requires the user to give ``num_scens`` (the number of regions) during the configuration.
 
-.. autofunction:: distr.inparser_adder
+.. autofunction:: examples.distr.distr.inparser_adder
 
 Contrary to the other helper functions, ``consensus_vars_creator`` is specific to admmWrapper.
 The function ``consensus_vars_creator`` creates the required ``consensus_vars`` dictionary.
 
-.. autofunction:: distr.consensus_vars_creator
+.. autofunction:: examples.distr.distr.consensus_vars_creator
 
 Understanding the driver
 ++++++++++++++++++++++++
 
 In the example the driver gets argument from the command line through the function ``_parse_args``
 
 .. py:function:: _parse_args()
+
    Gets argument from the command line and add them to a config argument. Some arguments are required.
 
diff --git a/examples/distr/distr_admm_cylinders.py b/examples/distr/distr_admm_cylinders.py
index 44491a632..0c745e948 100644
--- a/examples/distr/distr_admm_cylinders.py
+++ b/examples/distr/distr_admm_cylinders.py
@@ -11,7 +11,7 @@
 global_rank = MPI.COMM_WORLD.Get_rank()
 
-write_solution = True
+write_solution = False
 
 def _parse_args():
     # create a config object and parse

From 9033b40d7d94054e01f656bf52e9c814b25769fa Mon Sep 17 00:00:00 2001
From: Aymeric Legros
Date: Tue, 28 May 2024 17:29:12 -0700
Subject: [PATCH 18/31] dictionary comprehension

---
 mpisppy/utils/admmWrapper.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/mpisppy/utils/admmWrapper.py b/mpisppy/utils/admmWrapper.py
index 2cb0ddbe1..6795a7fb6 100644
--- a/mpisppy/utils/admmWrapper.py
+++ b/mpisppy/utils/admmWrapper.py
@@ -104,10 +104,7 @@ def assign_variable_probs(self, verbose=False):
         self.varprob_dict = {}
 
         #we collect the consensus variables
-        allvars_dict = OrderedDict()
-        for sname in self.all_scenario_names:
-            for var_name in self.consensus_vars[sname]:
-                allvars_dict[var_name] = None
+        all_consensus_vars = {var_stage_tuple: None for admm_subproblem_names in self.consensus_vars for var_stage_tuple in self.consensus_vars[admm_subproblem_names]}
         error_list1 = []
         error_list2 = []
         for sname,s in self.local_scenarios.items():
             if verbose:
                 print(f"AdmmWrapper.assign_variable_probs is processing scenario: {sname}")
             varlist = list()
             self.varprob_dict[s] = list()
-            for vstr in allvars_dict.keys():
+            for vstr in all_consensus_vars.keys():
                 v = s.find_component(vstr)
                 if vstr in self.consensus_vars[sname]:
                     if v is not None:
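The dictionary comprehension above works because plain dict preserves insertion order in Python 3.7+, which is exactly what the OrderedDict was providing; duplicate consensus variables collapse while keeping first-seen order. A small illustration with made-up subproblem names:

    consensus_vars = {"Region1": ["y[DC1DC2]"],
                      "Region2": ["y[DC1DC2]", "y[DC2DC3]"]}   # made-up names
    # duplicates collapse; first-seen order is kept (guaranteed dict
    # behavior since Python 3.7)
    all_consensus_vars = {v: None for sname in consensus_vars
                          for v in consensus_vars[sname]}
    assert list(all_consensus_vars) == ["y[DC1DC2]", "y[DC2DC3]"]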
From ad48aaf8353ab2c2a85ce24211ef765f2c4b6560 Mon Sep 17 00:00:00 2001
From: Aymeric Legros
Date: Mon, 3 Jun 2024 09:58:26 -0700
Subject: [PATCH 19/31] mainly modifying bash demo script to avoid --with-lagrangian

---
 examples/hydro/demo.bash          | 2 +-
 examples/hydro/hydro.py           | 1 +
 examples/hydro/hydro_cylinders.py | 3 ++-
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/examples/hydro/demo.bash b/examples/hydro/demo.bash
index e0df5f989..05880482a 100644
--- a/examples/hydro/demo.bash
+++ b/examples/hydro/demo.bash
@@ -2,7 +2,7 @@
 
 SOLVERNAME=cplex
 
-mpiexec --oversubscribe --np 3 python -m mpi4py hydro_cylinders.py --branching-factors 3 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --with-xhatshuffle --with-lagrangian --solver-name=${SOLVERNAME} --stage2EFsolvern=${SOLVERNAME}
+mpiexec --oversubscribe --np 3 python -m mpi4py hydro_cylinders.py --branching-factors 3 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --xhatshuffle --lagrangian --solver-name=${SOLVERNAME} --stage2EFsolvern=${SOLVERNAME}
 
 # including this option will cause the upper bounder to solve the EF since there are only three ranks in total.
 #--stage2EFsolvern=${SOLVERNAME}
diff --git a/examples/hydro/hydro.py b/examples/hydro/hydro.py
index 77b7d146d..9a0c4b8cc 100644
--- a/examples/hydro/hydro.py
+++ b/examples/hydro/hydro.py
@@ -228,6 +228,7 @@ def scenario_creator(scenario_name, branching_factors=None, data_path=None):
     instance = model.create_instance(fname, name=scenario_name)
     instance._mpisppy_node_list = MakeNodesforScen(instance, branching_factors, snum)
+    model._mpisppy_probability = "uniform"
     return instance
 
 #=============================================================================
diff --git a/examples/hydro/hydro_cylinders.py b/examples/hydro/hydro_cylinders.py
index 12e94ce09..a0b96e90b 100644
--- a/examples/hydro/hydro_cylinders.py
+++ b/examples/hydro/hydro_cylinders.py
@@ -45,6 +45,7 @@ def main():
         raise RuntimeError("Hydro is a three stage problem, so it needs 2 BFs")
 
     xhatshuffle = cfg["xhatshuffle"]
+    assert xhatshuffle is not None
     lagrangian = cfg["lagrangian"]
 
     # This is multi-stage, so we need to supply node names
@@ -91,7 +92,7 @@ def main():
     if xhatshuffle:
         list_of_spoke_dict.append(xhatshuffle_spoke)
 
-    if cfg.stage2EFsolvern is not None:
+    if cfg.stage2EFsolvern is not None and xhatshuffle:
         xhatshuffle_spoke["opt_kwargs"]["options"]["stage2EFsolvern"] = cfg["stage2EFsolvern"]
         xhatshuffle_spoke["opt_kwargs"]["options"]["branching_factors"] = cfg["branching_factors"]
 

From 06d515d8bac07e96957824a6679369d172e640e6 Mon Sep 17 00:00:00 2001
From: David L Woodruff
Date: Mon, 3 Jun 2024 14:46:57 -0700
Subject: [PATCH 20/31] Update hydro_cylinders.py

---
 examples/hydro/hydro_cylinders.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/hydro/hydro_cylinders.py b/examples/hydro/hydro_cylinders.py
index a0b96e90b..5bc06776e 100644
--- a/examples/hydro/hydro_cylinders.py
+++ b/examples/hydro/hydro_cylinders.py
@@ -45,7 +45,6 @@ def main():
         raise RuntimeError("Hydro is a three stage problem, so it needs 2 BFs")
 
     xhatshuffle = cfg["xhatshuffle"]
-    assert xhatshuffle is not None
     lagrangian = cfg["lagrangian"]
 
     # This is multi-stage, so we need to supply node names
@@ -92,7 +91,8 @@ def main():
     if xhatshuffle:
         list_of_spoke_dict.append(xhatshuffle_spoke)
 
-    if cfg.stage2EFsolvern is not None and xhatshuffle:
+    if cfg.stage2EFsolvern is not None:
+        assert xhatshuffle is not None, "xhatshuffle is required for stage2EFsolvern"
         xhatshuffle_spoke["opt_kwargs"]["options"]["stage2EFsolvern"] = cfg["stage2EFsolvern"]
         xhatshuffle_spoke["opt_kwargs"]["options"]["branching_factors"] = cfg["branching_factors"]
 
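The hydro changes above declare equally likely scenarios by attaching _mpisppy_probability = "uniform" instead of a numeric probability. A minimal sketch of that convention in a toy scenario creator (hypothetical model, not the hydro one):

    import pyomo.environ as pyo

    def scenario_creator(scenario_name):
        model = pyo.ConcreteModel(scenario_name)
        model.x = pyo.Var(bounds=(0, 1))
        model.obj = pyo.Objective(expr=model.x)
        # "uniform" tells mpi-sppy the scenarios are equally likely, so no
        # explicit 1/num_scens is needed; set it on the object the creator
        # actually returns
        model._mpisppy_probability = "uniform"
        return model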
From e0522b113f7b6cce68a47e692d786811dd6728fc Mon Sep 17 00:00:00 2001
From: Aymeric Legros
Date: Mon, 3 Jun 2024 15:07:58 -0700
Subject: [PATCH 21/31] stop using setup.py, start using pip install

---
 .github/workflows/nompi4py.yml             | 2 +-
 .github/workflows/pull_push_regression.yml | 2 +-
 .github/workflows/schur_complement.yml     | 4 ++--
 .github/workflows/straight.yml             | 2 +-
 .github/workflows/testaph.yml              | 2 +-
 .github/workflows/testbunpick.yml          | 2 +-
 .github/workflows/testconfint.yml          | 2 +-
 .github/workflows/testgradient.yml         | 2 +-
 .github/workflows/testpysp.yml             | 2 +-
 .github/workflows/testwithcylinders.yml    | 2 +-
 10 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/nompi4py.yml b/.github/workflows/nompi4py.yml
index d2e8b9871..47fd84a2f 100644
--- a/.github/workflows/nompi4py.yml
+++ b/.github/workflows/nompi4py.yml
@@ -29,7 +29,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: PH EF tests
       run: |
diff --git a/.github/workflows/pull_push_regression.yml b/.github/workflows/pull_push_regression.yml
index 6ee65f648..de321fa90 100644
--- a/.github/workflows/pull_push_regression.yml
+++ b/.github/workflows/pull_push_regression.yml
@@ -35,7 +35,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: Test EF/PH
       run: |
diff --git a/.github/workflows/schur_complement.yml b/.github/workflows/schur_complement.yml
index 2f6cf47d0..22aed76af 100644
--- a/.github/workflows/schur_complement.yml
+++ b/.github/workflows/schur_complement.yml
@@ -31,12 +31,12 @@ jobs:
         pip install git+https://github.com/pyutilib/pyutilib.git
         git clone https://github.com/pyomo/pyomo.git
         cd pyomo/
-        python setup.py develop
+        pip install -e .
         pyomo download-extensions
         pyomo build-extensions
         cd ../
         pip install git+https://github.com/parapint/parapint.git
-        python setup.py develop
+        pip install -e .
     - name: Test with nose
       run: |
         nosetests -v mpisppy/tests/test_sc.py
diff --git a/.github/workflows/straight.yml b/.github/workflows/straight.yml
index c8ff9eb41..2818b876b 100644
--- a/.github/workflows/straight.yml
+++ b/.github/workflows/straight.yml
@@ -31,7 +31,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: mpi tests
       run: |
diff --git a/.github/workflows/testaph.yml b/.github/workflows/testaph.yml
index 44319a9f8..6f1060455 100644
--- a/.github/workflows/testaph.yml
+++ b/.github/workflows/testaph.yml
@@ -32,7 +32,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: run tests
       timeout-minutes: 100
diff --git a/.github/workflows/testbunpick.yml b/.github/workflows/testbunpick.yml
index b5b2d9f36..17d57150e 100644
--- a/.github/workflows/testbunpick.yml
+++ b/.github/workflows/testbunpick.yml
@@ -31,7 +31,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: run pickled bundles tests
       timeout-minutes: 10
diff --git a/.github/workflows/testconfint.yml b/.github/workflows/testconfint.yml
index acf314726..19d5ab9e0 100644
--- a/.github/workflows/testconfint.yml
+++ b/.github/workflows/testconfint.yml
@@ -31,7 +31,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: run farmer tests
       timeout-minutes: 10
diff --git a/.github/workflows/testgradient.yml b/.github/workflows/testgradient.yml
index f71746f6c..9babb7e32 100644
--- a/.github/workflows/testgradient.yml
+++ b/.github/workflows/testgradient.yml
@@ -29,7 +29,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: Build Pyomo extensions
       run: |
diff --git a/.github/workflows/testpysp.yml b/.github/workflows/testpysp.yml
index e7a139517..6f461098f 100644
--- a/.github/workflows/testpysp.yml
+++ b/.github/workflows/testpysp.yml
@@ -33,7 +33,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: run pysp model tests
       timeout-minutes: 100
diff --git a/.github/workflows/testwithcylinders.yml b/.github/workflows/testwithcylinders.yml
index 60f2fb74c..2602dcceb 100644
--- a/.github/workflows/testwithcylinders.yml
+++ b/.github/workflows/testwithcylinders.yml
@@ -31,7 +31,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: run tests
       timeout-minutes: 10

From 40071687bdb5766c9d0d1b275c7fea6f566ee5cd Mon Sep 17 00:00:00 2001
From: AymericPCLegros <166345445+AymericPCLegros@users.noreply.github.com>
Date: Mon, 3 Jun 2024 15:37:41 -0700
Subject: [PATCH 22/31] Replacing "--with-lagrangian" with "--lagrangian" in hydro example and other details (#400)

* mainly modifying bash demo script to avoid --with-lagrangian

* Update hydro_cylinders.py

* stop using setup.py, start using pip install

---------

Co-authored-by: David L Woodruff
---
 .github/workflows/nompi4py.yml             | 2 +-
 .github/workflows/pull_push_regression.yml | 2 +-
 .github/workflows/schur_complement.yml     | 4 ++--
 .github/workflows/straight.yml             | 2 +-
 .github/workflows/testaph.yml              | 2 +-
 .github/workflows/testbunpick.yml          | 2 +-
 .github/workflows/testconfint.yml          | 2 +-
 .github/workflows/testgradient.yml         | 2 +-
 .github/workflows/testpysp.yml             | 2 +-
 .github/workflows/testwithcylinders.yml    | 2 +-
 examples/hydro/demo.bash                   | 2 +-
 examples/hydro/hydro.py                    | 1 +
 examples/hydro/hydro_cylinders.py          | 1 +
 13 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/.github/workflows/nompi4py.yml b/.github/workflows/nompi4py.yml
index d2e8b9871..47fd84a2f 100644
--- a/.github/workflows/nompi4py.yml
+++ b/.github/workflows/nompi4py.yml
@@ -29,7 +29,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: PH EF tests
       run: |
diff --git a/.github/workflows/pull_push_regression.yml b/.github/workflows/pull_push_regression.yml
index 6ee65f648..de321fa90 100644
--- a/.github/workflows/pull_push_regression.yml
+++ b/.github/workflows/pull_push_regression.yml
@@ -35,7 +35,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: Test EF/PH
       run: |
diff --git a/.github/workflows/schur_complement.yml b/.github/workflows/schur_complement.yml
index 2f6cf47d0..22aed76af 100644
--- a/.github/workflows/schur_complement.yml
+++ b/.github/workflows/schur_complement.yml
@@ -31,12 +31,12 @@ jobs:
         pip install git+https://github.com/pyutilib/pyutilib.git
         git clone https://github.com/pyomo/pyomo.git
         cd pyomo/
-        python setup.py develop
+        pip install -e .
         pyomo download-extensions
         pyomo build-extensions
         cd ../
         pip install git+https://github.com/parapint/parapint.git
-        python setup.py develop
+        pip install -e .
     - name: Test with nose
       run: |
         nosetests -v mpisppy/tests/test_sc.py
diff --git a/.github/workflows/straight.yml b/.github/workflows/straight.yml
index c8ff9eb41..2818b876b 100644
--- a/.github/workflows/straight.yml
+++ b/.github/workflows/straight.yml
@@ -31,7 +31,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: mpi tests
       run: |
diff --git a/.github/workflows/testaph.yml b/.github/workflows/testaph.yml
index 44319a9f8..6f1060455 100644
--- a/.github/workflows/testaph.yml
+++ b/.github/workflows/testaph.yml
@@ -32,7 +32,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: run tests
       timeout-minutes: 100
diff --git a/.github/workflows/testbunpick.yml b/.github/workflows/testbunpick.yml
index b5b2d9f36..17d57150e 100644
--- a/.github/workflows/testbunpick.yml
+++ b/.github/workflows/testbunpick.yml
@@ -31,7 +31,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: run pickled bundles tests
       timeout-minutes: 10
diff --git a/.github/workflows/testconfint.yml b/.github/workflows/testconfint.yml
index acf314726..19d5ab9e0 100644
--- a/.github/workflows/testconfint.yml
+++ b/.github/workflows/testconfint.yml
@@ -31,7 +31,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: run farmer tests
       timeout-minutes: 10
diff --git a/.github/workflows/testgradient.yml b/.github/workflows/testgradient.yml
index f71746f6c..9babb7e32 100644
--- a/.github/workflows/testgradient.yml
+++ b/.github/workflows/testgradient.yml
@@ -29,7 +29,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: Build Pyomo extensions
       run: |
diff --git a/.github/workflows/testpysp.yml b/.github/workflows/testpysp.yml
index e7a139517..6f461098f 100644
--- a/.github/workflows/testpysp.yml
+++ b/.github/workflows/testpysp.yml
@@ -33,7 +33,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: run pysp model tests
       timeout-minutes: 100
diff --git a/.github/workflows/testwithcylinders.yml b/.github/workflows/testwithcylinders.yml
index 60f2fb74c..2602dcceb 100644
--- a/.github/workflows/testwithcylinders.yml
+++ b/.github/workflows/testwithcylinders.yml
@@ -31,7 +31,7 @@ jobs:
 
     - name: setup the program
       run: |
-        python setup.py develop
+        pip install -e .
 
     - name: run tests
       timeout-minutes: 10
diff --git a/examples/hydro/demo.bash b/examples/hydro/demo.bash
index e0df5f989..05880482a 100644
--- a/examples/hydro/demo.bash
+++ b/examples/hydro/demo.bash
@@ -2,7 +2,7 @@
 
 SOLVERNAME=cplex
 
-mpiexec --oversubscribe --np 3 python -m mpi4py hydro_cylinders.py --branching-factors 3 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --with-xhatshuffle --with-lagrangian --solver-name=${SOLVERNAME} --stage2EFsolvern=${SOLVERNAME}
+mpiexec --oversubscribe --np 3 python -m mpi4py hydro_cylinders.py --branching-factors 3 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --xhatshuffle --lagrangian --solver-name=${SOLVERNAME} --stage2EFsolvern=${SOLVERNAME}
 
 # including this option will cause the upper bounder to solve the EF since there are only three ranks in total.
 #--stage2EFsolvern=${SOLVERNAME}
diff --git a/examples/hydro/hydro.py b/examples/hydro/hydro.py
index 77b7d146d..9a0c4b8cc 100644
--- a/examples/hydro/hydro.py
+++ b/examples/hydro/hydro.py
@@ -228,6 +228,7 @@ def scenario_creator(scenario_name, branching_factors=None, data_path=None):
     instance = model.create_instance(fname, name=scenario_name)
     instance._mpisppy_node_list = MakeNodesforScen(instance, branching_factors, snum)
+    model._mpisppy_probability = "uniform"
     return instance
 
 #=============================================================================
diff --git a/examples/hydro/hydro_cylinders.py b/examples/hydro/hydro_cylinders.py
index 12e94ce09..5bc06776e 100644
--- a/examples/hydro/hydro_cylinders.py
+++ b/examples/hydro/hydro_cylinders.py
@@ -92,6 +92,7 @@ def main():
         list_of_spoke_dict.append(xhatshuffle_spoke)
 
     if cfg.stage2EFsolvern is not None:
+        assert xhatshuffle is not None, "xhatshuffle is required for stage2EFsolvern"
         xhatshuffle_spoke["opt_kwargs"]["options"]["stage2EFsolvern"] = cfg["stage2EFsolvern"]
         xhatshuffle_spoke["opt_kwargs"]["options"]["branching_factors"] = cfg["branching_factors"]
 

From 346d44fcdc0dbb0754ac0e2a4e87142c5ea4d625 Mon Sep 17 00:00:00 2001
From: Aymeric Legros
Date: Fri, 7 Jun 2024 14:53:27 -0700
Subject: [PATCH 23/31] implementing the scalable example for distr

---
 examples/distr/data_params.json               |  45 +++
 examples/distr/distr_data.py                  | 357 ++++++++++++++++++
 examples/distr/distr_scalable.py              | 254 +++++++++++++
 examples/distr/go.bash                        |   3 +
 .../distr/scalable_distr_admm_cylinders.py    | 165 ++++++++
 5 files changed, 824 insertions(+)
 create mode 100644 examples/distr/data_params.json
 create mode 100644 examples/distr/distr_data.py
 create mode 100644 examples/distr/distr_scalable.py
 create mode 100644 examples/distr/go.bash
 create mode 100644 examples/distr/scalable_distr_admm_cylinders.py

diff --git a/examples/distr/data_params.json b/examples/distr/data_params.json
new file mode 100644
index 000000000..7a00b0c95
--- /dev/null
+++ b/examples/distr/data_params.json
@@ -0,0 +1,45 @@
+{
+    "revenues mean": 1200,
+    "revenues cv": 300,
+    "production costs mean": 400,
+    "production costs cv": 100,
+    "supply factory mean": 150,
+    "supply factory cv": 50,
+    "supply buyer mean": 170,
+    "supply buyer cv": 40,
+    "arc_F_B": {
+        "prob": 0.4,
+        "mean cost": 800,
+        "cv cost": 200,
+        "mean capacity": 50,
+        "cv capacity": 10
+    },
+    "arc_F_DC": {
+        "prob": 0.8,
+        "mean cost": 400,
+        "cv cost": 250,
+        "mean capacity": 100,
+        "cv capacity": 30
+    },
+    "arc_DC_DC": {
+        "prob": 0.6,
+        "mean cost": 200,
+        "cv cost": 100,
+        "mean capacity": 120,
+        "cv capacity": 50
+    },
+    "arc_DC_B": {
+        "prob": 0.9,
+        "mean cost": 300,
+        "cv cost": 200,
+        "mean capacity": 110,
+        "cv capacity": 50
+    },
+    "inter_region_arc": {
+        "prob": 0.6,
+        "mean cost": 200,
+        "cv cost": 50,
+        "mean capacity": 40,
+        "cv capacity": 10
+    }
+  }
\ No newline at end of file
diff --git a/examples/distr/distr_data.py b/examples/distr/distr_data.py
new file mode 100644
index 000000000..f8046109f
--- /dev/null
+++ b/examples/distr/distr_data.py
@@ -0,0 +1,357 @@
+# Our data, gives the constraints inside each in region in region_dict_creator
+# and amongst the different regions in inter_region_dict_creator
+# The data slightly differs depending on the number of regions (num_scens) which is created for 2, 3 or 4 regions
+
+### This file creates the data through the region_dict and inter_region_dict
+# First there is a hard wired data_set, then there is a scalable dataset
+
+# Hardwired data sets
+
+def
inter_region_dict_creator(num_scens): + """Creates the oriented arcs between the regions, with their capacities and costs. \n + This dictionary represents the inter-region constraints and flows. It indicates where to add dummy nodes. + + Args: + num_scens (int): select the number of scenarios (regions) wanted + + Returns: + dict: + Each arc is presented as a pair (source, target) with source and target containing (scenario_name, node_name) \n + The arcs are used as keys for the dictionaries of costs and capacities + """ + inter_region_dict={} + + if num_scens == 2: + inter_region_dict["arcs"]=[(("Region1","DC1"),("Region2","DC2"))] + inter_region_dict["costs"]={(("Region1","DC1"),("Region2","DC2")): 100} + inter_region_dict["capacities"]={(("Region1","DC1"),("Region2","DC2")): 70} + + elif num_scens == 3: + inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ + (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ + (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ + ] + inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ + (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ + (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ + } + inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ + (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ + (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ + } + + elif num_scens == 4: + inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ + (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ + (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ + (("Region1","DC1"),("Region4","DC4")),(("Region4","DC4"),("Region1","DC1")),\ + (("Region4","DC4"),("Region2","DC2")),(("Region2","DC2"),("Region4","DC4")),\ + ] + inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ + (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ + (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ + (("Region1","DC1"),("Region4","DC4")): 30, (("Region4","DC4"),("Region1","DC1")): 50,\ + (("Region4","DC4"),("Region2","DC2")): 100, (("Region2","DC2"),("Region4","DC4")): 70,\ + } + inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ + (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ + (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ + (("Region1","DC1"),("Region4","DC4")): 100, (("Region4","DC4"),("Region1","DC1")): 60,\ + (("Region4","DC4"),("Region2","DC2")): 20, (("Region2","DC2"),("Region4","DC4")): 40,\ + } + + return inter_region_dict + + +def region_dict_creator(scenario_name): + """ Create a scenario for the inter-region max profit distribution example. + + The convention for node names is: + Symbol + number of the region (+ _ + number of the example if needed), + with symbols DC for distribution centers, F for factory nodes, B for buyer nodes. 
\n + For instance: F3_1 is the 1st factory node of region 3. \n + + Args: + scenario_name (str): + Name of the scenario to construct. + + Returns: + region_dict (dict): contains all the information in the given region to create the model. It is composed of:\n + "nodes" (list of str): all the nodes. The following subsets are also nodes: + "factory nodes", "buyer nodes", "distribution center nodes", \n + "arcs" (list of 2 tuples of str) : (node, node) pairs\n + "supply" (dict[n] of float): supply; keys are nodes (negative for demand)\n + "production costs" (dict of float): at each factory node\n + "revenues" (dict of float): at each buyer node \n + "flow costs" (dict[a] of float) : costs per unit flow on each arc \n + "flow capacities" (dict[a] of floats) : upper bound capacities of each arc \n + """ + def _is_partition(L, *lists): + # Step 1: Verify that the union of all sublists contains all elements of L + if set(L) != set().union(*lists): + return False + + # Step 2: Ensure each element in L appears in exactly one sublist + for item in L: + count = 0 + for sublist in lists: + if item in sublist: + count += 1 + if count != 1: + return False + + return True + if scenario_name == "Region1" : + # Creates data for Region1 + region_dict={"name": "Region1"} + region_dict["nodes"] = ["F1_1", "F1_2", "DC1", "B1_1", "B1_2"] + region_dict["factory nodes"] = ["F1_1","F1_2"] + region_dict["buyer nodes"] = ["B1_1","B1_2"] + region_dict["distribution center nodes"]= ["DC1"] + region_dict["supply"] = {"F1_1": 80, "F1_2": 70, "B1_1": -60, "B1_2": -90, "DC1": 0} + region_dict["arcs"] = [("F1_1","DC1"), ("F1_2","DC1"), ("DC1","B1_1"), + ("DC1","B1_2"), ("F1_1","B1_1"), ("F1_2","B1_2")] + + region_dict["production costs"] = {"F1_1": 50, "F1_2": 80} + region_dict["revenues"] = {"B1_1": 800, "B1_2": 900} + # most capacities are 50, so start there and then override + region_dict["flow capacities"] = {a: 50 for a in region_dict["arcs"]} + region_dict["flow capacities"][("F1_1","B1_1")] = None + region_dict["flow capacities"][("F1_2","B1_2")] = None + region_dict["flow costs"] = {("F1_1","DC1"): 300, ("F1_2","DC1"): 500, ("DC1","B1_1"): 200, + ("DC1","B1_2"): 400, ("F1_1","B1_1"): 700, ("F1_2","B1_2"): 1000} + + elif scenario_name=="Region2": + # Creates data for Region2 + region_dict={"name": "Region2"} + region_dict["nodes"] = ["DC2", "B2_1", "B2_2", "B2_3"] + region_dict["factory nodes"] = list() + region_dict["buyer nodes"] = ["B2_1","B2_2","B2_3"] + region_dict["distribution center nodes"]= ["DC2"] + region_dict["supply"] = {"B2_1": -200, "B2_2": -150, "B2_3": -100, "DC2": 0} + region_dict["arcs"] = [("DC2","B2_1"), ("DC2","B2_2"), ("DC2","B2_3")] + + region_dict["production costs"] = {} + region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3":1200} + region_dict["flow capacities"] = {("DC2","B2_1"): 200, ("DC2","B2_2"): 150, ("DC2","B2_3"): 100} + region_dict["flow costs"] = {("DC2","B2_1"): 100, ("DC2","B2_2"): 200, ("DC2","B2_3"): 300} + + elif scenario_name == "Region3" : + # Creates data for Region3 + region_dict={"name": "Region3"} + region_dict["nodes"] = ["F3_1", "F3_2", "DC3_1", "DC3_2", "B3_1", "B3_2"] + region_dict["factory nodes"] = ["F3_1","F3_2"] + region_dict["buyer nodes"] = ["B3_1","B3_2"] + region_dict["distribution center nodes"]= ["DC3_1","DC3_2"] + region_dict["supply"] = {"F3_1": 80, "F3_2": 60, "B3_1": -100, "B3_2": -100, "DC3_1": 0, "DC3_2": 0} + region_dict["arcs"] = [("F3_1","DC3_1"), ("F3_2","DC3_2"), ("DC3_1","B3_1"), + ("DC3_2","B3_2"), ("DC3_1","DC3_2"), ("DC3_2","DC3_1")] 
+ + region_dict["production costs"] = {"F3_1": 50, "F3_2": 50} + region_dict["revenues"] = {"B3_1": 900, "B3_2": 700} + region_dict["flow capacities"] = {("F3_1","DC3_1"): 80, ("F3_2","DC3_2"): 60, ("DC3_1","B3_1"): 100, + ("DC3_2","B3_2"): 100, ("DC3_1","DC3_2"): 70, ("DC3_2","DC3_1"): 50} + region_dict["flow costs"] = {("F3_1","DC3_1"): 100, ("F3_2","DC3_2"): 100, ("DC3_1","B3_1"): 201, + ("DC3_2","B3_2"): 200, ("DC3_1","DC3_2"): 100, ("DC3_2","DC3_1"): 100} + + elif scenario_name == "Region4": + # Creates data for Region4 + region_dict={"name": "Region4"} + region_dict["nodes"] = ["F4_1", "F4_2", "DC4", "B4_1", "B4_2"] + region_dict["factory nodes"] = ["F4_1","F4_2"] + region_dict["buyer nodes"] = ["B4_1","B4_2"] + region_dict["distribution center nodes"] = ["DC4"] + region_dict["supply"] = {"F4_1": 200, "F4_2": 30, "B4_1": -100, "B4_2": -100, "DC4": 0} + region_dict["arcs"] = [("F4_1","DC4"), ("F4_2","DC4"), ("DC4","B4_1"), ("DC4","B4_2")] + + region_dict["production costs"] = {"F4_1": 50, "F4_2": 50} + region_dict["revenues"] = {"B4_1": 900, "B4_2": 700} + region_dict["flow capacities"] = {("F4_1","DC4"): 80, ("F4_2","DC4"): 60, ("DC4","B4_1"): 100, ("DC4","B4_2"): 100} + region_dict["flow costs"] = {("F4_1","DC4"): 100, ("F4_2","DC4"): 80, ("DC4","B4_1"): 90, ("DC4","B4_2"): 70} + + else: + raise RuntimeError (f"unknown Region name {scenario_name}") + + assert _is_partition(region_dict["nodes"], region_dict["factory nodes"], region_dict["buyer nodes"], region_dict["distribution center nodes"]) + + return region_dict + +import json +if __name__ == "__main__": + #creating the json files + for num_scens in range(2,5): + inter_region_dict_path = f'json_dicts.inter_region_dict{num_scens}.json' + data = inter_region_dict_creator(num_scens) + + # Write the data to the JSON file + with open(inter_region_dict_path, 'w') as json_file: + json.dump(data, json_file, indent=4) + + for i in range(1,5): + region_dict_path = f'json_dicts.region{i}_dict.json' + data = region_dict_creator(f"Region{i}") + + # Write the data to the JSON file + with open(region_dict_path, 'w') as json_file: + json.dump(data, json_file, indent=4) + +######################################################################################################################## +# Scalable datasets + +import re +import numpy as np + +def parse_node_name(name): + # Define the regular expression pattern + pattern = r'^([A-Za-z]+)(\d+)(?:_(\d+))?$' + + # Match the pattern against the provided name + match = re.match(pattern, name) + + if match: + # Extract the prefix, the first number, and the second number (if present) + prefix = match.group(1) + first_number = match.group(2) + second_number = match.group(3) if match.group(3) is not None else '1' + assert prefix in ["DC", "F", "B"] + return prefix, int(first_number), int(second_number) + else: + raise RuntimeError (f"the node {name} can't be well decomposed") + + +def _node_num(max_node_per_region, node_type, region_num, count): + node_types = ["DC", "F", "B"] #no need to include the dummy nodes as they are added automatically + return (max_node_per_region * region_num + count) * len(node_types) + node_types.index(node_type) + +def _pseudo_random_arc(node_1, node_2, prob, cfg, intra=True): #in a Region + max_node_per_region = cfg.mnpr + node_type1, region_num1, count1 = parse_node_name(node_1) + node_type2, region_num2, count2 = parse_node_name(node_2) + node_num1 = _node_num(max_node_per_region, node_type1, region_num1, count1) + node_num2 = _node_num(max_node_per_region, node_type2, 
region_num2, count2) + if intra: + assert region_num1 == region_num2, f"supposed to happen in a region ({intra=}), but {region_num1, region_num2=}" + else: + if region_num1 == region_num2: # inside a region, however intra=False so no connexion + return False + max_node_num = _node_num(max_node_per_region, "B", cfg.num_scens+1, max_node_per_region+1) # maximum number possible + np.random.seed(min(node_num1, node_num2) * max_node_num + max(node_num1, node_num2)) # it is symmetrical + random_number = np.random.rand() + # Determine if the event occurs + boo = random_number < prob + return boo + + +def _intra_arc_creator(my_dict, node_1, node_2, cfg, arc, arc_params, my_seed, intra=True): + prob = arc_params["prob"] + mean_cost = arc_params["mean cost"] + cv_cost = arc_params["cv cost"] + mean_capacity = arc_params["mean capacity"] + cv_capacity = arc_params["cv capacity"] + if _pseudo_random_arc(node_1, node_2, prob, cfg, intra=intra): + my_dict["arcs"].append(arc) + np.random.seed(my_seed % 2**32) + if intra: # not the same names used + cost_name = "flow costs" + capacity_name = "flow capacities" + else: + cost_name = "costs" + capacity_name = "capacities" + my_dict[cost_name][arc] = max(np.random.normal(mean_cost,cv_cost),0) + np.random.seed((2**31+my_seed) % 2**32) + my_dict[capacity_name][arc] = max(np.random.normal(mean_capacity,cv_capacity),0) + + +def scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params): + inter_region_arc_params = data_params["inter_region_arc"] + inter_region_dict={} + inter_region_dict["arcs"] = list() + inter_region_dict["costs"] = {} + inter_region_dict["capacities"] = {} + count = 0 + for node_1 in all_DC_nodes: #although inter_region_dict["costs"] and ["capacities"] could be done with comprehension, "arcs" can't + for node_2 in all_DC_nodes: + _, region_num1, _ = parse_node_name(node_1) + source = f"Region{region_num1}", node_1 + _, region_num2, _ = parse_node_name(node_2) + target = f"Region{region_num2}", node_2 + arc = source, target + _intra_arc_creator(inter_region_dict, node_1, node_2, cfg, arc, inter_region_arc_params, count, intra=False) + count += 1 + return inter_region_dict + + +def all_nodes_dict_creator(cfg, data_params): + all_nodes_dict = {} + num_scens = cfg.num_scens + max_node_per_region = cfg.mnpr # maximum node node of a certain type in any region + all_nodes_dict = {} + production_costs_mean = data_params["production costs mean"] + production_costs_cv = data_params["production costs cv"] #coefficient of variation + revenues_mean = data_params["revenues mean"] + revenues_cv = data_params["revenues cv"] + supply_factory_mean = data_params["supply factory mean"] + supply_factory_cv = data_params["supply factory cv"] + supply_buyer_mean = data_params["supply buyer mean"] + supply_buyer_cv = data_params["supply buyer cv"] + for i in range(1, num_scens+1): + region_name = f"Region{i}" + all_nodes_dict[region_name] = {} + node_types = ["DC", "F", "B"] + all_nodes_dict[region_name]["nodes"] = [] + association_types = {"DC": "distribution center nodes", "F": "factory nodes", "B": "buyer nodes"} + all_nodes_dict[region_name]["production costs"] = {} + all_nodes_dict[region_name]["revenues"] = {} + all_nodes_dict[region_name]["supply"] = {} + for node_type in node_types: + node_base_num = _node_num(max_node_per_region, node_type, i, 1) #the first node that will be created will have this number + # That helps us to have a seed, thanks to that we choose an integer which will be the number of nodes of this type + np.random.seed(node_base_num) 
+ m = np.random.randint(0, max_node_per_region) + all_nodes_dict[region_name][association_types[node_type]] = [node_type + str(i) + "_" +str(j) for j in range(1, m+1)] + all_nodes_dict[region_name]["nodes"] += all_nodes_dict[region_name][association_types[node_type]] + if node_type == "F": + count = 1 + for node_name in all_nodes_dict[region_name][association_types[node_type]]: + np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2**28) + all_nodes_dict[region_name]["production costs"][node_name] = max(0,np.random.normal(production_costs_mean,production_costs_cv)) + np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2*2**28) + all_nodes_dict[region_name]["supply"][node_name] = max(0,np.random.normal(supply_factory_mean,supply_factory_cv)) #positive + count += 1 + if node_type == "B": + count = 1 + for node_name in all_nodes_dict[region_name][association_types[node_type]]: + np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2**28) + all_nodes_dict[region_name]["revenues"][node_name] = max(0, np.random.normal(revenues_mean,revenues_cv)) + np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2*2**28) + all_nodes_dict[region_name]["supply"][node_name] = - max(0, np.random.normal(supply_buyer_mean,supply_buyer_cv)) #negative + count += 1 + if node_type == "DC": + for node_name in all_nodes_dict[region_name][association_types[node_type]]: + all_nodes_dict[region_name]["supply"][node_name] = 0 + return all_nodes_dict + + +def scalable_region_dict_creator(scenario_name, all_nodes_dict=None, cfg=None, data_params=None): + assert all_nodes_dict is not None + assert cfg is not None + assert data_params is not None + local_nodes_dict = all_nodes_dict[scenario_name] + region_dict = local_nodes_dict + region_dict["name"] = scenario_name + region_dict["arcs"] = list() + region_dict["flow costs"] = {} + region_dict["flow capacities"] = {} + count = 2**30 # to have unrelated data with the production_costs + for node_1 in local_nodes_dict["nodes"]: #although inter_region_dict["costs"] and ["capacities"] could be done with comprehension, "arcs" can't + for node_2 in local_nodes_dict["nodes"]: + node_type1, _, _ = parse_node_name(node_1) + node_type2, _, _ = parse_node_name(node_2) + arcs_association = {("F","DC") : data_params["arc_F_DC"], ("DC", "B") : data_params["arc_DC_B"], ("F", "B") : data_params["arc_F_B"], ("DC", "DC"): data_params["arc_DC_DC"]} + arc_type = (node_type1, node_type2) + if arc_type in arcs_association: + arc_params = arcs_association[arc_type] + arc = (node_1, node_2) + _intra_arc_creator(region_dict, node_1, node_2, cfg, arc, arc_params, my_seed=count, intra=True) + count += 1 + return region_dict \ No newline at end of file diff --git a/examples/distr/distr_scalable.py b/examples/distr/distr_scalable.py new file mode 100644 index 000000000..ef9885c32 --- /dev/null +++ b/examples/distr/distr_scalable.py @@ -0,0 +1,254 @@ +# Network Flow - various formulations +import pyomo.environ as pyo +import mpisppy.utils.sputils as sputils +import distr_data +import time + +# In this file, we create a (linear) inter-region minimal cost distribution problem. 
+# Our data, gives the constraints inside each in region in region_dict_creator +# and amongst the different regions in inter_region_dict_creator +# The data slightly differs depending on the number of regions (num_scens) which is created for 2, 3 or 4 regions + +# Note: regions in our model will be represented in mpi-sppy by scenarios and to ensure the inter-region constraints +# we will use dummy-nodes, which will be represented in mpi-sppy by non-anticipative variables +# The following association of terms are made: regions = scenarios, and dummy-nodes = nonants = consensus-vars + + +def dummy_nodes_generator(region_dict, inter_region_dict): + """This function creates a new dictionary ``local_dict similar`` to ``region_dict`` with the dummy nodes and their constraints + + Args: + region_dict (dict): dictionary for the current scenario \n + inter_region_dict (dict): dictionary of the inter-region relations + + Returns: + local_dict (dict): + This dictionary copies region_dict, completes the already existing fields of + region_dict to represent the dummy nodes, and adds the following keys:\n + dummy nodes source (resp. target): the list of dummy nodes for which the source (resp. target) + is in the considered region. \n + dummy nodes source (resp. target) slack bounds: dictionary on dummy nodes source (resp. target) + """ + ### Note: The cost of the arc is here chosen to be split equally between the source region and target region + + # region_dict is renamed as local_dict because it no longer contains only information about the region + # but also contains local information that can be linked to the other scenarios (with dummy nodes) + local_dict = region_dict + local_dict["dummy nodes source"] = list() + local_dict["dummy nodes target"] = list() + local_dict["dummy nodes source slack bounds"] = {} + local_dict["dummy nodes target slack bounds"] = {} + for arc in inter_region_dict["arcs"]: + source,target = arc + region_source,node_source = source + region_target,node_target = target + + if region_source == region_dict["name"]: + dummy_node=node_source+node_target + local_dict["nodes"].append(dummy_node) + local_dict["supply"][dummy_node] = 0 + local_dict["dummy nodes source"].append(dummy_node) + local_dict["arcs"].append((node_source, dummy_node)) + local_dict["flow costs"][(node_source, dummy_node)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model + local_dict["flow capacities"][(node_source, dummy_node)] = inter_region_dict["capacities"][(source, target)] + local_dict["dummy nodes source slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] + + if region_target == local_dict["name"]: + dummy_node = node_source + node_target + local_dict["nodes"].append(dummy_node) + local_dict["supply"][dummy_node] = 0 + local_dict["dummy nodes target"].append(dummy_node) + local_dict["arcs"].append((dummy_node, node_target)) + local_dict["flow costs"][(dummy_node, node_target)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model + local_dict["flow capacities"][(dummy_node, node_target)] = inter_region_dict["capacities"][(source, target)] + local_dict["dummy nodes target slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] + return local_dict + + +###Creates the model when local_dict is given +def min_cost_distr_problem(local_dict, sense=pyo.minimize): + """ Create an arcs formulation of network flow + + Args: + local_dict (dict): dictionary representing a region including the dummy nodes \n + 
sense (=pyo.minimize): we aim to minimize the cost, this should always be minimize + + Returns: + model (Pyomo ConcreteModel) : the instantiated model + """ + # Assert sense == pyo.minimize, "sense should be equal to pyo.minimize" + # First, make the special In, Out arc lists for each node + arcsout = {n: list() for n in local_dict["nodes"]} + arcsin = {n: list() for n in local_dict["nodes"]} + for a in local_dict["arcs"]: + arcsout[a[0]].append(a) + arcsin[a[1]].append(a) + + model = pyo.ConcreteModel(name='MinCostFlowArcs') + def flowBounds_rule(model, i,j): + return (0, local_dict["flow capacities"][(i,j)]) + model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x + + def slackBounds_rule(model, n): + if n in local_dict["factory nodes"]: + return (0, local_dict["supply"][n]) + elif n in local_dict["buyer nodes"]: + return (local_dict["supply"][n], 0) + elif n in local_dict["dummy nodes source"]: + return (0,local_dict["dummy nodes source slack bounds"][n]) + elif n in local_dict["dummy nodes target"]: #this slack will respect the opposite flow balance rule + return (0,local_dict["dummy nodes target slack bounds"][n]) + elif n in local_dict["distribution center nodes"]: + return (0,0) + else: + raise ValueError(f"unknown node type for node {n}") + + model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule) + + model.MinCost = pyo.Objective(expr=\ + sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ + + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \ + + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) , + sense=sense) + + def FlowBalance_rule(m, n): + #we change the definition of the slack for target dummy nodes so that we have the slack from the source and from the target equal + if n in local_dict["dummy nodes target"]: + return sum(m.flow[a] for a in arcsout[n])\ + - sum(m.flow[a] for a in arcsin[n])\ + - m.y[n] == local_dict["supply"][n] + else: + return sum(m.flow[a] for a in arcsout[n])\ + - sum(m.flow[a] for a in arcsin[n])\ + + m.y[n] == local_dict["supply"][n] + model.FlowBalance= pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule) + + return model + + +###Creates the scenario +def scenario_creator(scenario_name, all_nodes_dict=None, inter_region_dict=None, cfg=None, data_params=None): + """Creates the model, which should include the consensus variables. \n + However, this function shouldn't attach the consensus variables to root nodes, as it is done in admmWrapper. + + Args: + scenario_name (str): the name of the scenario that will be created. Here is of the shape f"Region{i}" with 1<=i<=num_scens \n + num_scens (int): number of scenarios (regions). 
Useful to create the corresponding inter-region dictionary + + Returns: + Pyomo ConcreteModel: the instantiated model + """ + assert (all_nodes_dict is not None) + assert (inter_region_dict is not None) + assert (cfg is not None) + assert (data_params is not None) + region_creation_starting_time = time.time() + region_dict = distr_data.scalable_region_dict_creator(scenario_name, all_nodes_dict=all_nodes_dict, cfg=cfg, data_params=data_params) + region_creation_end_time = time.time() + print(f"time for creating region {scenario_name}: {region_creation_end_time - region_creation_starting_time}") + # Adding dummy nodes and associated features + local_dict = dummy_nodes_generator(region_dict, inter_region_dict) + # Generating the model + model = min_cost_distr_problem(local_dict) + + varlist = list() + sputils.attach_root_node(model, model.MinCost, varlist) + + return model + + +###Functions required in other files, which constructions are specific to the problem + +def scenario_denouement(rank, scenario_name, scenario): + """for each scenario prints its name and the final variable values + + Args: + rank (int): not used here, but could be helpful to know the location + scenario_name (str): name of the scenario + scenario (Pyomo ConcreteModel): the instantiated model + """ + #print(f"flow values for {scenario_name}") + #scenario.flow.pprint() + #print(f"slack values for {scenario_name}") + #scenario.y.pprint() + pass + + +def consensus_vars_creator(num_scens, inter_region_dict, all_scenario_names): + """The following function creates the consensus_vars dictionary thanks to the inter-region dictionary. \n + This dictionary has redundant information, but is useful for admmWrapper. + + Args: + num_scens (int): select the number of scenarios (regions) wanted + + Returns: + dict: dictionary which keys are the regions and values are the list of consensus variables + present in the region + """ + # Due to the small size of inter_region_dict, it is not given as argument but rather created. + consensus_vars = {} + for arc in inter_region_dict["arcs"]: + source,target = arc + region_source,node_source = source + region_target,node_target = target + dummy_node = node_source + node_target + vstr = f"y[{dummy_node}]" #variable name as string, y is the slack + + #adds dummy_node in the source region + if not region_source in consensus_vars: #initiates consensus_vars[region_source] + consensus_vars[region_source] = list() + consensus_vars[region_source].append(vstr) + + #adds dummy_node in the target region + if not region_target in consensus_vars: #initiates consensus_vars[region_target] + consensus_vars[region_target] = list() + consensus_vars[region_target].append(vstr) + for region in all_scenario_names: + if region not in consensus_vars: + print(f"WARNING: {region} has no consensus_vars") + consensus_vars[region] = list() + return consensus_vars + + +def scenario_names_creator(num_scens): + """Creates the name of every scenario. + + Args: + num_scens (int): number of scenarios + + Returns: + list (str): the list of names + """ + return [f"Region{i+1}" for i in range(num_scens)] + + +def kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params): + """ + Args: + cfg (config): specifications for the problem. 
We only look at the number of scenarios
+
+    Returns:
+        dict (str): the kwargs that are used in distr.scenario_creator, here {"num_scens": num_scens}
+    """
+    kwargs = {
+        "all_nodes_dict" : all_nodes_dict,
+        "inter_region_dict" : inter_region_dict,
+        "cfg" : cfg,
+        "data_params" : data_params,
+    }
+    return kwargs
+
+
+def inparser_adder(cfg):
+    #requires the user to give the number of scenarios
+    cfg.num_scens_required()
+
+    cfg.add_to_config("scalable",
+                      description="decides whether a scalable model is used",
+                      domain=bool,
+                      default=True)
+
+    cfg.add_to_config("mnpr",
+                      description="max number of nodes per region and per type",
+                      domain=int,
+                      default=4)
\ No newline at end of file
diff --git a/examples/distr/go.bash b/examples/distr/go.bash
new file mode 100644
index 000000000..742886f84
--- /dev/null
+++ b/examples/distr/go.bash
@@ -0,0 +1,3 @@
+#execute with bash go.bash
+
+mpiexec -np 3 python -u -m mpi4py scalable_distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 200 --xhatxbar --lagrangian --mnpr 40 --rel-gap 0.005
\ No newline at end of file
diff --git a/examples/distr/scalable_distr_admm_cylinders.py b/examples/distr/scalable_distr_admm_cylinders.py
new file mode 100644
index 000000000..b5fec74f3
--- /dev/null
+++ b/examples/distr/scalable_distr_admm_cylinders.py
@@ -0,0 +1,165 @@
+# general example driver for distr_scalable with cylinders
+import mpisppy.utils.admmWrapper as admmWrapper
+import distr_data
+import distr_scalable
+import mpisppy.cylinders
+
+from mpisppy.spin_the_wheel import WheelSpinner
+import mpisppy.utils.sputils as sputils
+from mpisppy.utils import config
+import mpisppy.utils.cfg_vanilla as vanilla
+from mpisppy import MPI
+global_rank = MPI.COMM_WORLD.Get_rank()
+
+from pyomo.environ import ConcreteModel, Var, Objective, Constraint, SolverFactory
+
+import time
+
+write_solution = False
+
+def _parse_args():
+    # create a config object and parse
+    cfg = config.Config()
+    distr_scalable.inparser_adder(cfg)
+    cfg.popular_args()
+    cfg.two_sided_args()
+    cfg.ph_args()
+    cfg.aph_args()
+    cfg.xhatxbar_args()
+    cfg.lagrangian_args()
+    cfg.ph_ob_args()
+    cfg.tracking_args()
+    cfg.add_to_config("run_async",
+                      description="Run with async projective hedging instead of progressive hedging",
+                      domain=bool,
+                      default=False)
+
+    cfg.parse_command_line("distr_admm_cylinders")
+    return cfg
+
+
+# This needs to be executed long before the cylinders are created
+def _count_cylinders(cfg):
+    count = 1
+    cfglist = ["xhatxbar", "lagrangian", "ph_ob"] # All the cfg arguments that create a new cylinders
+    # Add to this list any cfg attribute that would create a spoke
+    for cylname in cfglist:
+        if cfg[cylname]:
+            count += 1
+    return count
+
+
+def main():
+
+    cfg = _parse_args()
+
+    if cfg.default_rho is None: # and rho_setter is None
+        raise RuntimeError("No rho_setter so a default must be specified via --default-rho")
+
+    if cfg.scalable:
+        import json
+        json_file_path = "data_params.json"
+
+        # Read the JSON file
+        with open(json_file_path, 'r') as file:
+            start_time_creating_data = time.time()
+
+            data_params = json.load(file)
+            all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params)
+            all_DC_nodes = [DC_node for region in all_nodes_dict for DC_node in all_nodes_dict[region]["distribution center nodes"]]
+            inter_region_dict = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params)
+            end_time_creating_data = time.time()
+            creating_data_time = end_time_creating_data - start_time_creating_data
+            print(f"{creating_data_time=}")
+    else:
+        raise RuntimeError ("Not implemented yet")
+
+    ph_converger = None
+
+    options = {}
+    all_scenario_names = distr_scalable.scenario_names_creator(num_scens=cfg.num_scens)
+    scenario_creator = distr_scalable.scenario_creator
+    scenario_creator_kwargs = distr_scalable.kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params)
+    consensus_vars = distr_scalable.consensus_vars_creator(cfg.num_scens, inter_region_dict, all_scenario_names)
+    n_cylinders = _count_cylinders(cfg)
+    admm = admmWrapper.AdmmWrapper(options,
+                           all_scenario_names,
+                           scenario_creator,
+                           consensus_vars,
+                           n_cylinders=n_cylinders,
+                           mpicomm=MPI.COMM_WORLD,
+                           scenario_creator_kwargs=scenario_creator_kwargs,
+                           )
+
+    # Things needed for vanilla cylinders
+    scenario_creator = admm.admmWrapper_scenario_creator ##change needed because of the wrapper
+    scenario_creator_kwargs = None
+    scenario_denouement = distr_scalable.scenario_denouement
+    #note that the admmWrapper scenario_creator wrapper doesn't take any arguments
+    variable_probability = admm.var_prob_list
+
+    beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names)
+
+    if cfg.run_async:
+        # Vanilla APH hub
+        hub_dict = vanilla.aph_hub(*beans,
+                                   scenario_creator_kwargs=scenario_creator_kwargs,
+                                   ph_extensions=None,
+                                   rho_setter = None,
+                                   variable_probability=variable_probability)
+
+    else:
+        # Vanilla PH hub
+        hub_dict = vanilla.ph_hub(*beans,
+                                  scenario_creator_kwargs=scenario_creator_kwargs,
+                                  ph_extensions=None,
+                                  ph_converger=ph_converger,
+                                  rho_setter=None,
+                                  variable_probability=variable_probability)
+
+    # FWPH spoke DOES NOT WORK with variable probability
+
+    # Standard Lagrangian bound spoke
+    if cfg.lagrangian:
+        lagrangian_spoke = vanilla.lagrangian_spoke(*beans,
+                                              scenario_creator_kwargs=scenario_creator_kwargs,
+                                              rho_setter = None)
+
+
+    # ph outer bounder spoke
+    if cfg.ph_ob:
+        ph_ob_spoke = vanilla.ph_ob_spoke(*beans,
+                                          scenario_creator_kwargs=scenario_creator_kwargs,
+                                          rho_setter = None,
+                                          variable_probability=variable_probability)
+
+    # xhat looper bound spoke
+    if cfg.xhatxbar:
+        xhatxbar_spoke = vanilla.xhatxbar_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs)
+
+    list_of_spoke_dict = list()
+    if cfg.lagrangian:
+        list_of_spoke_dict.append(lagrangian_spoke)
+    if cfg.ph_ob:
+        list_of_spoke_dict.append(ph_ob_spoke)
+    if cfg.xhatxbar:
+        list_of_spoke_dict.append(xhatxbar_spoke)
+
+    assert n_cylinders == 1 + len(list_of_spoke_dict), f"n_cylinders = {n_cylinders}, len(list_of_spoke_dict) = {len(list_of_spoke_dict)}"
+
+    wheel = WheelSpinner(hub_dict, list_of_spoke_dict)
+    wheel.spin()
+
+    if write_solution:
+        wheel.write_first_stage_solution('distr_soln.csv')
+        wheel.write_first_stage_solution('distr_cyl_nonants.npy',
+                first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer)
+        wheel.write_tree_solution('distr_full_solution')
+
+    if global_rank == 0:
+        best_objective = wheel.spcomm.BestInnerBound
+        print(f"{best_objective=}")
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
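Everything the scalable example draws is np.random-seeded from node numbers, so a given (num-scens, mnpr) pair always reproduces the same network. A minimal sketch of exercising the generators outside the cylinders (SimpleNamespace is a hypothetical stand-in for the mpisppy Config object; only the attributes read by distr_data are supplied):

    import json
    from types import SimpleNamespace

    import distr_data

    # hypothetical stand-in for the mpisppy Config object; only num_scens
    # and mnpr are read by distr_data
    cfg = SimpleNamespace(num_scens=3, mnpr=4)

    with open("data_params.json") as f:
        data_params = json.load(f)

    all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params)
    all_DC_nodes = [dc for region in all_nodes_dict
                    for dc in all_nodes_dict[region]["distribution center nodes"]]
    first = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params)
    # every draw is seeded from the node numbers, so a second call
    # reproduces exactly the same arcs, costs and capacities
    second = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params)
    assert first == second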
.../distr/scalable_distr_admm_cylinders.py | 165 ------------ 5 files changed, 81 insertions(+), 610 deletions(-) delete mode 100644 examples/distr/distr_scalable.py delete mode 100644 examples/distr/scalable_distr_admm_cylinders.py diff --git a/examples/distr/distr.py b/examples/distr/distr.py index a16d3d5dd..5dff6468a 100644 --- a/examples/distr/distr.py +++ b/examples/distr/distr.py @@ -1,6 +1,8 @@ # Network Flow - various formulations import pyomo.environ as pyo import mpisppy.utils.sputils as sputils +import distr_data +import time # In this file, we create a (linear) inter-region minimal cost distribution problem. # Our data, gives the constraints inside each in region in region_dict_creator @@ -11,63 +13,6 @@ # we will use dummy-nodes, which will be represented in mpi-sppy by non-anticipative variables # The following association of terms are made: regions = scenarios, and dummy-nodes = nonants = consensus-vars -### The following functions create the data - -def inter_region_dict_creator(num_scens): - """Creates the oriented arcs between the regions, with their capacities and costs. \n - This dictionary represents the inter-region constraints and flows. It indicates where to add dummy nodes. - - Args: - num_scens (int): select the number of scenarios (regions) wanted - - Returns: - dict: - Each arc is presented as a pair (source, target) with source and target containing (scenario_name, node_name) \n - The arcs are used as keys for the dictionaries of costs and capacities - """ - inter_region_dict={} - - if num_scens == 2: - inter_region_dict["arcs"]=[(("Region1","DC1"),("Region2","DC2"))] - inter_region_dict["costs"]={(("Region1","DC1"),("Region2","DC2")): 100} - inter_region_dict["capacities"]={(("Region1","DC1"),("Region2","DC2")): 70} - - elif num_scens == 3: - inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ - (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ - (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ - ] - inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ - (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ - (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ - } - inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ - (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ - (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ - } - - elif num_scens == 4: - inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ - (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ - (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ - (("Region1","DC1"),("Region4","DC4")),(("Region4","DC4"),("Region1","DC1")),\ - (("Region4","DC4"),("Region2","DC2")),(("Region2","DC2"),("Region4","DC4")),\ - ] - inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ - (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ - (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ - (("Region1","DC1"),("Region4","DC4")): 30, 
(("Region4","DC4"),("Region1","DC1")): 50,\ - (("Region4","DC4"),("Region2","DC2")): 100, (("Region2","DC2"),("Region4","DC4")): 70,\ - } - inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ - (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ - (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ - (("Region1","DC1"),("Region4","DC4")): 100, (("Region4","DC4"),("Region1","DC1")): 60,\ - (("Region4","DC4"),("Region2","DC2")): 20, (("Region2","DC2"),("Region4","DC4")): 40,\ - } - - return inter_region_dict - def dummy_nodes_generator(region_dict, inter_region_dict): """This function creates a new dictionary ``local_dict similar`` to ``region_dict`` with the dummy nodes and their constraints @@ -119,120 +64,6 @@ def dummy_nodes_generator(region_dict, inter_region_dict): local_dict["dummy nodes target slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] return local_dict -def _is_partition(L, *lists): - # Step 1: Verify that the union of all sublists contains all elements of L - if set(L) != set().union(*lists): - return False - - # Step 2: Ensure each element in L appears in exactly one sublist - for item in L: - count = 0 - for sublist in lists: - if item in sublist: - count += 1 - if count != 1: - return False - - return True - -def region_dict_creator(scenario_name): - """ Create a scenario for the inter-region max profit distribution example. - - The convention for node names is: - Symbol + number of the region (+ _ + number of the example if needed), - with symbols DC for distribution centers, F for factory nodes, B for buyer nodes. \n - For instance: F3_1 is the 1st factory node of region 3. \n - - Args: - scenario_name (str): - Name of the scenario to construct. - - Returns: - region_dict (dict): contains all the information in the given region to create the model. It is composed of:\n - "nodes" (list of str): all the nodes. 
The following subsets are also nodes: - "factory nodes", "buyer nodes", "distribution center nodes", \n - "arcs" (list of 2 tuples of str) : (node, node) pairs\n - "supply" (dict[n] of float): supply; keys are nodes (negative for demand)\n - "production costs" (dict of float): at each factory node\n - "revenues" (dict of float): at each buyer node \n - "flow costs" (dict[a] of float) : costs per unit flow on each arc \n - "flow capacities" (dict[a] of floats) : upper bound capacities of each arc \n - """ - if scenario_name == "Region1" : - # Creates data for Region1 - region_dict={"name": "Region1"} - region_dict["nodes"] = ["F1_1", "F1_2", "DC1", "B1_1", "B1_2"] - region_dict["factory nodes"] = ["F1_1","F1_2"] - region_dict["buyer nodes"] = ["B1_1","B1_2"] - region_dict["distribution center nodes"]= ["DC1"] - region_dict["supply"] = {"F1_1": 80, "F1_2": 70, "B1_1": -60, "B1_2": -90, "DC1": 0} - region_dict["arcs"] = [("F1_1","DC1"), ("F1_2","DC1"), ("DC1","B1_1"), - ("DC1","B1_2"), ("F1_1","B1_1"), ("F1_2","B1_2")] - - region_dict["production costs"] = {"F1_1": 50, "F1_2": 80} - region_dict["revenues"] = {"B1_1": 800, "B1_2": 900} - # most capacities are 50, so start there and then override - region_dict["flow capacities"] = {a: 50 for a in region_dict["arcs"]} - region_dict["flow capacities"][("F1_1","B1_1")] = None - region_dict["flow capacities"][("F1_2","B1_2")] = None - region_dict["flow costs"] = {("F1_1","DC1"): 300, ("F1_2","DC1"): 500, ("DC1","B1_1"): 200, - ("DC1","B1_2"): 400, ("F1_1","B1_1"): 700, ("F1_2","B1_2"): 1000} - - elif scenario_name=="Region2": - # Creates data for Region2 - region_dict={"name": "Region2"} - region_dict["nodes"] = ["DC2", "B2_1", "B2_2", "B2_3"] - region_dict["factory nodes"] = list() - region_dict["buyer nodes"] = ["B2_1","B2_2","B2_3"] - region_dict["distribution center nodes"]= ["DC2"] - region_dict["supply"] = {"B2_1": -200, "B2_2": -150, "B2_3": -100, "DC2": 0} - region_dict["arcs"] = [("DC2","B2_1"), ("DC2","B2_2"), ("DC2","B2_3")] - - region_dict["production costs"] = {} - region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3":1200} - region_dict["flow capacities"] = {("DC2","B2_1"): 200, ("DC2","B2_2"): 150, ("DC2","B2_3"): 100} - region_dict["flow costs"] = {("DC2","B2_1"): 100, ("DC2","B2_2"): 200, ("DC2","B2_3"): 300} - - elif scenario_name == "Region3" : - # Creates data for Region3 - region_dict={"name": "Region3"} - region_dict["nodes"] = ["F3_1", "F3_2", "DC3_1", "DC3_2", "B3_1", "B3_2"] - region_dict["factory nodes"] = ["F3_1","F3_2"] - region_dict["buyer nodes"] = ["B3_1","B3_2"] - region_dict["distribution center nodes"]= ["DC3_1","DC3_2"] - region_dict["supply"] = {"F3_1": 80, "F3_2": 60, "B3_1": -100, "B3_2": -100, "DC3_1": 0, "DC3_2": 0} - region_dict["arcs"] = [("F3_1","DC3_1"), ("F3_2","DC3_2"), ("DC3_1","B3_1"), - ("DC3_2","B3_2"), ("DC3_1","DC3_2"), ("DC3_2","DC3_1")] - - region_dict["production costs"] = {"F3_1": 50, "F3_2": 50} - region_dict["revenues"] = {"B3_1": 900, "B3_2": 700} - region_dict["flow capacities"] = {("F3_1","DC3_1"): 80, ("F3_2","DC3_2"): 60, ("DC3_1","B3_1"): 100, - ("DC3_2","B3_2"): 100, ("DC3_1","DC3_2"): 70, ("DC3_2","DC3_1"): 50} - region_dict["flow costs"] = {("F3_1","DC3_1"): 100, ("F3_2","DC3_2"): 100, ("DC3_1","B3_1"): 201, - ("DC3_2","B3_2"): 200, ("DC3_1","DC3_2"): 100, ("DC3_2","DC3_1"): 100} - - elif scenario_name == "Region4": - # Creates data for Region4 - region_dict={"name": "Region4"} - region_dict["nodes"] = ["F4_1", "F4_2", "DC4", "B4_1", "B4_2"] - region_dict["factory nodes"] = 
["F4_1","F4_2"] - region_dict["buyer nodes"] = ["B4_1","B4_2"] - region_dict["distribution center nodes"] = ["DC4"] - region_dict["supply"] = {"F4_1": 200, "F4_2": 30, "B4_1": -100, "B4_2": -100, "DC4": 0} - region_dict["arcs"] = [("F4_1","DC4"), ("F4_2","DC4"), ("DC4","B4_1"), ("DC4","B4_2")] - - region_dict["production costs"] = {"F4_1": 50, "F4_2": 50} - region_dict["revenues"] = {"B4_1": 900, "B4_2": 700} - region_dict["flow capacities"] = {("F4_1","DC4"): 80, ("F4_2","DC4"): 60, ("DC4","B4_1"): 100, ("DC4","B4_2"): 100} - region_dict["flow costs"] = {("F4_1","DC4"): 100, ("F4_2","DC4"): 80, ("DC4","B4_1"): 90, ("DC4","B4_2"): 70} - - else: - raise RuntimeError (f"unknown Region name {scenario_name}") - - assert _is_partition(region_dict["nodes"], region_dict["factory nodes"], region_dict["buyer nodes"], region_dict["distribution center nodes"]) - - return region_dict - ###Creates the model when local_dict is given def min_cost_distr_problem(local_dict, sense=pyo.minimize): @@ -296,7 +127,7 @@ def FlowBalance_rule(m, n): ###Creates the scenario -def scenario_creator(scenario_name, num_scens=None): +def scenario_creator(scenario_name, inter_region_dict=None, cfg=None, data_params=None, all_nodes_dict=None): """Creates the model, which should include the consensus variables. \n However, this function shouldn't attach the consensus variables to root nodes, as it is done in admmWrapper. @@ -306,10 +137,18 @@ def scenario_creator(scenario_name, num_scens=None): Returns: Pyomo ConcreteModel: the instantiated model - """ - assert (num_scens is not None) - inter_region_dict = inter_region_dict_creator(num_scens) - region_dict = region_dict_creator(scenario_name) + """ + assert (inter_region_dict is not None) + assert (cfg is not None) + if cfg.scalable: + assert (data_params is not None) + assert (all_nodes_dict is not None) + region_creation_starting_time = time.time() + region_dict = distr_data.scalable_region_dict_creator(scenario_name, all_nodes_dict=all_nodes_dict, cfg=cfg, data_params=data_params) + region_creation_end_time = time.time() + print(f"time for creating region {scenario_name}: {region_creation_end_time - region_creation_starting_time}") + else: + region_dict = distr_data.region_dict_creator(scenario_name) # Adding dummy nodes and associated features local_dict = dummy_nodes_generator(region_dict, inter_region_dict) # Generating the model @@ -331,13 +170,14 @@ def scenario_denouement(rank, scenario_name, scenario): scenario_name (str): name of the scenario scenario (Pyomo ConcreteModel): the instantiated model """ - print(f"flow values for {scenario_name}") - scenario.flow.pprint() - print(f"slack values for {scenario_name}") - scenario.y.pprint() + #print(f"flow values for {scenario_name}") + #scenario.flow.pprint() + #print(f"slack values for {scenario_name}") + #scenario.y.pprint() + pass -def consensus_vars_creator(num_scens): +def consensus_vars_creator(num_scens, inter_region_dict, all_scenario_names): """The following function creates the consensus_vars dictionary thanks to the inter-region dictionary. \n This dictionary has redundant information, but is useful for admmWrapper. @@ -349,7 +189,6 @@ def consensus_vars_creator(num_scens): present in the region """ # Due to the small size of inter_region_dict, it is not given as argument but rather created. 
- inter_region_dict = inter_region_dict_creator(num_scens) consensus_vars = {} for arc in inter_region_dict["arcs"]: source,target = arc @@ -367,6 +206,10 @@ def consensus_vars_creator(num_scens): if not region_target in consensus_vars: #initiates consensus_vars[region_target] consensus_vars[region_target] = list() consensus_vars[region_target].append(vstr) + for region in all_scenario_names: + if region not in consensus_vars: + print(f"WARNING: {region} has no consensus_vars") + consensus_vars[region] = list() return consensus_vars @@ -382,7 +225,7 @@ def scenario_names_creator(num_scens): return [f"Region{i+1}" for i in range(num_scens)] -def kw_creator(cfg): +def kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params): """ Args: cfg (config): specifications for the problem. We only look at the number of scenarios @@ -390,13 +233,25 @@ def kw_creator(cfg): Returns: dict (str): the kwargs that are used in distr.scenario_creator, here {"num_scens": num_scens} """ - kwargs = {"num_scens" : cfg.get('num_scens', None), + kwargs = { + "all_nodes_dict" : all_nodes_dict, + "inter_region_dict" : inter_region_dict, + "cfg" : cfg, + "data_params" : data_params, } - if not kwargs["num_scens"] in [2, 3, 4]: - RuntimeError (f"unexpected number of regions {cfg.num_scens}, should be in [2, 3, 4]") return kwargs def inparser_adder(cfg): #requires the user to give the number of scenarios - cfg.num_scens_required() \ No newline at end of file + cfg.num_scens_required() + + cfg.add_to_config("scalable", + description="decides whether a sclale model is used", + domain=bool, + default=False) + + cfg.add_to_config("mnpr", + description="max number of nodes per region and per type", + domain=int, + default=4) \ No newline at end of file diff --git a/examples/distr/distr_admm_cylinders.py b/examples/distr/distr_admm_cylinders.py index 0c745e948..895fe1aae 100644 --- a/examples/distr/distr_admm_cylinders.py +++ b/examples/distr/distr_admm_cylinders.py @@ -1,5 +1,6 @@ # general example driver for distr with cylinders import mpisppy.utils.admmWrapper as admmWrapper +import distr_data import distr import mpisppy.cylinders @@ -10,6 +11,7 @@ from mpisppy import MPI global_rank = MPI.COMM_WORLD.Get_rank() +import time write_solution = False @@ -52,13 +54,33 @@ def main(): if cfg.default_rho is None: # and rho_setter is None raise RuntimeError("No rho_setter so a default must be specified via --default-rho") + if cfg.scalable: + import json + json_file_path = "data_params.json" + + # Read the JSON file + with open(json_file_path, 'r') as file: + start_time_creating_data = time.time() + + data_params = json.load(file) + all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params) + all_DC_nodes = [DC_node for region in all_nodes_dict for DC_node in all_nodes_dict[region]["distribution center nodes"]] + inter_region_dict = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params) + end_time_creating_data = time.time() + creating_data_time = end_time_creating_data - start_time_creating_data + print(f"{creating_data_time=}") + else: + inter_region_dict = distr_data.inter_region_dict_creator(num_scens=cfg.num_scens) + all_nodes_dict = None + data_params = None + ph_converger = None options = {} all_scenario_names = distr.scenario_names_creator(num_scens=cfg.num_scens) scenario_creator = distr.scenario_creator - scenario_creator_kwargs = distr.kw_creator(cfg) - consensus_vars = distr.consensus_vars_creator(cfg.num_scens) + scenario_creator_kwargs = distr.kw_creator(all_nodes_dict, cfg, 
inter_region_dict, data_params) + consensus_vars = distr.consensus_vars_creator(cfg.num_scens, inter_region_dict, all_scenario_names) n_cylinders = _count_cylinders(cfg) admm = admmWrapper.AdmmWrapper(options, all_scenario_names, diff --git a/examples/distr/distr_scalable.py b/examples/distr/distr_scalable.py deleted file mode 100644 index ef9885c32..000000000 --- a/examples/distr/distr_scalable.py +++ /dev/null @@ -1,254 +0,0 @@ -# Network Flow - various formulations -import pyomo.environ as pyo -import mpisppy.utils.sputils as sputils -import distr_data -import time - -# In this file, we create a (linear) inter-region minimal cost distribution problem. -# Our data, gives the constraints inside each in region in region_dict_creator -# and amongst the different regions in inter_region_dict_creator -# The data slightly differs depending on the number of regions (num_scens) which is created for 2, 3 or 4 regions - -# Note: regions in our model will be represented in mpi-sppy by scenarios and to ensure the inter-region constraints -# we will use dummy-nodes, which will be represented in mpi-sppy by non-anticipative variables -# The following association of terms are made: regions = scenarios, and dummy-nodes = nonants = consensus-vars - - -def dummy_nodes_generator(region_dict, inter_region_dict): - """This function creates a new dictionary ``local_dict similar`` to ``region_dict`` with the dummy nodes and their constraints - - Args: - region_dict (dict): dictionary for the current scenario \n - inter_region_dict (dict): dictionary of the inter-region relations - - Returns: - local_dict (dict): - This dictionary copies region_dict, completes the already existing fields of - region_dict to represent the dummy nodes, and adds the following keys:\n - dummy nodes source (resp. target): the list of dummy nodes for which the source (resp. target) - is in the considered region. \n - dummy nodes source (resp. target) slack bounds: dictionary on dummy nodes source (resp. 
target) - """ - ### Note: The cost of the arc is here chosen to be split equally between the source region and target region - - # region_dict is renamed as local_dict because it no longer contains only information about the region - # but also contains local information that can be linked to the other scenarios (with dummy nodes) - local_dict = region_dict - local_dict["dummy nodes source"] = list() - local_dict["dummy nodes target"] = list() - local_dict["dummy nodes source slack bounds"] = {} - local_dict["dummy nodes target slack bounds"] = {} - for arc in inter_region_dict["arcs"]: - source,target = arc - region_source,node_source = source - region_target,node_target = target - - if region_source == region_dict["name"]: - dummy_node=node_source+node_target - local_dict["nodes"].append(dummy_node) - local_dict["supply"][dummy_node] = 0 - local_dict["dummy nodes source"].append(dummy_node) - local_dict["arcs"].append((node_source, dummy_node)) - local_dict["flow costs"][(node_source, dummy_node)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model - local_dict["flow capacities"][(node_source, dummy_node)] = inter_region_dict["capacities"][(source, target)] - local_dict["dummy nodes source slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] - - if region_target == local_dict["name"]: - dummy_node = node_source + node_target - local_dict["nodes"].append(dummy_node) - local_dict["supply"][dummy_node] = 0 - local_dict["dummy nodes target"].append(dummy_node) - local_dict["arcs"].append((dummy_node, node_target)) - local_dict["flow costs"][(dummy_node, node_target)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model - local_dict["flow capacities"][(dummy_node, node_target)] = inter_region_dict["capacities"][(source, target)] - local_dict["dummy nodes target slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] - return local_dict - - -###Creates the model when local_dict is given -def min_cost_distr_problem(local_dict, sense=pyo.minimize): - """ Create an arcs formulation of network flow - - Args: - local_dict (dict): dictionary representing a region including the dummy nodes \n - sense (=pyo.minimize): we aim to minimize the cost, this should always be minimize - - Returns: - model (Pyomo ConcreteModel) : the instantiated model - """ - # Assert sense == pyo.minimize, "sense should be equal to pyo.minimize" - # First, make the special In, Out arc lists for each node - arcsout = {n: list() for n in local_dict["nodes"]} - arcsin = {n: list() for n in local_dict["nodes"]} - for a in local_dict["arcs"]: - arcsout[a[0]].append(a) - arcsin[a[1]].append(a) - - model = pyo.ConcreteModel(name='MinCostFlowArcs') - def flowBounds_rule(model, i,j): - return (0, local_dict["flow capacities"][(i,j)]) - model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x - - def slackBounds_rule(model, n): - if n in local_dict["factory nodes"]: - return (0, local_dict["supply"][n]) - elif n in local_dict["buyer nodes"]: - return (local_dict["supply"][n], 0) - elif n in local_dict["dummy nodes source"]: - return (0,local_dict["dummy nodes source slack bounds"][n]) - elif n in local_dict["dummy nodes target"]: #this slack will respect the opposite flow balance rule - return (0,local_dict["dummy nodes target slack bounds"][n]) - elif n in local_dict["distribution center nodes"]: - return (0,0) - else: - raise ValueError(f"unknown node type for node {n}") - - model.y = pyo.Var(local_dict["nodes"], 
bounds=slackBounds_rule) - - model.MinCost = pyo.Objective(expr=\ - sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ - + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \ - + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) , - sense=sense) - - def FlowBalance_rule(m, n): - #we change the definition of the slack for target dummy nodes so that we have the slack from the source and from the target equal - if n in local_dict["dummy nodes target"]: - return sum(m.flow[a] for a in arcsout[n])\ - - sum(m.flow[a] for a in arcsin[n])\ - - m.y[n] == local_dict["supply"][n] - else: - return sum(m.flow[a] for a in arcsout[n])\ - - sum(m.flow[a] for a in arcsin[n])\ - + m.y[n] == local_dict["supply"][n] - model.FlowBalance= pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule) - - return model - - -###Creates the scenario -def scenario_creator(scenario_name, all_nodes_dict=None, inter_region_dict=None, cfg=None, data_params=None): - """Creates the model, which should include the consensus variables. \n - However, this function shouldn't attach the consensus variables to root nodes, as it is done in admmWrapper. - - Args: - scenario_name (str): the name of the scenario that will be created. Here is of the shape f"Region{i}" with 1<=i<=num_scens \n - num_scens (int): number of scenarios (regions). Useful to create the corresponding inter-region dictionary - - Returns: - Pyomo ConcreteModel: the instantiated model - """ - assert (all_nodes_dict is not None) - assert (inter_region_dict is not None) - assert (cfg is not None) - assert (data_params is not None) - region_creation_starting_time = time.time() - region_dict = distr_data.scalable_region_dict_creator(scenario_name, all_nodes_dict=all_nodes_dict, cfg=cfg, data_params=data_params) - region_creation_end_time = time.time() - print(f"time for creating region {scenario_name}: {region_creation_end_time - region_creation_starting_time}") - # Adding dummy nodes and associated features - local_dict = dummy_nodes_generator(region_dict, inter_region_dict) - # Generating the model - model = min_cost_distr_problem(local_dict) - - varlist = list() - sputils.attach_root_node(model, model.MinCost, varlist) - - return model - - -###Functions required in other files, which constructions are specific to the problem - -def scenario_denouement(rank, scenario_name, scenario): - """for each scenario prints its name and the final variable values - - Args: - rank (int): not used here, but could be helpful to know the location - scenario_name (str): name of the scenario - scenario (Pyomo ConcreteModel): the instantiated model - """ - #print(f"flow values for {scenario_name}") - #scenario.flow.pprint() - #print(f"slack values for {scenario_name}") - #scenario.y.pprint() - pass - - -def consensus_vars_creator(num_scens, inter_region_dict, all_scenario_names): - """The following function creates the consensus_vars dictionary thanks to the inter-region dictionary. \n - This dictionary has redundant information, but is useful for admmWrapper. - - Args: - num_scens (int): select the number of scenarios (regions) wanted - - Returns: - dict: dictionary which keys are the regions and values are the list of consensus variables - present in the region - """ - # Due to the small size of inter_region_dict, it is not given as argument but rather created. 
- consensus_vars = {} - for arc in inter_region_dict["arcs"]: - source,target = arc - region_source,node_source = source - region_target,node_target = target - dummy_node = node_source + node_target - vstr = f"y[{dummy_node}]" #variable name as string, y is the slack - - #adds dummy_node in the source region - if not region_source in consensus_vars: #initiates consensus_vars[region_source] - consensus_vars[region_source] = list() - consensus_vars[region_source].append(vstr) - - #adds dummy_node in the target region - if not region_target in consensus_vars: #initiates consensus_vars[region_target] - consensus_vars[region_target] = list() - consensus_vars[region_target].append(vstr) - for region in all_scenario_names: - if region not in consensus_vars: - print(f"WARNING: {region} has no consensus_vars") - consensus_vars[region] = list() - return consensus_vars - - -def scenario_names_creator(num_scens): - """Creates the name of every scenario. - - Args: - num_scens (int): number of scenarios - - Returns: - list (str): the list of names - """ - return [f"Region{i+1}" for i in range(num_scens)] - - -def kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params): - """ - Args: - cfg (config): specifications for the problem. We only look at the number of scenarios - - Returns: - dict (str): the kwargs that are used in distr.scenario_creator, here {"num_scens": num_scens} - """ - kwargs = { - "all_nodes_dict" : all_nodes_dict, - "inter_region_dict" : inter_region_dict, - "cfg" : cfg, - "data_params" : data_params, - } - return kwargs - - -def inparser_adder(cfg): - #requires the user to give the number of scenarios - cfg.num_scens_required() - - cfg.add_to_config("scalable", - description="decides whether a sclale model is used", - domain=bool, - default=True) - - cfg.add_to_config("mnpr", - description="max number of nodes per region and per type", - domain=int, - default=4) \ No newline at end of file diff --git a/examples/distr/go.bash b/examples/distr/go.bash index 742886f84..6bbc09ba1 100644 --- a/examples/distr/go.bash +++ b/examples/distr/go.bash @@ -1,3 +1,16 @@ -#execute with bash go.bash +#!/bin/bash -mpiexec -np 3 python -u -m mpi4py scalable_distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 200 --xhatxbar --lagrangian --mnpr 40 --rel-gap 0.005 \ No newline at end of file +# How to run this bash script: +# Execute with "bash go.bash scalable" for the scalable example +# Execute with "bash go.bash anything" otherwise + +# This file runs either a scalable example or a non scalable example +if [ ${1} = "scalable" ] +then + mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 50 --xhatxbar --lagrangian --mnpr 40 --rel-gap 0.05 --scalable +else + mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 10 --xhatxbar --lagrangian --rel-gap 0.05 +fi + +#mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 50 --xhatxbar --lagrangian --mnpr 40 --rel-gap 0.05 --scalable +#mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 10 --xhatxbar --lagrangian --rel-gap 0.05 \ No newline at end of file diff --git a/examples/distr/scalable_distr_admm_cylinders.py b/examples/distr/scalable_distr_admm_cylinders.py deleted file mode 100644 index b5fec74f3..000000000 --- 
a/examples/distr/scalable_distr_admm_cylinders.py +++ /dev/null @@ -1,165 +0,0 @@ -# general example driver for distr_scalable with cylinders -import mpisppy.utils.admmWrapper as admmWrapper -import distr_data -import distr_scalable -import mpisppy.cylinders - -from mpisppy.spin_the_wheel import WheelSpinner -import mpisppy.utils.sputils as sputils -from mpisppy.utils import config -import mpisppy.utils.cfg_vanilla as vanilla -from mpisppy import MPI -global_rank = MPI.COMM_WORLD.Get_rank() - -from pyomo.environ import ConcreteModel, Var, Objective, Constraint, SolverFactory - -import time - -write_solution = False - -def _parse_args(): - # create a config object and parse - cfg = config.Config() - distr_scalable.inparser_adder(cfg) - cfg.popular_args() - cfg.two_sided_args() - cfg.ph_args() - cfg.aph_args() - cfg.xhatxbar_args() - cfg.lagrangian_args() - cfg.ph_ob_args() - cfg.tracking_args() - cfg.add_to_config("run_async", - description="Run with async projective hedging instead of progressive hedging", - domain=bool, - default=False) - - cfg.parse_command_line("distr_admm_cylinders") - return cfg - - -# This need to be executed long before the cylinders are created -def _count_cylinders(cfg): - count = 1 - cfglist = ["xhatxbar", "lagrangian", "ph_ob"] # All the cfg arguments that create a new cylinders - # Add to this list any cfg attribute that would create a spoke - for cylname in cfglist: - if cfg[cylname]: - count += 1 - return count - - -def main(): - - cfg = _parse_args() - - if cfg.default_rho is None: # and rho_setter is None - raise RuntimeError("No rho_setter so a default must be specified via --default-rho") - - if cfg.scalable: - import json - json_file_path = "data_params.json" - - # Read the JSON file - with open(json_file_path, 'r') as file: - start_time_creating_data = time.time() - - data_params = json.load(file) - all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params) - all_DC_nodes = [DC_node for region in all_nodes_dict for DC_node in all_nodes_dict[region]["distribution center nodes"]] - inter_region_dict = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params) - end_time_creating_data = time.time() - creating_data_time = end_time_creating_data - start_time_creating_data - print(f"{creating_data_time=}") - else: - raise RuntimeError ("Not implemented yet") - - ph_converger = None - - options = {} - all_scenario_names = distr_scalable.scenario_names_creator(num_scens=cfg.num_scens) - scenario_creator = distr_scalable.scenario_creator - scenario_creator_kwargs = distr_scalable.kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params) - consensus_vars = distr_scalable.consensus_vars_creator(cfg.num_scens, inter_region_dict, all_scenario_names) - n_cylinders = _count_cylinders(cfg) - admm = admmWrapper.AdmmWrapper(options, - all_scenario_names, - scenario_creator, - consensus_vars, - n_cylinders=n_cylinders, - mpicomm=MPI.COMM_WORLD, - scenario_creator_kwargs=scenario_creator_kwargs, - ) - - # Things needed for vanilla cylinders - scenario_creator = admm.admmWrapper_scenario_creator ##change needed because of the wrapper - scenario_creator_kwargs = None - scenario_denouement = distr_scalable.scenario_denouement - #note that the admmWrapper scenario_creator wrapper doesn't take any arguments - variable_probability = admm.var_prob_list - - beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) - - if cfg.run_async: - # Vanilla APH hub - hub_dict = vanilla.aph_hub(*beans, - 
scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - rho_setter = None, - variable_probability=variable_probability) - - else: - # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - ph_converger=ph_converger, - rho_setter=None, - variable_probability=variable_probability) - - # FWPH spoke DOES NOT WORK with variable probability - - # Standard Lagrangian bound spoke - if cfg.lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) - - - # ph outer bounder spoke - if cfg.ph_ob: - ph_ob_spoke = vanilla.ph_ob_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None, - variable_probability=variable_probability) - - # xhat looper bound spoke - if cfg.xhatxbar: - xhatxbar_spoke = vanilla.xhatxbar_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) - - list_of_spoke_dict = list() - if cfg.lagrangian: - list_of_spoke_dict.append(lagrangian_spoke) - if cfg.ph_ob: - list_of_spoke_dict.append(ph_ob_spoke) - if cfg.xhatxbar: - list_of_spoke_dict.append(xhatxbar_spoke) - - assert n_cylinders == 1 + len(list_of_spoke_dict), f"n_cylinders = {n_cylinders}, len(list_of_spoke_dict) = {len(list_of_spoke_dict)}" - - wheel = WheelSpinner(hub_dict, list_of_spoke_dict) - wheel.spin() - - if write_solution: - wheel.write_first_stage_solution('distr_soln.csv') - wheel.write_first_stage_solution('distr_cyl_nonants.npy', - first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) - wheel.write_tree_solution('distr_full_solution') - - if global_rank == 0: - best_objective = wheel.spcomm.BestInnerBound - print(f"{best_objective=}") - - -if __name__ == "__main__": - main() \ No newline at end of file From 9c3c03316ef8b2dc112557cc13fc05869d3b98e5 Mon Sep 17 00:00:00 2001 From: David L Woodruff Date: Tue, 11 Jun 2024 14:30:00 -0700 Subject: [PATCH 25/31] Fixing a few things for run_all, but not yet fixing the farmer runall problems with wtracker (#401) --- examples/run_all.py | 3 ++- mpisppy/spopt.py | 12 ++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/examples/run_all.py b/examples/run_all.py index bf1a292dd..43977c916 100644 --- a/examples/run_all.py +++ b/examples/run_all.py @@ -337,7 +337,6 @@ def do_one_mmw(dirname, runefstring, npyfile, mmwargstring): # sizes kills the github tests using xpress # so we use linearized proximal terms -do_one("sizes", "sizes_demo.py", 1, " {}".format(solver_name)) do_one("sizes", "special_cylinders.py", @@ -399,6 +398,8 @@ def do_one_mmw(dirname, runefstring, npyfile, mmwargstring): "--lagrangian-iter0-mipgap=1e-7 --cross-scenario-cuts " "--ph-mipgaps-json=phmipgaps.json --cross-scenario-iter-cnt=4 " "--solver-name={}".format(solver_name)) + # this one takes a long time, so I moved it into the uc section + do_one("sizes", "sizes_demo.py", 1, " {}".format(solver_name)) if len(badguys) > 0: print("\nBad Guys:") diff --git a/mpisppy/spopt.py b/mpisppy/spopt.py index b8b8e89ea..cebfa3317 100644 --- a/mpisppy/spopt.py +++ b/mpisppy/spopt.py @@ -193,6 +193,12 @@ def _vb(msg): print ("status=", results.solver.status) print ("TerminationCondition=", results.solver.termination_condition) + else: + print("no results object, so solving agin with tee=True") + solve_keyword_args["tee"] = True + results = s._solver_plugin.solve(s, + **solve_keyword_args, + load_solutions=False) if solver_exception is not None: raise solver_exception @@ -867,8 
+873,10 @@ def _create_solvers(self, presolve=True): root=0) if self.cylinder_rank == 0: asit = [sit for l_sit in all_set_instance_times for sit in l_sit] - print("Set instance times:") - print("\tmin=%4.2f mean=%4.2f max=%4.2f" % + if len(asit) == 0: + print("Set instance times not available.") + else: + print("Set instance times: \tmin=%4.2f mean=%4.2f max=%4.2f" % (np.min(asit), np.mean(asit), np.max(asit))) From 794a140a337857f14e9a2a8db8f0ca328f43fc9a Mon Sep 17 00:00:00 2001 From: David L Woodruff Date: Tue, 11 Jun 2024 17:39:48 -0700 Subject: [PATCH 26/31] trying to use a new pyotracker badge (#403) --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index f5608aa41..94dc9756a 100644 --- a/README.rst +++ b/README.rst @@ -9,7 +9,7 @@ a there is a `paper Date: Wed, 12 Jun 2024 15:28:45 -0700 Subject: [PATCH 27/31] changes related to the scalable file --- examples/distr/distr.py | 8 +++--- examples/distr/distr_data.py | 50 +++++++++++++++++++++++++++++++++-- examples/distr/distr_ef.py | 19 +++++++++++-- examples/distr/globalmodel.py | 5 ++-- 4 files changed, 72 insertions(+), 10 deletions(-) diff --git a/examples/distr/distr.py b/examples/distr/distr.py index 5dff6468a..63ce4c8f9 100644 --- a/examples/distr/distr.py +++ b/examples/distr/distr.py @@ -170,10 +170,10 @@ def scenario_denouement(rank, scenario_name, scenario): scenario_name (str): name of the scenario scenario (Pyomo ConcreteModel): the instantiated model """ - #print(f"flow values for {scenario_name}") - #scenario.flow.pprint() - #print(f"slack values for {scenario_name}") - #scenario.y.pprint() + print(f"flow values for {scenario_name}") + scenario.flow.pprint() + print(f"slack values for {scenario_name}") + scenario.y.pprint() pass diff --git a/examples/distr/distr_data.py b/examples/distr/distr_data.py index f8046109f..4aa58061c 100644 --- a/examples/distr/distr_data.py +++ b/examples/distr/distr_data.py @@ -202,6 +202,13 @@ def _is_partition(L, *lists): import numpy as np def parse_node_name(name): + """ decomposes the name, for example "DC1_2 gives" "DC",1,2 + Args: + name (str): name of the node + + Returns: + triplet (str, int, int): type of node ("DC", "F" or "B"), number of the region, and number of the node + """ # Define the regular expression pattern pattern = r'^([A-Za-z]+)(\d+)(?:_(\d+))?$' @@ -220,10 +227,29 @@ def parse_node_name(name): def _node_num(max_node_per_region, node_type, region_num, count): + """ + Args: + max_node_per_region (int): maximum number of node per region per type + + Returns: + int: a number specific to the node. This allows to have unrelated seeds + """ node_types = ["DC", "F", "B"] #no need to include the dummy nodes as they are added automatically return (max_node_per_region * region_num + count) * len(node_types) + node_types.index(node_type) def _pseudo_random_arc(node_1, node_2, prob, cfg, intra=True): #in a Region + """decides pseudo_randomly whether an arc will be created based on the two nodes + + Args: + node_1 (str): name of the source node + node_2 (str): name of the target node + prob (float): probability that the arc is created + cfg (pyomo config): the config arguments + intra (bool, optional): True if the arcs are inside a region, false if the arc is between different regions. Defaults to True. 
+
+    Returns:
+        bool: True if the arc is pseudo-randomly chosen to be created, False otherwise
+    """
     max_node_per_region = cfg.mnpr
     node_type1, region_num1, count1 = parse_node_name(node_1)
     node_type2, region_num2, count2 = parse_node_name(node_2)
@@ -243,6 +269,18 @@
 
 
 def _intra_arc_creator(my_dict, node_1, node_2, cfg, arc, arc_params, my_seed, intra=True):
+    """if the arc is pseudo-randomly chosen to be constructed, it is added to the dictionary with its cost and capacity
+
+    Args:
+        my_dict (dict): either a region_dict if intra=True, otherwise the inter_region_dict
+        node_1 (str): name of the source node
+        node_2 (str): name of the target node
+        cfg (pyomo config): the config arguments
+        arc (pair of pair of strings): of the shape source,target with source = region_source, node_source
+        arc_params (dict): parameters for the random cost and capacity
+        my_seed (int): unique number used as seed
+        intra (bool, optional): True if the arcs are inside a region, False if the arc is between different regions. Defaults to True.
+    """
     prob = arc_params["prob"]
     mean_cost = arc_params["mean cost"]
     cv_cost = arc_params["cv cost"]
@@ -262,7 +300,7 @@ def _intra_arc_creator(my_dict, node_1, node_2, cfg, arc, arc_params, my_seed, i
     my_dict[capacity_name][arc] = max(np.random.normal(mean_capacity,cv_capacity),0)
 
 
-def scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params):
+def scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params): # same as inter_region_dict_creator but the scalable version
     inter_region_arc_params = data_params["inter_region_arc"]
     inter_region_dict={}
     inter_region_dict["arcs"] = list()
@@ -282,6 +320,14 @@ def scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params):
 
 
 def all_nodes_dict_creator(cfg, data_params):
+    """
+    Args:
+        cfg (pyomo config): configuration arguments
+        data_params (nested dict): allows to construct the random probabilities
+
+    Returns:
+        (dict): the keys are the region names and each value gives all the nodes of that region.
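+        Illustrative shape only, assuming the "DC"/"F"/"B" node naming used elsewhere in this file
+        (not verbatim output): {"Region1": {"factory nodes": ["F1_1", ...], "buyer nodes": ["B1_1", ...],
+        "distribution center nodes": ["DC1", ...]}, ...}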
+ """ all_nodes_dict = {} num_scens = cfg.num_scens max_node_per_region = cfg.mnpr # maximum node node of a certain type in any region @@ -332,7 +378,7 @@ def all_nodes_dict_creator(cfg, data_params): return all_nodes_dict -def scalable_region_dict_creator(scenario_name, all_nodes_dict=None, cfg=None, data_params=None): +def scalable_region_dict_creator(scenario_name, all_nodes_dict=None, cfg=None, data_params=None): # same as region_dict_creator but the scalable version assert all_nodes_dict is not None assert cfg is not None assert data_params is not None diff --git a/examples/distr/distr_ef.py b/examples/distr/distr_ef.py index 34ccf0748..d1ca2b80a 100644 --- a/examples/distr/distr_ef.py +++ b/examples/distr/distr_ef.py @@ -3,6 +3,7 @@ # general example driver for distr with cylinders import mpisppy.utils.admmWrapper as admmWrapper import distr +import distr_data import mpisppy.cylinders import pyomo.environ as pyo @@ -47,12 +48,26 @@ def solve_EF_directly(admm,solver_name): def main(): cfg = _parse_args() + if cfg.scalable: + import json + json_file_path = "data_params.json" + + # Read the JSON file + with open(json_file_path, 'r') as file: + data_params = json.load(file) + all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params) + all_DC_nodes = [DC_node for region in all_nodes_dict for DC_node in all_nodes_dict[region]["distribution center nodes"]] + inter_region_dict = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params) + else: + inter_region_dict = distr_data.inter_region_dict_creator(num_scens=cfg.num_scens) + all_nodes_dict = None + data_params = None options = {} all_scenario_names = distr.scenario_names_creator(num_scens=cfg.num_scens) scenario_creator = distr.scenario_creator - scenario_creator_kwargs = distr.kw_creator(cfg) - consensus_vars = distr.consensus_vars_creator(cfg.num_scens) + scenario_creator_kwargs = distr.kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params) + consensus_vars = distr.consensus_vars_creator(cfg.num_scens, inter_region_dict, all_scenario_names) n_cylinders = 1 admm = admmWrapper.AdmmWrapper(options, diff --git a/examples/distr/globalmodel.py b/examples/distr/globalmodel.py index e85a675ad..039b47635 100644 --- a/examples/distr/globalmodel.py +++ b/examples/distr/globalmodel.py @@ -1,6 +1,7 @@ # Distribution example without decomposition import pyomo.environ as pyo import distr +import distr_data from mpisppy.utils import config def _parse_args(): @@ -77,11 +78,11 @@ def global_dict_creator(num_scens, start=0): Returns: dict: dictionary with the information to create a min cost distribution problem """ - inter_region_dict = distr.inter_region_dict_creator(num_scens) + inter_region_dict = distr_data.inter_region_dict_creator(num_scens) global_dict={} for i in range(start,start+num_scens): scenario_name=f"Region{i+1}" - region_dict = distr.region_dict_creator(scenario_name) + region_dict = distr_data.region_dict_creator(scenario_name) for key in region_dict: if key in ["nodes","factory nodes", "buyer nodes", "distribution center nodes", "arcs"]: if i == start: From fcfa7beffb6e9c9998c2e210a0ea680ee87c5f69 Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Wed, 12 Jun 2024 15:37:43 -0700 Subject: [PATCH 28/31] other model file without dummy nodes --- examples/distr/distr_admm_cylinders.py | 1 + examples/distr/distr_no_dummy.py | 228 +++++++++++++++++++++++++ examples/distr/go.bash | 2 + 3 files changed, 231 insertions(+) create mode 100644 examples/distr/distr_no_dummy.py diff --git 
a/examples/distr/distr_admm_cylinders.py b/examples/distr/distr_admm_cylinders.py index 895fe1aae..f145bc620 100644 --- a/examples/distr/distr_admm_cylinders.py +++ b/examples/distr/distr_admm_cylinders.py @@ -2,6 +2,7 @@ import mpisppy.utils.admmWrapper as admmWrapper import distr_data import distr +#import distr_no_dummy as distr import mpisppy.cylinders from mpisppy.spin_the_wheel import WheelSpinner diff --git a/examples/distr/distr_no_dummy.py b/examples/distr/distr_no_dummy.py new file mode 100644 index 000000000..d3528b649 --- /dev/null +++ b/examples/distr/distr_no_dummy.py @@ -0,0 +1,228 @@ +# Network Flow - various formulations +import pyomo.environ as pyo +import mpisppy.utils.sputils as sputils +import distr_data +import time + +# In this file, we create a (linear) inter-region minimal cost distribution problem. +# Our data, gives the constraints inside each in region in region_dict_creator +# and amongst the different regions in inter_region_dict_creator +# The data slightly differs depending on the number of regions (num_scens) which is created for 2, 3 or 4 regions + +# Note: regions in our model will be represented in mpi-sppy by scenarios and to ensure the inter-region constraints +# we will impose the inter-region arcs to be consensus-variables. They will be represented as non-ants in mpi-sppy + + +def inter_arcs_adder(region_dict, inter_region_dict): + """This function adds to the region_dict the inter-region arcs + + Args: + region_dict (dict): dictionary for the current scenario \n + inter_region_dict (dict): dictionary of the inter-region relations + + Returns: + local_dict (dict): + This dictionary copies region_dict, completes the already existing fields of + region_dict to represent the inter-region arcs and their capcities and costs + """ + ### Note: The cost of the arc is here chosen to be split equally between the source region and target region + + # region_dict is renamed as local_dict because it no longer contains only information about the region + # but also contains local information that can be linked to the other scenarios (with inter-region arcs) + local_dict = region_dict + for inter_arc in inter_region_dict["arcs"]: + source,target = inter_arc + region_source,node_source = source + region_target,node_target = target + + + if region_source == region_dict["name"] or region_target == region_dict["name"]: + arc = node_source, node_target + local_dict["arcs"].append(arc) + local_dict["flow capacities"][arc] = inter_region_dict["capacities"][inter_arc] + local_dict["flow costs"][arc] = inter_region_dict["costs"][inter_arc]/2 + #print(f"scenario name = {region_dict['name']} \n {local_dict=} ") + return local_dict + +###Creates the model when local_dict is given +def min_cost_distr_problem(local_dict, sense=pyo.minimize): + """ Create an arcs formulation of network flow + + Args: + local_dict (dict): dictionary representing a region including the inter region arcs \n + sense (=pyo.minimize): we aim to minimize the cost, this should always be minimize + + Returns: + model (Pyomo ConcreteModel) : the instantiated model + """ + # Assert sense == pyo.minimize, "sense should be equal to pyo.minimize" + # First, make the special In, Out arc lists for each node + arcsout = {n: list() for n in local_dict["nodes"]} + arcsin = {n: list() for n in local_dict["nodes"]} + for a in local_dict["arcs"]: + if a[0] in local_dict["nodes"]: + arcsout[a[0]].append(a) + if a[1] in local_dict["nodes"]: + arcsin[a[1]].append(a) + + model = pyo.ConcreteModel(name='MinCostFlowArcs') + 
def flowBounds_rule(model, i,j): + return (0, local_dict["flow capacities"][(i,j)]) + model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x + + def slackBounds_rule(model, n): + if n in local_dict["factory nodes"]: + return (0, local_dict["supply"][n]) + elif n in local_dict["buyer nodes"]: + return (local_dict["supply"][n], 0) + elif n in local_dict["distribution center nodes"]: + return (0,0) + else: + raise ValueError(f"unknown node type for node {n}") + + model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule) + + model.MinCost = pyo.Objective(expr=\ + sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ + + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \ + + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) , + sense=sense) + + def FlowBalance_rule(m, n): + return sum(m.flow[a] for a in arcsout[n])\ + - sum(m.flow[a] for a in arcsin[n])\ + + m.y[n] == local_dict["supply"][n] + model.FlowBalance = pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule) + + return model + + +###Creates the scenario +def scenario_creator(scenario_name, inter_region_dict=None, cfg=None, data_params=None, all_nodes_dict=None): + """Creates the model, which should include the consensus variables. \n + However, this function shouldn't attach the consensus variables to root nodes, as it is done in admmWrapper. + + Args: + scenario_name (str): the name of the scenario that will be created. Here is of the shape f"Region{i}" with 1<=i<=num_scens \n + num_scens (int): number of scenarios (regions). Useful to create the corresponding inter-region dictionary + + Returns: + Pyomo ConcreteModel: the instantiated model + """ + assert (inter_region_dict is not None) + assert (cfg is not None) + if cfg.scalable: + assert (data_params is not None) + assert (all_nodes_dict is not None) + region_creation_starting_time = time.time() + region_dict = distr_data.scalable_region_dict_creator(scenario_name, all_nodes_dict=all_nodes_dict, cfg=cfg, data_params=data_params) + region_creation_end_time = time.time() + print(f"time for creating region {scenario_name}: {region_creation_end_time - region_creation_starting_time}") + else: + region_dict = distr_data.region_dict_creator(scenario_name) + # Adding inter region arcs nodes and associated features + local_dict = inter_arcs_adder(region_dict, inter_region_dict) + # Generating the model + model = min_cost_distr_problem(local_dict) + + #varlist = list() + #sputils.attach_root_node(model, model.MinCost, varlist) + + return model + + +###Functions required in other files, which constructions are specific to the problem + +def scenario_denouement(rank, scenario_name, scenario): + """for each scenario prints its name and the final variable values + + Args: + rank (int): not used here, but could be helpful to know the location + scenario_name (str): name of the scenario + scenario (Pyomo ConcreteModel): the instantiated model + """ + print(f"flow values for {scenario_name}") + scenario.flow.pprint() + print(f"slack values for {scenario_name}") + scenario.y.pprint() + pass + + +def consensus_vars_creator(num_scens, inter_region_dict, all_scenario_names): + """The following function creates the consensus_vars dictionary thanks to the inter-region dictionary. \n + This dictionary has redundant information, but is useful for admmWrapper. 
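+    For example, with a single inter-region arc from ("Region1","DC1") to ("Region2","DC2"), the result
+    would be (illustrative): {"Region1": ["flow[('DC1', 'DC2')]"], "Region2": ["flow[('DC1', 'DC2')]"]},
+    since the same flow variable must appear in both the source and the target region.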
+ + Args: + num_scens (int): select the number of scenarios (regions) wanted + + Returns: + dict: dictionary which keys are the regions and values are the list of consensus variables + present in the region + """ + # Due to the small size of inter_region_dict, it is not given as argument but rather created. + consensus_vars = {} + for inter_arc in inter_region_dict["arcs"]: + source,target = inter_arc + region_source,node_source = source + region_target,node_target = target + arc = node_source, node_target + vstr = f"flow[{arc}]" #variable name as string, y is the slack + + #adds inter region arcs in the source region + if not region_source in consensus_vars: #initiates consensus_vars[region_source] + consensus_vars[region_source] = list() + consensus_vars[region_source].append(vstr) + + #adds inter region arcs in the target region + if not region_target in consensus_vars: #initiates consensus_vars[region_target] + consensus_vars[region_target] = list() + consensus_vars[region_target].append(vstr) + for region in all_scenario_names: + if region not in consensus_vars: + print(f"WARNING: {region} has no consensus_vars") + consensus_vars[region] = list() + return consensus_vars + + +def scenario_names_creator(num_scens): + """Creates the name of every scenario. + + Args: + num_scens (int): number of scenarios + + Returns: + list (str): the list of names + """ + return [f"Region{i+1}" for i in range(num_scens)] + + +def kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params): + """ + Args: + cfg (config): specifications for the problem. We only look at the number of scenarios + + Returns: + dict (str): the kwargs that are used in distr.scenario_creator, here {"num_scens": num_scens} + """ + kwargs = { + "all_nodes_dict" : all_nodes_dict, + "inter_region_dict" : inter_region_dict, + "cfg" : cfg, + "data_params" : data_params, + } + return kwargs + + +def inparser_adder(cfg): + #requires the user to give the number of scenarios + cfg.num_scens_required() + + cfg.add_to_config("scalable", + description="decides whether a sclale model is used", + domain=bool, + default=False) + + cfg.add_to_config("mnpr", + description="max number of nodes per region and per type", + domain=int, + default=4) \ No newline at end of file diff --git a/examples/distr/go.bash b/examples/distr/go.bash index 6bbc09ba1..9a73ce6f0 100644 --- a/examples/distr/go.bash +++ b/examples/distr/go.bash @@ -4,6 +4,8 @@ # Execute with "bash go.bash scalable" for the scalable example # Execute with "bash go.bash anything" otherwise +# Right now, chosing between the version with dummy nodes and without dummy nodes is hardwired in distr_admm_cylinders.py + # This file runs either a scalable example or a non scalable example if [ ${1} = "scalable" ] then From f0dda48023a9d4450cfb2c84e412e097681bbf63 Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Thu, 20 Jun 2024 09:38:51 -0700 Subject: [PATCH 29/31] modifying to only keep the distr example without dummy node --- doc/src/admmWrapper.rst | 42 +++--- examples/distr/distr.py | 96 +++++-------- examples/distr/distr_ef.py | 3 + examples/distr/distr_no_dummy.py | 228 ------------------------------- examples/distr/globalmodel.py | 59 ++------ examples/distr/go.bash | 12 +- 6 files changed, 68 insertions(+), 372 deletions(-) delete mode 100644 examples/distr/distr_no_dummy.py diff --git a/doc/src/admmWrapper.rst b/doc/src/admmWrapper.rst index dfc64116b..e5f73e942 100644 --- a/doc/src/admmWrapper.rst +++ b/doc/src/admmWrapper.rst @@ -12,7 +12,7 @@ Usage ----- The driver (in the 
example ``distr_admm_cylinders.py``) calls ``admmWrapper.py``,
-thanks to the formated data provided by the model (in the example ``examples.distr.distr.py``).
+thanks to the formatted model provided by the model file (in the example ``examples.distr.distr.py``).
 The file ``admmWrapper.py`` returns variable probabilities that can be used in the driver to create the PH (or APH)
 object which will solve the subproblems in a parallel way, insuring that merging conditions are respected.
@@ -56,7 +56,7 @@ Here is a summary:
 Subproblems will be represented in mpi-sppy by scenarios. Consensus variables ensure the problem constraints are respected,
 they are represented in mpi-sppy by non-anticipative variables.
 The following terms are associated: subproblems = scenarios (= regions in the example),
-    and nonants = consensus-vars (= dummy nodes in the example)
+    and nonants = consensus-vars (= flow among regions in the example)
 
 The driver also needs global information to link the subproblems.
@@ -90,7 +90,7 @@ as it doesn't decompose the problem.
 
 .. note::
 
-   ``admmWrapper`` doesn't collect instanciation time.
+   ``admmWrapper`` doesn't yet collect instantiation time.
 
 Distribution example
 --------------------
@@ -99,12 +99,15 @@
 Original data dictionaries
 +++++++++++++++++++++++++++
 
+The data is created in ``distr_data.py``. Some models are hardwired for 2, 3 and 4 regions.
+Other models are created pseudo-randomly, using the parameters defined in ``data_params.json``.
 
-In the example the ``inter_region_dict_creator`` creates the inter-region information.
+In the example the ``inter_region_dict_creator`` (or ``scalable_inter_region_dict_creator``) creates the inter-region information.
 
 .. autofunction:: examples.distr.distr.inter_region_dict_creator
 
-The ``region_dict_creator`` creates the information specific to a region regardless of the other regions.
+The ``region_dict_creator`` (or ``scalable_region_dict_creator``) creates the information specific to a region,
+regardless of the other regions.
 
 .. autofunction:: examples.distr.distr.region_dict_creator
 
@@ -112,23 +115,23 @@
 Adapting the data to create the model
 +++++++++++++++++++++++++++++++++++++
 
 To solve the regions independantly we must make sure that the constraints (in our example flow balance) would still be respected if the
-models were merged. To impose this, dummy nodes are introduced.
+models were merged. To impose this, consensus variables are introduced.
 
-In our example a consensus variable is the slack variable on a dummy node. \\
-If ``inter-region-dict`` indicates that an arc exists from the node DC1 (distrubution center in Region 1) to DC2, then we create a dummy
-node DC1DC2 stored both in Region1's local_dict["dummy node source"] and in Region2's local_dict["dummy node target"], with a slack whose
-only constraint is to be lower than the flow possible from DC1 to DC2. PH then insures that the slacks are equal.
+In our example a consensus variable is the flow along an inter-region arc. In each regional model we introduce the inter-region arcs
+whose source or target lies in the region, so that the flow balance rule can be imposed inside the region. But at this stage, nothing
+ensures that the flow from DC1 to DC2 represented in Region 1 is the same as the flow from DC1 to DC2 represented in Region 2.
+That is why the flow ``flow["DC1DC2"]`` is a consensus variable in both regions: to ensure it is the same.\\
 
-The purpose of ``examples.distr.distr.dummy_nodes_generator`` is to do that.
+The purpose of ``examples.distr.distr.inter_arcs_adder`` is to do that.
 
-.. autofunction:: examples.distr.distr.dummy_nodes_generator
+.. autofunction:: examples.distr.distr.inter_arcs_adder
 
 .. note::
 
    In the example the cost of transport is chosen to be split equally between the source region and the target region.
 
-   We here represent the flow problem with a directed graph. If a flow from DC2 to DC1 were to be authorized we would create
-   a dummy node DC2DC1.
+   We here represent the flow problem with a directed graph. If, in addition to the flow from DC1 to DC2 represented by ``flow["DC1DC2"]``,
+   a flow from DC2 to DC1 were to be authorized we would also have ``flow["DC2DC1"]`` in both regions.
 
 Once the local_dict is created, the Pyomo model can be created thanks to ``min_cost_distr_problem``.
 
@@ -178,17 +181,18 @@ In the example the driver gets argument from the command line through the functi
 Non local solvers
 +++++++++++++++++
 
-The file ``globalmodel.py`` and ``distr_ef.py`` are used for debugging. They don't rely on ph or admm, they simply solve the
+The files ``globalmodel.py`` and ``distr_ef.py`` are used for debugging or learning. They don't rely on ph or admm, they simply solve the
 problem without decomposition.
 
 * ``globalmodel.py``
 
   In ``globalmodel.py``, ``global_dict_creator`` merges the data into a global dictionary
-   thanks to the inter-region dictionary, without creating dummy nodes. Then model is created (as if there was
-   only one region without dummy nodes) and solved.
+   thanks to the inter-region dictionary, without creating inter-region arcs. Then the model is created (as if there were
+   only one region with no arcs leaving it) and solved.
 
-   However this file strongly depends on the structure of the problem and doesn't decompose the problems. It may be difficultly adpated
-   and inefficient.
+   However this file depends on the structure of the problem and doesn't decompose the problem.
+   Luckily in this example, the model creator is the same as in ``distr``, because the changes for consensus-vars are negligible.
+   However, in general, this file may be difficult to adapt and inefficient.
 
 * ``distr_ef.py``
 
diff --git a/examples/distr/distr.py b/examples/distr/distr.py
index 63ce4c8f9..481dfa6bf 100644
--- a/examples/distr/distr.py
+++ b/examples/distr/distr.py
@@ -10,12 +10,11 @@
 # The data slightly differs depending on the number of regions (num_scens) which is created for 2, 3 or 4 regions
 
 # Note: regions in our model will be represented in mpi-sppy by scenarios and to ensure the inter-region constraints
-# we will use dummy-nodes, which will be represented in mpi-sppy by non-anticipative variables
-# The following association of terms are made: regions = scenarios, and dummy-nodes = nonants = consensus-vars
+# we will impose the inter-region arcs to be consensus-variables. They will be represented as nonants in mpi-sppy
 
 
-def dummy_nodes_generator(region_dict, inter_region_dict):
-    """This function creates a new dictionary ``local_dict similar`` to ``region_dict`` with the dummy nodes and their constraints
+def inter_arcs_adder(region_dict, inter_region_dict):
+    """This function adds the inter-region arcs to the region_dict
 
     Args:
         region_dict (dict): dictionary for the current scenario \n
@@ -24,53 +23,32 @@ def dummy_nodes_generator(region_dict, inter_region_dict):
 
     Returns:
         local_dict (dict):
             This dictionary copies region_dict, completes the already existing fields of
-            region_dict to represent the dummy nodes, and adds the following keys:\n
-            dummy nodes source (resp. 
target): the list of dummy nodes for which the source (resp. target)
-            is in the considered region. \n
-            dummy nodes source (resp. target) slack bounds: dictionary on dummy nodes source (resp. target)
+            region_dict to represent the inter-region arcs and their capacities and costs
     """
     ### Note: The cost of the arc is here chosen to be split equally between the source region and target region
 
     # region_dict is renamed as local_dict because it no longer contains only information about the region
-    # but also contains local information that can be linked to the other scenarios (with dummy nodes)
+    # but also contains local information that can be linked to the other scenarios (with inter-region arcs)
     local_dict = region_dict
-    local_dict["dummy nodes source"] = list()
-    local_dict["dummy nodes target"] = list()
-    local_dict["dummy nodes source slack bounds"] = {}
-    local_dict["dummy nodes target slack bounds"] = {}
-    for arc in inter_region_dict["arcs"]:
-        source,target = arc
+    for inter_arc in inter_region_dict["arcs"]:
+        source,target = inter_arc
         region_source,node_source = source
         region_target,node_target = target
 
-        if region_source == region_dict["name"]:
-            dummy_node=node_source+node_target
-            local_dict["nodes"].append(dummy_node)
-            local_dict["supply"][dummy_node] = 0
-            local_dict["dummy nodes source"].append(dummy_node)
-            local_dict["arcs"].append((node_source, dummy_node))
-            local_dict["flow costs"][(node_source, dummy_node)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model
-            local_dict["flow capacities"][(node_source, dummy_node)] = inter_region_dict["capacities"][(source, target)]
-            local_dict["dummy nodes source slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)]
-
-        if region_target == local_dict["name"]:
-            dummy_node = node_source + node_target
-            local_dict["nodes"].append(dummy_node)
-            local_dict["supply"][dummy_node] = 0
-            local_dict["dummy nodes target"].append(dummy_node)
-            local_dict["arcs"].append((dummy_node, node_target))
-            local_dict["flow costs"][(dummy_node, node_target)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model
-            local_dict["flow capacities"][(dummy_node, node_target)] = inter_region_dict["capacities"][(source, target)]
-            local_dict["dummy nodes target slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)]
+        if region_source == region_dict["name"] or region_target == region_dict["name"]:
+            arc = node_source, node_target
+            local_dict["arcs"].append(arc)
+            local_dict["flow capacities"][arc] = inter_region_dict["capacities"][inter_arc]
+            local_dict["flow costs"][arc] = inter_region_dict["costs"][inter_arc]/2
+    #print(f"scenario name = {region_dict['name']} \n {local_dict=} ")
     return local_dict
 
-
 ###Creates the model when local_dict is given
 def min_cost_distr_problem(local_dict, sense=pyo.minimize):
     """ Create an arcs formulation of network flow
 
     Args:
-        local_dict (dict): dictionary representing a region including the dummy nodes \n
+        local_dict (dict): dictionary representing a region including the inter-region arcs \n
         sense (=pyo.minimize): we aim to minimize the cost, this should always be minimize
 
     Returns:
@@ -81,8 +59,10 @@ def min_cost_distr_problem(local_dict, sense=pyo.minimize):
     arcsout = {n: list() for n in local_dict["nodes"]}
     arcsin = {n: list() for n in local_dict["nodes"]}
     for a in local_dict["arcs"]:
-        arcsout[a[0]].append(a)
-        arcsin[a[1]].append(a)
+        if a[0] in local_dict["nodes"]:
+            arcsout[a[0]].append(a)
+        if a[1] in local_dict["nodes"]:
+            arcsin[a[1]].append(a)
 
     model = pyo.ConcreteModel(name='MinCostFlowArcs')
     def flowBounds_rule(model, i,j):
@@ -94,10 +74,6 @@ def slackBounds_rule(model, n):
             return (0, local_dict["supply"][n])
         elif n in local_dict["buyer nodes"]:
             return (local_dict["supply"][n], 0)
-        elif n in local_dict["dummy nodes source"]:
-            return (0,local_dict["dummy nodes source slack bounds"][n])
-        elif n in local_dict["dummy nodes target"]: #this slack will respect the opposite flow balance rule
-            return (0,local_dict["dummy nodes target slack bounds"][n])
         elif n in local_dict["distribution center nodes"]:
             return (0,0)
         else:
@@ -112,16 +88,10 @@ def slackBounds_rule(model, n):
         sense=sense)
 
     def FlowBalance_rule(m, n):
-        #we change the definition of the slack for target dummy nodes so that we have the slack from the source and from the target equal
-        if n in local_dict["dummy nodes target"]:
-            return sum(m.flow[a] for a in arcsout[n])\
-            - sum(m.flow[a] for a in arcsin[n])\
-            - m.y[n] == local_dict["supply"][n]
-        else:
-            return sum(m.flow[a] for a in arcsout[n])\
-            - sum(m.flow[a] for a in arcsin[n])\
-            + m.y[n] == local_dict["supply"][n]
-    model.FlowBalance= pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule)
+        return sum(m.flow[a] for a in arcsout[n])\
+            - sum(m.flow[a] for a in arcsin[n])\
+            + m.y[n] == local_dict["supply"][n]
+    model.FlowBalance = pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule)
 
     return model
 
@@ -149,13 +119,13 @@ def scenario_creator(scenario_name, inter_region_dict=None, cfg=None, data_param
         print(f"time for creating region {scenario_name}: {region_creation_end_time - region_creation_starting_time}")
     else:
         region_dict = distr_data.region_dict_creator(scenario_name)
-    # Adding dummy nodes and associated features
-    local_dict = dummy_nodes_generator(region_dict, inter_region_dict)
+    # Adding inter-region arcs and associated features
+    local_dict = inter_arcs_adder(region_dict, inter_region_dict)
     # Generating the model
     model = min_cost_distr_problem(local_dict)
-
-    varlist = list()
-    sputils.attach_root_node(model, model.MinCost, varlist)
+    
+    #varlist = list()
+    #sputils.attach_root_node(model, model.MinCost, varlist)
 
     return model
 
@@ -190,19 +160,19 @@ def consensus_vars_creator(num_scens, inter_region_dict, all_scenario_names):
     """
     # Due to the small size of inter_region_dict, it is not given as argument but rather created.
     consensus_vars = {}
-    for arc in inter_region_dict["arcs"]:
-        source,target = arc
+    for inter_arc in inter_region_dict["arcs"]:
+        source,target = inter_arc
         region_source,node_source = source
         region_target,node_target = target
-        dummy_node = node_source + node_target
-        vstr = f"y[{dummy_node}]" #variable name as string, y is the slack
+        arc = node_source, node_target
+        vstr = f"flow[{arc}]" #variable name as string
 
-        #adds dummy_node in the source region
+        #adds inter region arcs in the source region
         if not region_source in consensus_vars: #initiates consensus_vars[region_source]
            consensus_vars[region_source] = list()
        consensus_vars[region_source].append(vstr)
 
-        #adds dummy_node in the target region
+        #adds inter region arcs in the target region
        if not region_target in consensus_vars: #initiates consensus_vars[region_target]
            consensus_vars[region_target] = list()
        consensus_vars[region_target].append(vstr)
diff --git a/examples/distr/distr_ef.py b/examples/distr/distr_ef.py
index d1ca2b80a..a07b8d930 100644
--- a/examples/distr/distr_ef.py
+++ b/examples/distr/distr_ef.py
@@ -1,6 +1,9 @@
 # Copyright 2020 by B. Knueven, D. Mildebrath, C. 
Muir, J-P Watson, and D.L. Woodruff # This software is distributed under the 3-clause BSD License. # general example driver for distr with cylinders + +# This file can be executed thanks to python distr_ef.py --num-scens 2 --solver-name cplex_direct + import mpisppy.utils.admmWrapper as admmWrapper import distr import distr_data diff --git a/examples/distr/distr_no_dummy.py b/examples/distr/distr_no_dummy.py deleted file mode 100644 index d3528b649..000000000 --- a/examples/distr/distr_no_dummy.py +++ /dev/null @@ -1,228 +0,0 @@ -# Network Flow - various formulations -import pyomo.environ as pyo -import mpisppy.utils.sputils as sputils -import distr_data -import time - -# In this file, we create a (linear) inter-region minimal cost distribution problem. -# Our data, gives the constraints inside each in region in region_dict_creator -# and amongst the different regions in inter_region_dict_creator -# The data slightly differs depending on the number of regions (num_scens) which is created for 2, 3 or 4 regions - -# Note: regions in our model will be represented in mpi-sppy by scenarios and to ensure the inter-region constraints -# we will impose the inter-region arcs to be consensus-variables. They will be represented as non-ants in mpi-sppy - - -def inter_arcs_adder(region_dict, inter_region_dict): - """This function adds to the region_dict the inter-region arcs - - Args: - region_dict (dict): dictionary for the current scenario \n - inter_region_dict (dict): dictionary of the inter-region relations - - Returns: - local_dict (dict): - This dictionary copies region_dict, completes the already existing fields of - region_dict to represent the inter-region arcs and their capcities and costs - """ - ### Note: The cost of the arc is here chosen to be split equally between the source region and target region - - # region_dict is renamed as local_dict because it no longer contains only information about the region - # but also contains local information that can be linked to the other scenarios (with inter-region arcs) - local_dict = region_dict - for inter_arc in inter_region_dict["arcs"]: - source,target = inter_arc - region_source,node_source = source - region_target,node_target = target - - - if region_source == region_dict["name"] or region_target == region_dict["name"]: - arc = node_source, node_target - local_dict["arcs"].append(arc) - local_dict["flow capacities"][arc] = inter_region_dict["capacities"][inter_arc] - local_dict["flow costs"][arc] = inter_region_dict["costs"][inter_arc]/2 - #print(f"scenario name = {region_dict['name']} \n {local_dict=} ") - return local_dict - -###Creates the model when local_dict is given -def min_cost_distr_problem(local_dict, sense=pyo.minimize): - """ Create an arcs formulation of network flow - - Args: - local_dict (dict): dictionary representing a region including the inter region arcs \n - sense (=pyo.minimize): we aim to minimize the cost, this should always be minimize - - Returns: - model (Pyomo ConcreteModel) : the instantiated model - """ - # Assert sense == pyo.minimize, "sense should be equal to pyo.minimize" - # First, make the special In, Out arc lists for each node - arcsout = {n: list() for n in local_dict["nodes"]} - arcsin = {n: list() for n in local_dict["nodes"]} - for a in local_dict["arcs"]: - if a[0] in local_dict["nodes"]: - arcsout[a[0]].append(a) - if a[1] in local_dict["nodes"]: - arcsin[a[1]].append(a) - - model = pyo.ConcreteModel(name='MinCostFlowArcs') - def flowBounds_rule(model, i,j): - return (0, local_dict["flow 
capacities"][(i,j)]) - model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x - - def slackBounds_rule(model, n): - if n in local_dict["factory nodes"]: - return (0, local_dict["supply"][n]) - elif n in local_dict["buyer nodes"]: - return (local_dict["supply"][n], 0) - elif n in local_dict["distribution center nodes"]: - return (0,0) - else: - raise ValueError(f"unknown node type for node {n}") - - model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule) - - model.MinCost = pyo.Objective(expr=\ - sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ - + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \ - + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) , - sense=sense) - - def FlowBalance_rule(m, n): - return sum(m.flow[a] for a in arcsout[n])\ - - sum(m.flow[a] for a in arcsin[n])\ - + m.y[n] == local_dict["supply"][n] - model.FlowBalance = pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule) - - return model - - -###Creates the scenario -def scenario_creator(scenario_name, inter_region_dict=None, cfg=None, data_params=None, all_nodes_dict=None): - """Creates the model, which should include the consensus variables. \n - However, this function shouldn't attach the consensus variables to root nodes, as it is done in admmWrapper. - - Args: - scenario_name (str): the name of the scenario that will be created. Here is of the shape f"Region{i}" with 1<=i<=num_scens \n - num_scens (int): number of scenarios (regions). Useful to create the corresponding inter-region dictionary - - Returns: - Pyomo ConcreteModel: the instantiated model - """ - assert (inter_region_dict is not None) - assert (cfg is not None) - if cfg.scalable: - assert (data_params is not None) - assert (all_nodes_dict is not None) - region_creation_starting_time = time.time() - region_dict = distr_data.scalable_region_dict_creator(scenario_name, all_nodes_dict=all_nodes_dict, cfg=cfg, data_params=data_params) - region_creation_end_time = time.time() - print(f"time for creating region {scenario_name}: {region_creation_end_time - region_creation_starting_time}") - else: - region_dict = distr_data.region_dict_creator(scenario_name) - # Adding inter region arcs nodes and associated features - local_dict = inter_arcs_adder(region_dict, inter_region_dict) - # Generating the model - model = min_cost_distr_problem(local_dict) - - #varlist = list() - #sputils.attach_root_node(model, model.MinCost, varlist) - - return model - - -###Functions required in other files, which constructions are specific to the problem - -def scenario_denouement(rank, scenario_name, scenario): - """for each scenario prints its name and the final variable values - - Args: - rank (int): not used here, but could be helpful to know the location - scenario_name (str): name of the scenario - scenario (Pyomo ConcreteModel): the instantiated model - """ - print(f"flow values for {scenario_name}") - scenario.flow.pprint() - print(f"slack values for {scenario_name}") - scenario.y.pprint() - pass - - -def consensus_vars_creator(num_scens, inter_region_dict, all_scenario_names): - """The following function creates the consensus_vars dictionary thanks to the inter-region dictionary. \n - This dictionary has redundant information, but is useful for admmWrapper. 
- - Args: - num_scens (int): select the number of scenarios (regions) wanted - - Returns: - dict: dictionary which keys are the regions and values are the list of consensus variables - present in the region - """ - # Due to the small size of inter_region_dict, it is not given as argument but rather created. - consensus_vars = {} - for inter_arc in inter_region_dict["arcs"]: - source,target = inter_arc - region_source,node_source = source - region_target,node_target = target - arc = node_source, node_target - vstr = f"flow[{arc}]" #variable name as string, y is the slack - - #adds inter region arcs in the source region - if not region_source in consensus_vars: #initiates consensus_vars[region_source] - consensus_vars[region_source] = list() - consensus_vars[region_source].append(vstr) - - #adds inter region arcs in the target region - if not region_target in consensus_vars: #initiates consensus_vars[region_target] - consensus_vars[region_target] = list() - consensus_vars[region_target].append(vstr) - for region in all_scenario_names: - if region not in consensus_vars: - print(f"WARNING: {region} has no consensus_vars") - consensus_vars[region] = list() - return consensus_vars - - -def scenario_names_creator(num_scens): - """Creates the name of every scenario. - - Args: - num_scens (int): number of scenarios - - Returns: - list (str): the list of names - """ - return [f"Region{i+1}" for i in range(num_scens)] - - -def kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params): - """ - Args: - cfg (config): specifications for the problem. We only look at the number of scenarios - - Returns: - dict (str): the kwargs that are used in distr.scenario_creator, here {"num_scens": num_scens} - """ - kwargs = { - "all_nodes_dict" : all_nodes_dict, - "inter_region_dict" : inter_region_dict, - "cfg" : cfg, - "data_params" : data_params, - } - return kwargs - - -def inparser_adder(cfg): - #requires the user to give the number of scenarios - cfg.num_scens_required() - - cfg.add_to_config("scalable", - description="decides whether a sclale model is used", - domain=bool, - default=False) - - cfg.add_to_config("mnpr", - description="max number of nodes per region and per type", - domain=int, - default=4) \ No newline at end of file diff --git a/examples/distr/globalmodel.py b/examples/distr/globalmodel.py index 039b47635..973afad26 100644 --- a/examples/distr/globalmodel.py +++ b/examples/distr/globalmodel.py @@ -1,4 +1,9 @@ -# Distribution example without decomposition +# Distribution example without decomposition, this file is only written to execute the non-scalable example + +### This code line can execute the script for a certain example +# python globalmodel.py --solver-name cplex_direct --num-scens 3 + + import pyomo.environ as pyo import distr import distr_data @@ -19,55 +24,6 @@ def _parse_args(): return cfg -##we need to create a new model solver, to avoid dummy nodes -def min_cost_distr_problem(region_dict, sense=pyo.minimize): - """ Create an arcs formulation of network flow, the function is the simplification without dummy nodes of this function in distr - - Arg: - region_dict (dict): given by region_dict_creator with test_without_dummy=True so that there is no dummy node - - Returns: - model (Pyomo ConcreteModel) : the instantiated model - """ - # First, make the special In, Out arc lists for each node - arcsout = {n: list() for n in region_dict["nodes"]} - arcsin = {n: list() for n in region_dict["nodes"]} - for a in region_dict["arcs"]: - arcsout[a[0]].append(a) - arcsin[a[1]].append(a) 
- - model = pyo.ConcreteModel(name='MinCostFlowArcs') - def flowBounds_rule(model, i,j): - return (0, region_dict["flow capacities"][(i,j)]) - model.flow = pyo.Var(region_dict["arcs"], bounds=flowBounds_rule) # x - - def slackBounds_rule(model, n): - if n in region_dict["factory nodes"]: - return (0, region_dict["supply"][n]) - elif n in region_dict["buyer nodes"]: - return (region_dict["supply"][n], 0) - elif n in region_dict["distribution center nodes"]: - return (0,0) - else: - raise ValueError(f"unknown node type for node {n}") - - model.y = pyo.Var(region_dict["nodes"], bounds=slackBounds_rule) - - model.MinCost = pyo.Objective(expr=\ - sum(region_dict["flow costs"][a]*model.flow[a] for a in region_dict["arcs"]) \ - + sum(region_dict["production costs"][n]*(region_dict["supply"][n]-model.y[n]) for n in region_dict["factory nodes"]) \ - + sum(region_dict["revenues"][n]*(region_dict["supply"][n]-model.y[n])for n in region_dict["buyer nodes"]) , - sense=sense) - - def FlowBalance_rule(m, n): - return sum(m.flow[a] for a in arcsout[n])\ - - sum(m.flow[a] for a in arcsin[n])\ - + m.y[n] == region_dict["supply"][n] - model.FlowBalance= pyo.Constraint(region_dict["nodes"], rule=FlowBalance_rule) - - return model - - def global_dict_creator(num_scens, start=0): """Merges the different region_dict thanks to the inter_region_dict in distr to create a global dictionary @@ -114,7 +70,8 @@ def main(): do all the work """ cfg = _parse_args() - model = min_cost_distr_problem(global_dict_creator(num_scens=cfg.num_scens)) + assert cfg.scalable is False, "the global model example has not been adapted for the scalable example" + model = distr.min_cost_distr_problem(global_dict_creator(num_scens=cfg.num_scens)) solver_name = cfg.solver_name opt = pyo.SolverFactory(solver_name) diff --git a/examples/distr/go.bash b/examples/distr/go.bash index 9a73ce6f0..9bdcd04fb 100644 --- a/examples/distr/go.bash +++ b/examples/distr/go.bash @@ -4,15 +4,5 @@ # Execute with "bash go.bash scalable" for the scalable example # Execute with "bash go.bash anything" otherwise -# Right now, chosing between the version with dummy nodes and without dummy nodes is hardwired in distr_admm_cylinders.py - # This file runs either a scalable example or a non scalable example -if [ ${1} = "scalable" ] -then - mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 50 --xhatxbar --lagrangian --mnpr 40 --rel-gap 0.05 --scalable -else - mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 10 --xhatxbar --lagrangian --rel-gap 0.05 -fi - -#mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 50 --xhatxbar --lagrangian --mnpr 40 --rel-gap 0.05 --scalable -#mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 10 --xhatxbar --lagrangian --rel-gap 0.05 \ No newline at end of file +mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name xpress --max-iterations 50 --xhatxbar --lagrangian --mnpr 4 --rel-gap 0.05 From a9df23fb31964d9993178f82277cb5c22b71a9be Mon Sep 17 00:00:00 2001 From: Aymeric Legros Date: Thu, 20 Jun 2024 09:46:06 -0700 Subject: [PATCH 30/31] set up testadmmWrapper.yml with pip install -e . 
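
"python setup.py develop" invokes setuptools directly and is deprecated;
"pip install -e ." is the supported way to get an editable install through
pip. As an illustrative check (a sketch, not part of the tested workflow),
the install can be verified to resolve to the repository checkout:

    pip install -e .
    python -c "import mpisppy; print(mpisppy.__file__)"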
--- .github/workflows/testadmmWrapper.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/testadmmWrapper.yml b/.github/workflows/testadmmWrapper.yml index 008cc0138..c048e32d5 100644 --- a/.github/workflows/testadmmWrapper.yml +++ b/.github/workflows/testadmmWrapper.yml @@ -32,7 +32,7 @@ jobs: - name: setup the program run: | - python setup.py develop + pip install -e . - name: run tests timeout-minutes: 100 From d979b1dd501dd5ba1c29e47dce3281b927351fbb Mon Sep 17 00:00:00 2001 From: Bernard Knueven Date: Fri, 21 Jun 2024 11:52:34 -0600 Subject: [PATCH 31/31] trying to resolve numpy failures --- .github/workflows/nompi4py.yml | 4 ++-- .github/workflows/pyotracker.yml | 1 - .github/workflows/schur_complement.yml | 2 +- .github/workflows/testaph.yml | 1 - .github/workflows/testbunpick.yml | 2 +- .github/workflows/testconfint.yml | 4 ++-- .github/workflows/testgradient.yml | 2 +- .github/workflows/testpysp.yml | 1 - .github/workflows/testwithcylinders.yml | 2 +- setup.py | 2 +- 10 files changed, 9 insertions(+), 12 deletions(-) diff --git a/.github/workflows/nompi4py.yml b/.github/workflows/nompi4py.yml index 47fd84a2f..8918fac1c 100644 --- a/.github/workflows/nompi4py.yml +++ b/.github/workflows/nompi4py.yml @@ -24,8 +24,8 @@ jobs: auto-activate-base: false - name: Install dependencies run: | - pip install pyomo sphinx sphinx_rtd_theme cplex - pip install xpress numpy pandas scipy dill + pip install sphinx sphinx_rtd_theme cplex + pip install xpress pandas dill - name: setup the program run: | diff --git a/.github/workflows/pyotracker.yml b/.github/workflows/pyotracker.yml index f6fb9bdbc..5c8e59540 100644 --- a/.github/workflows/pyotracker.yml +++ b/.github/workflows/pyotracker.yml @@ -30,7 +30,6 @@ jobs: conda install mpi4py pandas setuptools git pip install sphinx sphinx_rtd_theme cplex pip install xpress - pip install numpy pip install matplotlib - name: set up pyomo diff --git a/.github/workflows/schur_complement.yml b/.github/workflows/schur_complement.yml index 22aed76af..8df9de5e0 100644 --- a/.github/workflows/schur_complement.yml +++ b/.github/workflows/schur_complement.yml @@ -25,7 +25,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install numpy scipy nose pybind11 + pip install nose pybind11 conda install openmpi pymumps --no-update-deps pip install mpi4py pandas pip install git+https://github.com/pyutilib/pyutilib.git diff --git a/.github/workflows/testaph.yml b/.github/workflows/testaph.yml index 6f1060455..0a5e68e55 100644 --- a/.github/workflows/testaph.yml +++ b/.github/workflows/testaph.yml @@ -28,7 +28,6 @@ jobs: run: | conda install mpi4py pandas setuptools pip install pyomo xpress cplex - pip install numpy - name: setup the program run: | diff --git a/.github/workflows/testbunpick.yml b/.github/workflows/testbunpick.yml index 17d57150e..dd4036a6e 100644 --- a/.github/workflows/testbunpick.yml +++ b/.github/workflows/testbunpick.yml @@ -26,7 +26,7 @@ jobs: auto-activate-base: false - name: Install dependencies run: | - conda install mpi4py numpy setuptools + conda install mpi4py "numpy<2" setuptools pip install pyomo pandas xpress cplex scipy sympy dill PyYAML Pympler networkx pandas - name: setup the program diff --git a/.github/workflows/testconfint.yml b/.github/workflows/testconfint.yml index 19d5ab9e0..c4ae0a1b6 100644 --- a/.github/workflows/testconfint.yml +++ b/.github/workflows/testconfint.yml @@ -26,7 +26,7 @@ jobs: auto-activate-base: false - name: Install dependencies run: | - conda 
install mpi4py numpy setuptools + conda install mpi4py "numpy<2" setuptools pip install pyomo pandas xpress cplex scipy sympy dill - name: setup the program @@ -43,4 +43,4 @@ jobs: timeout-minutes: 10 run: | cd mpisppy/tests - python test_conf_int_aircond.py \ No newline at end of file + python test_conf_int_aircond.py diff --git a/.github/workflows/testgradient.yml b/.github/workflows/testgradient.yml index 9babb7e32..018d185cb 100644 --- a/.github/workflows/testgradient.yml +++ b/.github/workflows/testgradient.yml @@ -24,7 +24,7 @@ jobs: auto-activate-base: false - name: Install dependencies run: | - conda install mpi4py numpy setuptools cmake + conda install mpi4py "numpy<2" setuptools cmake pip install pyomo pandas xpress cplex scipy sympy dill - name: setup the program diff --git a/.github/workflows/testpysp.yml b/.github/workflows/testpysp.yml index 6f461098f..f0d64d2a0 100644 --- a/.github/workflows/testpysp.yml +++ b/.github/workflows/testpysp.yml @@ -29,7 +29,6 @@ jobs: run: | conda install mpi4py pandas setuptools pytest pyyaml networkx pip install pyomo xpress cplex - pip install numpy - name: setup the program run: | diff --git a/.github/workflows/testwithcylinders.yml b/.github/workflows/testwithcylinders.yml index 2602dcceb..36a4aa403 100644 --- a/.github/workflows/testwithcylinders.yml +++ b/.github/workflows/testwithcylinders.yml @@ -26,7 +26,7 @@ jobs: auto-activate-base: false - name: Install dependencies run: | - conda install mpi4py numpy setuptools + conda install mpi4py "numpy<2" setuptools pip install pyomo pandas xpress cplex scipy - name: setup the program diff --git a/setup.py b/setup.py index a53d05d0b..cf2bbe59a 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ packages=packages, python_requires='>=3.8', install_requires=[ - 'numpy', + 'numpy<2', 'scipy', 'pyomo>=6.4', ],
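
Note on the "numpy<2" pins in this last patch: NumPy 2.0 (released June 2024)
removed long-deprecated aliases and changed the binary interface, so extension
modules built against NumPy 1.x can fail to import under 2.x. Below is a
minimal fail-fast guard a test driver could add; it is an illustrative sketch
only, not part of the patch series, and the error message text is an
assumption.

    # Illustrative only: fail fast if the environment resolved to NumPy 2.x,
    # which the workflows above pin out via "numpy<2".
    import numpy as np

    if int(np.__version__.split(".")[0]) >= 2:
        raise RuntimeError(
            f"numpy {np.__version__} found, but mpi-sppy currently requires "
            "numpy<2 (see setup.py install_requires)"
        )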