Skip to content

Commit

Permalink
Dyn rho (#435)
Browse files Browse the repository at this point in the history
* dynamic code is factored; gradient works; sensi is next

* sensi rho and grad rho share dynamics

* ruff

* change import in test

* add a little test for dynamic gradient-based rho
  • Loading branch information
DLWoodruff authored Oct 1, 2024
1 parent 79d3310 commit f4cc7e6
Show file tree
Hide file tree
Showing 8 changed files with 179 additions and 71 deletions.
18 changes: 16 additions & 2 deletions examples/generic_cylinders.bash
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,21 @@

SOLVER="cplex"

cd farmer
mpiexec -np 3 python -m mpi4py ../../mpisppy/generic_cylinders.py --module-name farmer --num-scens 3 --solver-name ${SOLVER} --max-iterations 10 --max-solver-threads 4 --default-rho 1 --lagrangian --xhatshuffle --rel-gap 0.01 --solution-base-name farmer_nonants

mpiexec -np 3 python -m mpi4py ../../mpisppy/generic_cylinders.py --module-name farmer --num-scens 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --solver-name=${SOLVER} --xhatpath=./farmer_nonants.npy --grad-order-stat 0.0 --xhatxbar --ph-ob --max-stalled-iters 5000 --grad-rho-setter --rel-gap 0.001

# now do it again, but this time using dynamic rho
echo "^^^ grad rho dynamic ^^^"
mpiexec -np 3 python -m mpi4py ../../mpisppy/generic_cylinders.py --module-name farmer --num-scens 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --solver-name=${SOLVER} --xhatpath=./farmer_nonants.npy --grad-order-stat 0.0 --xhatxbar --ph-ob --max-stalled-iters 5000 --grad-rho-setter --rel-gap 0.001 --dynamic-rho-dual-crit --dynamic-rho-dual-thresh 0.1

echo "^^^ sensi rho dynamic ^^^"
mpiexec -np 3 python -m mpi4py ../../mpisppy/generic_cylinders.py --module-name farmer --num-scens 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --solver-name=${SOLVER} --xhatpath=./farmer_nonants.npy --grad-order-stat 0.0 --xhatxbar --ph-ob --max-stalled-iters 5000 --sensi-rho --rel-gap 0.001 --dynamic-rho-dual-crit --dynamic-rho-dual-thresh 0.1

cd ..
exit

echo "^^^ netdes sensi-rho ^^^"
cd netdes
mpiexec -np 3 python -m mpi4py ../../mpisppy/generic_cylinders.py --module-name netdes --netdes-data-path ./data --instance-name network-10-20-L-01 --solver-name ${SOLVER} --max-iterations 10 --max-solver-threads 4 --default-rho 1 --lagrangian --xhatshuffle --rel-gap 0.01 --sensi-rho
Expand All @@ -11,7 +26,6 @@ cd ..
echo "^^^ farmer sensi-rho ^^^"
mpiexec -np 3 python -m mpi4py ../mpisppy/generic_cylinders.py --module-name farmer/farmer --num-scens 3 --solver-name ${SOLVER} --max-iterations 10 --max-solver-threads 4 --default-rho 1 --lagrangian --xhatshuffle --rel-gap 0.01 --sensi-rho

exit

# sslp EF
echo "^^^ sslp ef ^^^"
Expand Down Expand Up @@ -94,7 +108,7 @@ mpiexec -np 3 python -m mpi4py ../../mpisppy/generic_cylinders.py --module-name

# now do it again, but this time using dynamic rho

mpiexec -np 3 python -m mpi4py ../../mpisppy/generic_cylinders.py --module-name farmer --num-scens 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --solver-name=${SOLVER} --xhatpath=./farmer_nonants.npy --grad-order-stat 0.0 --xhatxbar --ph-ob --max-stalled-iters 5000 --grad-rho-setter --rel-gap 0.001 --grad-dynamic-dual-crit --grad-dynamic-dual-thresh 0.1
mpiexec -np 3 python -m mpi4py ../../mpisppy/generic_cylinders.py --module-name farmer --num-scens 3 --bundles-per-rank=0 --max-iterations=100 --default-rho=1 --solver-name=${SOLVER} --xhatpath=./farmer_nonants.npy --grad-order-stat 0.0 --xhatxbar --ph-ob --max-stalled-iters 5000 --grad-rho-setter --rel-gap 0.001 --dynamic-rho-dual-crit --dynamic-rho-dual-thresh 0.1

cd ..
# end gradient based rho demo
Expand Down
113 changes: 113 additions & 0 deletions mpisppy/extensions/dyn_rho_base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
###############################################################################
# mpi-sppy: MPI-based Stochastic Programming in PYthon
#
# Copyright (c) 2024, Lawrence Livermore National Security, LLC, Alliance for
# Sustainable Energy, LLC, The Regents of the University of California, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for
# full copyright and license information.
###############################################################################

# A dynamic rho base class that assumes a member function compute_and_update_rho
# As of Oct 2024, it mainly provides services and needs a better miditer
# Children should call the parent for post_iter0


import numpy as np

import mpisppy.extensions.extension
from mpisppy.utils.wtracker import WTracker
from mpisppy import global_toc

# for trapping numpy warnings
import warnings

class Dyn_Rho_extension_base(mpisppy.extensions.extension.Extension):
    """Base class for dynamic rho extensions (e.g. gradient- or sensitivity-based).

    Provides shared services: primal/dual convergence caches, the update
    criteria driven by ``cfg.dynamic_rho_*`` options, and display helpers.
    Subclasses are expected to supply ``self.cfg`` (a Config object carrying
    the ``dynamic_rho_primal_crit/dual_crit/primal_thresh/dual_thresh``
    entries) and, if they rely on the inherited ``miditer``, the
    ``self.grad_object`` and ``self.rho_setter`` attributes used there.

    Args:
        opt (PHBase object): gives the problem
        comm: MPI communicator; accepted for interface uniformity
            (not used directly in this base class)
    """
    def __init__(self, opt, comm=None):
        super().__init__(opt)
        self.cylinder_rank = self.opt.cylinder_rank

        # Histories of the primal and dual convergence metrics; the update
        # criteria compare the two most recent entries of each.
        self.primal_conv_cache = []
        self.dual_conv_cache = []
        # Tracks W (dual weight) values across iterations; W_diff() feeds
        # the dual-based update criterion.
        self.wt = WTracker(self.opt)

    def _display_rho_values(self):
        # Debug helper: print the first few rho values of one local scenario
        # (the break makes this a single-scenario sample, not a full dump).
        for sname, scenario in self.opt.local_scenarios.items():
            rho_list = [scenario._mpisppy_model.rho[ndn_i]._value
                        for ndn_i, _ in scenario._mpisppy_data.nonant_indices.items()]
            print(sname, 'rho values: ', rho_list[:5])
            break

    def _display_W_values(self):
        # Debug helper: print the W values of one local scenario only.
        for (sname, scenario) in self.opt.local_scenarios.items():
            W_list = [w._value for w in scenario._mpisppy_model.W.values()]
            print(sname, 'W values: ', W_list)
            break


    def _update_rho_primal_based(self):
        """Return True if the relative primal convergence change is at or
        below ``cfg.dynamic_rho_primal_thresh``.

        Numpy runtime warnings (e.g. divide-by-zero when the previous
        convergence value is 0) are promoted to errors and treated as
        "no update recommended".
        """
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            curr_conv, last_conv = self.primal_conv_cache[-1], self.primal_conv_cache[-2]
            try:
                primal_diff = np.abs((last_conv - curr_conv) / last_conv)
            except Warning:
                if self.cylinder_rank == 0:
                    print(f"dyn_rho extension reports {last_conv=} {curr_conv=} - no rho updates recommended")
                return False
            return (primal_diff <= self.cfg.dynamic_rho_primal_thresh)

    def _update_rho_dual_based(self):
        """Return True if the relative dual (W) change is at or below
        ``cfg.dynamic_rho_dual_thresh``; a zero previous value counts as
        converged (diff of 0)."""
        curr_conv, last_conv = self.dual_conv_cache[-1], self.dual_conv_cache[-2]
        dual_diff = np.abs((last_conv - curr_conv) / last_conv) if last_conv != 0 else 0
        #print(f'{dual_diff =}')
        return (dual_diff <= self.cfg.dynamic_rho_dual_thresh)

    def _update_recommended(self):
        """Return True if any enabled criterion (primal and/or dual, per the
        cfg flags) recommends recomputing rho this iteration."""
        return (self.cfg.dynamic_rho_primal_crit and self._update_rho_primal_based()) or \
               (self.cfg.dynamic_rho_dual_crit and self._update_rho_dual_based())

    def pre_iter0(self):
        pass

    def post_iter0(self):
        # Seed the convergence caches so miditer has two entries to compare.
        # Children should call this via super().post_iter0().
        self.primal_conv_cache.append(self.opt.convergence_diff())
        self.dual_conv_cache.append(self.wt.W_diff())

    def miditer(self):
        # NOTE(review): this default miditer uses self.grad_object and
        # self.rho_setter, which this base class never sets -- it is only
        # usable by subclasses that define them (see module header note
        # about needing a better miditer).
        self.primal_conv_cache.append(self.opt.convergence_diff())
        self.dual_conv_cache.append(self.wt.W_diff())
        if self.opt._PHIter == 1:
            self.grad_object.write_grad_cost()
        if self.opt._PHIter == 1 or self._update_recommended():
            self.grad_object.write_grad_rho()
            rho_setter_kwargs = self.opt.options['rho_setter_kwargs'] \
                                if 'rho_setter_kwargs' in self.opt.options \
                                else dict()

            sum_rho = 0.0
            num_rhos = 0
            for sname, scenario in self.opt.local_scenarios.items():
                rholist = self.rho_setter(scenario, **rho_setter_kwargs)
                for (vid, rho) in rholist:
                    (ndn, i) = scenario._mpisppy_data.varid_to_nonant_index[vid]
                    # Assign the new rho to the mutable Pyomo Param entry.
                    scenario._mpisppy_model.rho[(ndn, i)] = rho
                    sum_rho += rho
                    num_rhos += 1

            # Average over this rank's local scenarios only (hence "rank 0"
            # in the message, since global_toc reports from rank 0).
            rho_avg = sum_rho / num_rhos

            global_toc(f"Rho values recomputed - average rank 0 rho={rho_avg}")

    def enditer(self):
        pass

    def post_everything(self):
        pass
54 changes: 5 additions & 49 deletions mpisppy/extensions/gradient_extension.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,18 +8,14 @@
###############################################################################

import os
import numpy as np

import mpisppy.extensions.extension
import mpisppy.extensions.dyn_rho_base
import mpisppy.utils.gradient as grad
import mpisppy.utils.find_rho as find_rho
from mpisppy.utils.wtracker import WTracker
from mpisppy import global_toc

# for trapping numpy warnings
import warnings

class Gradient_extension(mpisppy.extensions.extension.Extension):
class Gradient_extension(mpisppy.extensions.dyn_rho_base.Dyn_Rho_extension_base):
"""
This extension makes PH use gradient-rho and the corresponding rho setter.
Expand All @@ -32,8 +28,8 @@ class Gradient_extension(mpisppy.extensions.extension.Extension):
"""
def __init__(self, opt, comm=None):
super().__init__(opt)
self.cylinder_rank = self.opt.cylinder_rank
super().__init__(opt, comm=comm)

self.cfg = opt.options["gradient_extension_options"]["cfg"]
# This is messy because we want to be able to use or give rhos as requested.
# (e.g., if the user gave us an input file, use that)
Expand All @@ -60,53 +56,13 @@ def __init__(self, opt, comm=None):

self.grad_object = grad.Find_Grad(opt, self.cfg)
self.rho_setter = find_rho.Set_Rho(self.cfg).rho_setter
self.primal_conv_cache = []
self.dual_conv_cache = []
self.wt = WTracker(self.opt)

def _display_rho_values(self):
for sname, scenario in self.opt.local_scenarios.items():
rho_list = [scenario._mpisppy_model.rho[ndn_i]._value
for ndn_i, _ in scenario._mpisppy_data.nonant_indices.items()]
print(sname, 'rho values: ', rho_list[:5])
break

def _display_W_values(self):
for (sname, scenario) in self.opt.local_scenarios.items():
W_list = [w._value for w in scenario._mpisppy_model.W.values()]
print(sname, 'W values: ', W_list)
break


def _update_rho_primal_based(self):
with warnings.catch_warnings():
warnings.filterwarnings('error')
curr_conv, last_conv = self.primal_conv_cache[-1], self.primal_conv_cache[-2]
try:
primal_diff = np.abs((last_conv - curr_conv) / last_conv)
except Warning:
if self.cylinder_rank == 0:
print(f"Gradient extension reports {last_conv=} {curr_conv=} - no rho updates recommended")
return False
return (primal_diff <= self.cfg.grad_dynamic_primal_thresh)

def _update_rho_dual_based(self):
curr_conv, last_conv = self.dual_conv_cache[-1], self.dual_conv_cache[-2]
dual_diff = np.abs((last_conv - curr_conv) / last_conv) if last_conv != 0 else 0
#print(f'{dual_diff =}')
return (dual_diff <= self.cfg.grad_dynamic_dual_thresh)

def _update_recommended(self):
return (self.cfg.grad_dynamic_primal_crit and self._update_rho_primal_based()) or \
(self.cfg.grad_dynamic_dual_crit and self._update_rho_dual_based())

def pre_iter0(self):
pass

def post_iter0(self):
global_toc("Using gradient-based rho setter")
self.primal_conv_cache.append(self.opt.convergence_diff())
self.dual_conv_cache.append(self.wt.W_diff())
super().post_iter0()

def miditer(self):
self.primal_conv_cache.append(self.opt.convergence_diff())
Expand Down
28 changes: 21 additions & 7 deletions mpisppy/extensions/sensi_rho.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,19 +8,19 @@
###############################################################################

import numpy as np


import mpisppy.extensions.extension
from mpisppy import global_toc
import mpisppy.extensions.dyn_rho_base
import mpisppy.MPI as MPI
from mpisppy.utils.nonant_sensitivities import nonant_sensitivies


class SensiRho(mpisppy.extensions.extension.Extension):
class SensiRho(mpisppy.extensions.dyn_rho_base.Dyn_Rho_extension_base):
"""
Rho determination algorithm using nonant sensitivities
"""

def __init__(self, ph):
def __init__(self, ph, comm=None):
super().__init__(ph, comm=comm)
self.ph = ph

self.multiplier = 1.0
Expand All @@ -30,6 +30,7 @@ def __init__(self, ph):
and "multiplier" in ph.options["sensi_rho_options"]
):
self.multiplier = ph.options["sensi_rho_options"]["multiplier"]
self.cfg = ph.options["sensi_rho_options"]["cfg"]

@staticmethod
def _compute_rho_min_max(ph, npop, mpiop, start):
Expand Down Expand Up @@ -134,7 +135,6 @@ def _compute_rho_max(ph):
def _compute_rho_min(ph):
return SensiRho._compute_rho_min_max(ph, np.minimum, MPI.MIN, np.inf)

# Implementation of a virtual function
def compute_and_update_rho(self):
ph = self.ph

Expand Down Expand Up @@ -167,10 +167,24 @@ def pre_iter0(self):
pass

def post_iter0(self):
global_toc("Using sensi-rho rho setter")
super().post_iter0()
self.compute_and_update_rho()

def miditer(self):
pass
self.primal_conv_cache.append(self.opt.convergence_diff())
self.dual_conv_cache.append(self.wt.W_diff())

if self._update_recommended():
self.compute_and_update_rho()
sum_rho = 0.0
num_rhos = 0 # could be computed...
for sname, s in self.opt.local_scenarios.items():
for ndn_i, nonant in s._mpisppy_data.nonant_indices.items():
sum_rho += s._mpisppy_model.rho[ndn_i]._value
num_rhos += 1
rho_avg = sum_rho / num_rhos
global_toc(f"Rho values recomputed - average rank 0 rho={rho_avg}")

def enditer(self):
pass
Expand Down
2 changes: 1 addition & 1 deletion mpisppy/generic_cylinders.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ def _parse_args(m):
cfg.wxbar_read_write_args()
cfg.tracking_args()
cfg.gradient_args()
cfg.dynamic_gradient_args()
cfg.dynamic_rho_args()
cfg.reduced_costs_args()
cfg.sep_rho_args()
cfg.coeff_rho_args()
Expand Down
15 changes: 13 additions & 2 deletions mpisppy/tests/test_gradient_rho.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ def _create_cfg():
cfg.popular_args()
cfg.two_sided_args()
cfg.ph_args()
cfg.dynamic_gradient_args()
cfg.dynamic_rho_args()
cfg.solver_name = solver_name
cfg.default_rho = 1
return cfg
Expand Down Expand Up @@ -124,7 +124,6 @@ def test_grad_cost_and_rho(self):
self.assertAlmostEqual(float(rows[3][1]), 2.0616161616161617)
os.remove(self.cfg.grad_cost_file_out)
os.remove(self.cfg.grad_rho_file_out)



###############################################
Expand Down Expand Up @@ -268,6 +267,18 @@ def test_grad_extensions(self):
self.ph_object = self._run_ph_farmer()
self.assertAlmostEqual(self.ph_object.conv, 2.128968965420187, places=1)

def test_dyn_grad_extensions(self):
print("** test dynamic grad extensions **")
self.cfg.grad_rho_file_out = './_test_rho.csv'
#self.cfg.grad_cost_file_in = './examples/rho_test_data/grad_cost.csv'
self.cfg.xhatpath = './examples/rho_test_data/farmer_cyl_nonants.npy'
self.cfg.max_iterations = 4
self.cfg.dynamic_rho_dual_crit = True
self.cfg.dynamic_rho_dual_thresh = 0.1
self.ph_object = self._run_ph_farmer()
print(f"{self.ph_object.conv=}")
self.assertAlmostEqual(self.ph_object.conv, 0.039598, places=1)


if __name__ == '__main__':
unittest.main()
2 changes: 1 addition & 1 deletion mpisppy/utils/cfg_vanilla.py
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ def add_coeff_rho(hub_dict, cfg):

def add_sensi_rho(hub_dict, cfg):
hub_dict = extension_adder(hub_dict,SensiRho)
hub_dict["opt_kwargs"]["options"]["sensi_rho_options"] = {"multiplier" : cfg.sensi_rho_multiplier}
hub_dict["opt_kwargs"]["options"]["sensi_rho_options"] = {"multiplier" : cfg.sensi_rho_multiplier, "cfg": cfg}

def add_cross_scenario_cuts(hub_dict,
cfg,
Expand Down
18 changes: 9 additions & 9 deletions mpisppy/utils/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -826,27 +826,27 @@ def gradient_args(self):
domain=float,
default=1e3)

def dynamic_gradient_args(self): # AKA adaptive
def dynamic_rho_args(self): # AKA adaptive

self.gradient_args()

self.add_to_config('grad_dynamic_primal_crit',
description="Use dynamic gradient-based primal criterion for update",
self.add_to_config('dynamic_rho_primal_crit',
description="Use dynamic primal criterion for some rho updates",
domain=bool,
default=False)

self.add_to_config('grad_dynamic_dual_crit',
description="Use dynamic gradient-based dual criterion for update",
self.add_to_config('dynamic_rho_dual_crit',
description="Use dynamic dual criterion for some rho updates",
domain=bool,
default=False)

self.add_to_config("grad_dynamic_primal_thresh",
description="primal threshold for diff during gradient calcs",
self.add_to_config("dynamic_rho_primal_thresh",
description="primal threshold for diff during dynamic rho calcs",
domain=float,
default=0.1)

self.add_to_config("grad_dynamic_dual_thresh",
description="dual threshold for abs norm during gradient calcs",
self.add_to_config("dynamic_rho_dual_thresh",
                           description="dual threshold for diff during dynamic rho calcs",
domain=float,
default=0.1)

Expand Down

0 comments on commit f4cc7e6

Please sign in to comment.