Updating optimize to handle multiple risk constraints (#596)
* Updating optimize to handle multiple risk constraints

* Lint

* adding assertion errors to check lengths of inputs associated with constraints

* Added support for List of `alpha`

* Update fixtures.py

* Update interfaces.py

* Update type hint for qoi

* forcing risk_bound, qoi, and alpha to be Lists with a warning

* Update interfaces.py

* preallocate risk_estimate in ouu code

* fixing tests

* Update ouu.py

* checking shape of samples

* Update interfaces.ipynb

* Update interfaces.ipynb

* Shape of samples changes based on intervention tensor size

* Changing tensor shape

* forcing tensors to be 1d

* fixing input lists to intervention templates

* Updated ouu to handle penalty for multiple constraints

* Updating `sample` to fix tensor size issue

* Update optimize_interface.ipynb

* Adding tests for multiple constraints

* Lint

* Update qoi.py

* Update interfaces.ipynb

* Updating type hints

* Lint

* recreate failed runs for tensor size issues

* Update ouu.py

* Fixing tensor shape for sampling to always be 3D

* Update optimize_interface.ipynb

* Update interfaces.py

* Update interfaces.py
anirban-chaudhuri authored Aug 27, 2024
1 parent f5c5bec commit 5401b75
Showing 9 changed files with 692 additions and 794 deletions.
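The headline change: `optimize` now accepts lists for `qoi`, `risk_bound`, and `alpha`, one entry per risk constraint (scalars are still accepted and wrapped into single-element lists). A minimal sketch of the new call pattern; the model path, state name `I_state`, parameter names, and numeric values below are hypothetical placeholders, not values from this commit:

```python
import numpy as np
import torch
from functools import partial

from pyciemss.integration_utils.intervention_builder import param_value_objective
from pyciemss.interfaces import optimize
from pyciemss.ouu.qoi import obs_max_qoi, obs_nday_average_qoi

# Hypothetical intervention template: optimize the value of "beta" applied at day 10.
intervention_template = param_value_objective(
    param_name=["beta"],
    start_time=[torch.tensor(10.0)],
)

result = optimize(
    "model.json",  # hypothetical AMR model path
    end_time=90.0,
    logging_step_size=1.0,
    # Two risk constraints: one on the peak of I_state, one on its final-day value.
    qoi=[
        partial(obs_max_qoi, contexts=["I_state"]),
        partial(obs_nday_average_qoi, contexts=["I_state"], ndays=1),
    ],
    risk_bound=[300.0, 100.0],
    alpha=[0.95, 0.90],  # one superquantile level per constraint
    static_parameter_interventions=intervention_template,
    objfun=lambda x: np.sum(np.abs(x - 0.35)),  # hypothetical objective on the intervention value
    initial_guess_interventions=[0.2],
    bounds_interventions=[[0.1], [0.5]],
)
```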
244 changes: 122 additions & 122 deletions docs/source/interfaces.ipynb

Large diffs are not rendered by default.

1,071 changes: 465 additions & 606 deletions docs/source/optimize_interface.ipynb

Large diffs are not rendered by default.

38 changes: 29 additions & 9 deletions pyciemss/integration_utils/intervention_builder.py
@@ -16,6 +16,7 @@ def param_value_objective(
         if param_value[count] is None:
             if not callable(param_value[count]):
                 param_value[count] = lambda y: torch.tensor(y)
+    # Note that param_value needs to be Callable

     def intervention_generator(
         x: torch.Tensor,
@@ -29,13 +30,19 @@ def intervention_generator(
         for count in range(param_size):
             if start_time[count].item() in static_parameter_interventions:
                 static_parameter_interventions[start_time[count].item()].update(
-                    {param_name[count]: param_value[count](x[count].item())}
+                    {
+                        param_name[count]: torch.atleast_1d(
+                            param_value[count](x[count].item())
+                        )
+                    }
                 )
             else:
                 static_parameter_interventions.update(
                     {
                         start_time[count].item(): {
-                            param_name[count]: param_value[count](x[count].item())
+                            param_name[count]: torch.atleast_1d(
+                                param_value[count](x[count].item())
+                            )
                         }
                     }
                 )
@@ -46,9 +53,11 @@ def intervention_generator(

 def start_time_objective(
     param_name: List[str],
-    param_value: List[Intervention],
+    param_value: List[torch.Tensor],
 ) -> Callable[[torch.Tensor], Dict[float, Dict[str, Intervention]]]:
     param_size = len(param_name)
+    # Note: code below will only work for tensors and not callable functions
+    param_value = [torch.atleast_1d(y) for y in param_value]

     def intervention_generator(
         x: torch.Tensor,
@@ -62,11 +71,15 @@ def intervention_generator(
         for count in range(param_size):
             if x[count].item() in static_parameter_interventions:
                 static_parameter_interventions[x[count].item()].update(
-                    {param_name[count]: param_value[count]}
+                    {param_name[count]: torch.atleast_1d(param_value[count])}
                 )
             else:
                 static_parameter_interventions.update(
-                    {x[count].item(): {param_name[count]: param_value[count]}}
+                    {
+                        x[count].item(): {
+                            param_name[count]: torch.atleast_1d(param_value[count])
+                        }
+                    }
                 )
         return static_parameter_interventions

@@ -97,14 +110,18 @@ def intervention_generator(
         for count in range(param_size):
             if x[count * 2].item() in static_parameter_interventions:
                 static_parameter_interventions[x[count * 2].item()].update(
-                    {param_name[count]: param_value[count](x[count * 2 + 1].item())}
+                    {
+                        param_name[count]: torch.atleast_1d(
+                            param_value[count](x[count * 2 + 1].item())
+                        )
+                    }
                 )
             else:
                 static_parameter_interventions.update(
                     {
                         x[count * 2].item(): {
-                            param_name[count]: param_value[count](
-                                x[count * 2 + 1].item()
+                            param_name[count]: torch.atleast_1d(
+                                param_value[count](x[count * 2 + 1].item())
                             )
                         }
                     }
@@ -120,7 +137,10 @@ def intervention_func_combinator(
     ],
     intervention_func_lengths: List[int],
 ) -> Callable[[torch.Tensor], Dict[float, Dict[str, Intervention]]]:
-    assert len(intervention_funcs) == len(intervention_func_lengths)
+    assert len(intervention_funcs) == len(intervention_func_lengths), (
+        f"Size mismatch between number of intervention functions ('{len(intervention_funcs)}')"
+        f"and number of intervention function lengths ('{len(intervention_func_lengths)}') "
+    )

     total_length = sum(intervention_func_lengths)

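For context, a sketch of how these templates behave after the `torch.atleast_1d` changes: each generated callable maps a 1-D decision tensor to a `{time: {parameter: 1-D tensor}}` dictionary, and `intervention_func_combinator` splits a concatenated decision tensor across templates. The parameter names, times, and values below are illustrative assumptions, not taken from this commit:

```python
import torch

from pyciemss.integration_utils.intervention_builder import (
    intervention_func_combinator,
    param_value_objective,
    start_time_objective,
)

# Template 1: optimize the value of "beta", applied at a fixed time (day 5).
value_template = param_value_objective(
    param_name=["beta"],
    start_time=[torch.tensor(5.0)],
)

# Template 2: optimize *when* "gamma" is set to a fixed value of 0.4.
time_template = start_time_objective(
    param_name=["gamma"],
    param_value=[torch.tensor(0.4)],
)

# Combine: the first entry of x feeds template 1, the second feeds template 2.
combined = intervention_func_combinator([value_template, time_template], [1, 1])

interventions = combined(torch.tensor([0.25, 12.0]))
# Roughly: {5.0: {"beta": tensor([0.2500])}, 12.0: {"gamma": tensor([0.4000])}}
print(interventions)
```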
38 changes: 27 additions & 11 deletions pyciemss/interfaces.py
@@ -529,11 +529,14 @@ def wrapped_model():
             num_samples=num_samples,
             parallel=parallel,
         )()
+        samples = {
+            k: (v.squeeze() if len(v.shape) > 2 else v) for k, v in samples.items()
+        }

         risk_results = {}
         for k, vals in samples.items():
             if "_state" in k:
-                # qoi is assumed to be the last day of simulation
+                # Note: qoi is assumed to be the last day of simulation
                 qoi_sample = vals.detach().numpy()[:, -1]
                 sq_est = alpha_superquantile(qoi_sample, alpha=alpha)
                 risk_results.update({k: {"risk": [sq_est], "qoi": qoi_sample}})
@@ -769,16 +772,16 @@ def optimize(
     model_path_or_json: Union[str, Dict],
     end_time: float,
     logging_step_size: float,
-    qoi: Callable,
-    risk_bound: float,
+    qoi: Union[List[Callable[[Any], np.ndarray]], Callable[[Any], np.ndarray]],
+    risk_bound: Union[List[float], float],
     static_parameter_interventions: Callable[
         [torch.Tensor], Dict[float, Dict[str, Intervention]]
     ],
     objfun: Callable,
     initial_guess_interventions: List[float],
     bounds_interventions: List[List[float]],
     *,
-    alpha: float = 0.95,
+    alpha: Union[List[float], float] = [0.95],
     solver_method: str = "dopri5",
     solver_options: Dict[str, Any] = {},
     start_time: float = 0.0,
@@ -803,10 +806,10 @@ def optimize(
         - The end time of the sampled simulation.
     logging_step_size: float
         - The step size to use for logging the trajectory.
-    qoi: Callable
-        - A callable function defining the quantity of interest to optimize over.
-    risk_bounds: float
-        - The threshold on the risk constraint.
+    qoi: Union[List[Callable[[Any], np.ndarray]],Callable[[Any], np.ndarray]]
+        - A (list of) callable function(s) defining the quantity of interest to optimize over.
+    risk_bounds: Union[List[float], float]
+        - The (list of) threshold(s) on the risk constraint.
     static_parameter_interventions: Callable[[torch.Tensor], Dict[float, Dict[str, Intervention]]]
         - A callable function of static parameter interventions to optimize over.
         - The callable functions are created using the provided templates:
@@ -823,6 +826,8 @@ def optimize(
     bounds_interventions: List[List[float]]
         - The lower and upper bounds for intervention parameter.
         - Bounds are a list of the form [[lower bounds], [upper bounds]]
+    alpha: Union[List[float], float]
+        - A (list of) risk preference parameter(s) for alpha-superquantile
     solver_method: str
         - The method to use for solving the ODE. See torchdiffeq's `odeint` method for more details.
         - If performance is incredibly slow, we suggest using `euler` to debug.
Expand Down Expand Up @@ -863,21 +868,32 @@ def optimize(
- Optimization results as scipy object.
"""
check_solver(solver_method, solver_options)
if not isinstance(risk_bound, list):
risk_bound = [risk_bound]
if not isinstance(qoi, list):
qoi = [qoi]
if not isinstance(alpha, list):
alpha = [alpha]
assert len(risk_bound) == len(alpha) and len(risk_bound) == len(qoi), (
f"Size mismatch between qoi ('{len(qoi)}'), risk_bound ('{len(risk_bound)}') "
f"and alpha ('{len(alpha)}')"
)

with torch.no_grad():
control_model = CompiledDynamics.load(model_path_or_json)
bounds_np = np.atleast_2d(bounds_interventions)
u_min = bounds_np[0, :]
u_max = bounds_np[1, :]
# Set up risk estimation
risk_measures = [lambda z: alpha_superquantile(z, alpha=a) for a in alpha]
RISK = computeRisk(
model=control_model,
interventions=static_parameter_interventions,
qoi=qoi,
end_time=end_time,
logging_step_size=logging_step_size,
start_time=start_time,
risk_measure=lambda z: alpha_superquantile(z, alpha=alpha),
risk_measure=risk_measures,
num_samples=1,
guide=inferred_parameters,
fixed_static_parameter_interventions=fixed_static_parameter_interventions,
@@ -890,7 +906,7 @@ def optimize(
         # Run one sample to estimate model evaluation time
         start_t = time.time()
         init_prediction = RISK.propagate_uncertainty(initial_guess_interventions)
-        RISK.qoi(init_prediction)
+        RISK.qoi[0](init_prediction)
         end_t = time.time()
         forward_time = end_t - start_t
         time_per_eval = forward_time / 1.0
@@ -902,7 +918,7 @@ def optimize(
         # Define constraints >= 0
         constraints = (
             # risk constraint
-            {"type": "ineq", "fun": lambda x: risk_bound - RISK(x)},
+            {"type": "ineq", "fun": lambda x: np.array(risk_bound) - RISK(x)},
             # bounds on control
             {"type": "ineq", "fun": lambda x: x - u_min},
             {"type": "ineq", "fun": lambda x: u_max - x},
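Since `RISK(x)` now returns one superquantile estimate per constraint, the single `ineq` entry above enforces every risk constraint at once: SciPy treats an array-valued constraint function as one inequality per component. A toy, self-contained sketch of that behavior (numbers and functions unrelated to this repository):

```python
import numpy as np
from scipy.optimize import minimize

risk_bound = [4.0, 2.5]

def vector_risk(x):
    # Stand-in for RISK(x): one risk estimate per constraint.
    return np.array([x[0] ** 2, x[0] + x[1]])

constraints = (
    # Feasible iff every component of risk_bound - vector_risk(x) is >= 0.
    {"type": "ineq", "fun": lambda x: np.array(risk_bound) - vector_risk(x)},
)

# Maximize x[0] + x[1] (minimize its negative) subject to both risk constraints.
result = minimize(
    lambda x: -(x[0] + x[1]),
    x0=[0.0, 0.0],
    constraints=constraints,
    method="COBYLA",
)
print(result.x, vector_risk(result.x))  # both risk components stay at or below risk_bound
```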
3 changes: 0 additions & 3 deletions pyciemss/mira_integration/distributions.py
@@ -61,7 +61,6 @@ def mira_beta_to_pyro(parameters: ParameterDict) -> pyro.distributions.Distribut
 def mira_betabinomial_to_pyro(
     parameters: ParameterDict,
 ) -> pyro.distributions.Distribution:
-
     concentration1 = parameters["alpha"]
     concentration0 = parameters["beta"]
     total_count = parameters["numberOfTrials"]
Expand Down Expand Up @@ -143,7 +142,6 @@ def mira_gumbel_to_pyro(parameters: ParameterDict) -> pyro.distributions.Distrib


def mira_laplace_to_pyro(parameters: ParameterDict) -> pyro.distributions.Distribution:

if "location" in parameters.keys():
loc = parameters["location"]
elif "mu" in parameters.keys():
Expand Down Expand Up @@ -189,7 +187,6 @@ def mira_studentt_to_pyro(parameters: ParameterDict) -> pyro.distributions.Distr


def mira_weibull_to_pyro(parameters: ParameterDict) -> pyro.distributions.Distribution:

if "scale" in parameters.keys():
scale = parameters["scale"]
elif "lambda" in parameters.keys():
27 changes: 8 additions & 19 deletions pyciemss/ouu/ouu.py
@@ -66,19 +66,19 @@ def __init__(
         self,
         model: Callable,
         interventions: Callable[[torch.Tensor], Dict[float, Dict[str, Intervention]]],
-        qoi: Callable,
+        qoi: List[Callable[[Any], np.ndarray]],
         end_time: float,
         logging_step_size: float,
         *,
         start_time: float = 0.0,
-        risk_measure: Callable = lambda z: alpha_superquantile(z, alpha=0.95),
+        risk_measure: List[Callable] = [lambda z: alpha_superquantile(z, alpha=0.95)],
         num_samples: int = 1000,
         guide=None,
         fixed_static_parameter_interventions: Dict[float, Dict[str, Intervention]] = {},
         solver_method: str = "dopri5",
         solver_options: Dict[str, Any] = {},
         u_bounds: np.ndarray = np.atleast_2d([[0], [1]]),
-        risk_bound: float = 0.0,
+        risk_bound: List[float] = [0.0],
     ):
         self.model = model
         self.interventions = interventions
@@ -107,15 +107,17 @@ def __call__(self, x):
                 "Selected interventions are out of bounds. Will use a penalty instead of estimating risk."
             )
             risk_estimate = max(
-                2 * self.risk_bound, 10.0
+                np.max(2 * np.array(self.risk_bound)), 10.0
             )  # used as a penalty and the model is not run
         else:
             # Apply intervention and perform forward uncertainty propagation
             samples = self.propagate_uncertainty(x)
             # Compute quanity of interest
-            sample_qoi = self.qoi(samples)
+            sample_qoi = [q(samples) for q in self.qoi]
             # Estimate risk
-            risk_estimate = self.risk_measure(sample_qoi)
+            risk_estimate = np.full(len(self.qoi), np.nan)
+            for i in range(len(self.qoi)):
+                risk_estimate[i] = self.risk_measure[i](sample_qoi[i])
         return risk_estimate

     def propagate_uncertainty(self, x):
@@ -176,27 +178,16 @@ def __init__(
         x0: List[float],
         objfun: Callable,
         constraints: Tuple[Dict[str, object], Dict[str, object], Dict[str, object]],
-        minimizer_kwargs: Dict = dict(
-            method="COBYLA",
-            tol=1e-5,
-            options={"disp": False, "maxiter": 10},
-        ),
         optimizer_algorithm: str = "basinhopping",
         maxfeval: int = 100,
         maxiter: int = 100,
         u_bounds: np.ndarray = np.atleast_2d([[0], [1]]),
     ):
         self.x0 = np.squeeze(np.array([x0]))
         self.objfun = objfun
         self.constraints = constraints
-        self.minimizer_kwargs = minimizer_kwargs.update(
-            {"constraints": self.constraints}
-        )
         self.optimizer_algorithm = optimizer_algorithm
         self.maxiter = maxiter
         self.maxfeval = maxfeval
         self.u_bounds = u_bounds
-        # self.kwargs = kwargs

     def solve(self):
         pbar = tqdm(total=self.maxfeval * (self.maxiter + 1))
@@ -220,8 +211,6 @@ def update_progress(xk):
             },
         )
         take_step = RandomDisplacementBounds(self.u_bounds[0, :], self.u_bounds[1, :])
-        # result = basinhopping(self._vrate, u_init, stepsize=stepsize, T=1.5,
-        #                       niter=self.maxiter, minimizer_kwargs=minimizer_kwargs, take_step=take_step, interval=2)

         result = basinhopping(
             self.objfun,
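The risk measure applied to each QoI in `computeRisk` defaults to the alpha-superquantile (expected shortfall): roughly, the mean of the outcomes at or beyond the alpha-quantile. A rough numpy sketch of the idea, not the exact `alpha_superquantile` implementation used by pyciemss:

```python
import numpy as np

def alpha_superquantile_sketch(samples: np.ndarray, alpha: float = 0.95) -> float:
    """Approximate alpha-superquantile: mean of the worst (1 - alpha) tail."""
    q = np.quantile(samples, alpha)  # the alpha-quantile (VaR-like cutoff)
    tail = samples[samples >= q]     # outcomes at or beyond the cutoff
    return float(tail.mean())

rng = np.random.default_rng(0)
qoi_samples = rng.lognormal(mean=3.0, sigma=0.5, size=10_000)
print(alpha_superquantile_sketch(qoi_samples, alpha=0.95))
# Always >= the 0.95-quantile, so bounding it gives a conservative risk constraint.
```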
7 changes: 3 additions & 4 deletions pyciemss/ouu/qoi.py
@@ -12,9 +12,9 @@ def obs_nday_average_qoi(
     samples is is the output from a Pyro Predictive object.
     samples[VARIABLE] is expected to have dimension (nreplicates, ntimepoints)
     Note: last ndays timepoints is assumed to represent last n-days of simulation.
+    ndays = 1 leads to using the value at the end of the simulation.
     """
-    dataQoI = samples[contexts[0]].detach().numpy()
-
+    dataQoI = samples[contexts[0]][..., 0, :].detach().numpy()
     return np.mean(dataQoI[:, -ndays:], axis=1)


@@ -24,6 +24,5 @@ def obs_max_qoi(samples: Dict[str, torch.Tensor], contexts: List) -> np.ndarray:
     samples is is the output from a Pyro Predictive object.
     samples[VARIABLE] is expected to have dimension (nreplicates, ntimepoints)
     """
-    dataQoI = samples[contexts[0]].detach().numpy()
-
+    dataQoI = samples[contexts[0]][..., 0, :].detach().numpy()
     return np.max(dataQoI, axis=1)
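With the reshaping done elsewhere in this commit, `samples[VARIABLE]` is assumed here to carry an extra middle dimension coming from the intervention tensor, which the new `[..., 0, :]` indexing drops. A small sketch of binding these QoI functions to a state variable before passing them to `optimize`; the variable name `I_state` and the shapes are assumptions for illustration:

```python
from functools import partial

import torch

from pyciemss.ouu.qoi import obs_max_qoi, obs_nday_average_qoi

# Fake Predictive-style output: 100 replicates, 1 intervention slice, 50 timepoints.
samples = {"I_state": torch.rand(100, 1, 50)}

peak_qoi = partial(obs_max_qoi, contexts=["I_state"])
final_week_qoi = partial(obs_nday_average_qoi, contexts=["I_state"], ndays=7)

print(peak_qoi(samples).shape)        # (100,): one peak value per replicate
print(final_week_qoi(samples).shape)  # (100,): mean of the last 7 days per replicate
```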