Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Benchmarking ptdf #296

Draft
wants to merge 15 commits into
base: main
Choose a base branch
from
32 changes: 32 additions & 0 deletions egret/benchmark/benchmark_dcopf.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
"""Benchmark driver for Egret DC-OPF formulations.

Usage: python benchmark_dcopf.py matpower_file dcopf_type
where dcopf_type is one of "btheta", "fullptdf", "virtualptdf"
(hyphens/underscores and case are ignored).
"""
import sys

from egret.models.dcopf import (solve_dcopf,
                                create_btheta_dcopf_model,
                                create_ptdf_dcopf_model,
                                )
from egret.data.model_data import ModelData

# TODO(review): solver is hard-coded; consider accepting it as an optional argument.
solver = "xpress_persistent"

if len(sys.argv) < 3:
    # NOTE: original message had a "Useage" typo and referenced the wrong
    # script name (benchmark_ptdf.py); use argv[0] so it stays correct.
    print(f"Usage: python {sys.argv[0]} matpower_file dcopf_type")
    print("""dcopf_type is one of "btheta", "fullptdf", "virtualptdf" """)
    sys.exit(0)

matpower_file = sys.argv[1]
# Normalize, e.g. "Full-PTDF" / "full_ptdf" -> "fullptdf".
dcopf_type = sys.argv[2].lower().replace("-", "").replace("_", "")

md = ModelData.read(matpower_file)

if dcopf_type == "btheta":
    mdo = solve_dcopf(md, solver, dcopf_model_generator=create_btheta_dcopf_model)
elif "ptdf" in dcopf_type:
    kwargs = {}
    if dcopf_type == "fullptdf":
        kwargs["virtual_ptdf"] = False
    elif dcopf_type == "virtualptdf":
        kwargs["virtual_ptdf"] = True
    else:
        raise RuntimeError(f"Unrecognized dcopf_type {dcopf_type}")
    mdo = solve_dcopf(md, solver, dcopf_model_generator=create_ptdf_dcopf_model, **kwargs)
else:
    raise RuntimeError(f"Unrecognized dcopf_type {dcopf_type}")
130 changes: 92 additions & 38 deletions egret/common/lazy_ptdf_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,22 +151,27 @@ def __init__(self, max_viol_add, md, prepend_str, time=None):
self.violations_store = {}
self.total_violations = 0
self.monitored_violations = 0
self.min_flow_screen = True
self.min_violation = 0.

def get_violations_named(self, name):
for key in self.violations_store:
if key[0] == name:
yield key[1]

def min_flow_violation(self):
if self.violations_store:
return min(self.violations_store.values())
if len(self.violations_store) == self.max_viol_add:
return self.min_violation
else:
return 0.

def _min_violation_key(self):
d = self.violations_store
return min(d, key=d.get)

def update_min_flow_violation(self):
    # Refresh the cached minimum violation after the store changes
    # (an entry added or evicted) so min_flow_violation() stays consistent.
    # Assumes violations_store is non-empty — min() raises on an empty dict;
    # presumably callers only invoke this after an add. TODO confirm.
    self.min_violation = min(self.violations_store.values())

def _add_violation(self, name, other_name, index, val):
if other_name:
key = ( name, (other_name, index) )
Expand All @@ -184,12 +189,13 @@ def _add_violation(self, name, other_name, index, val):
f"violations_store: {self.violations_store}")

del self.violations_store[min_key]
self.update_min_flow_violation()

def _add_violations( self, name, other_name, viol_array, viol_indices):
while viol_indices:
idx = np.argmax(viol_array[viol_indices])
val = viol_array[viol_indices[idx]]
if val < self.min_flow_violation() and len(self.violations_store) >= self.max_viol_add:
if len(self.violations_store) >= self.max_viol_add and val < self.min_flow_violation():
break
# If this violation is close in value to
# one already in the set, it is likely
Expand All @@ -201,17 +207,20 @@ def _add_violations( self, name, other_name, viol_array, viol_indices):
# adding *this* iteration, it's possible to add parallel
# lines, which may not be necessary in many cases (e.g.,
# when the line is binding but not over the limit)
close_to_existing = any( math.isclose( val, existing ) for existing in self.violations_store.values() )
if close_to_existing:
viol_indices.pop(idx)
continue
# NOTE: the above assumes that this line will not be violated
# Since they often are (e.g. contingencies), we leave this
# logic out
#close_to_existing = any( math.isclose( val, existing ) for existing in self.violations_store.values() )
#if close_to_existing:
# viol_indices.pop(idx)
# continue
self._add_violation( name, other_name, viol_indices[idx], val )
viol_indices.pop(idx)

def check_and_add_violations(self, name, flow_array, flow_variable,
upper_lazy_limits, upper_enforced_limits,
lower_lazy_limits, lower_enforced_limits,
monitored_indices, index_names, outer_name=None, PFV=None):
monitored_indices, index_names, outer_name=None, PFV=None, get_counts=False):

if outer_name:
# contingencies are named by cn, branch_idx, reduce to
Expand All @@ -224,43 +233,63 @@ def check_and_add_violations(self, name, flow_array, flow_variable,
## get the indices of the violation
## here filter by least violation in violations_store
## in the limit, this will become 0 eventually --
upper_viol_lazy_idx = np.nonzero(upper_viol_lazy_array > self.min_flow_violation())[0]
upper_viol_lazy_idx = np.nonzero(upper_viol_lazy_array > 0.0)[0]

if len(upper_viol_lazy_idx) > 0:
upper_viol_array = flow_array[upper_viol_lazy_idx] - upper_enforced_limits[upper_viol_lazy_idx]
upper_viol_idx = self._calculate_total_and_monitored_violations(upper_viol_array, upper_viol_lazy_idx, monitored_indices,
flow_variable, flow_array, index_names, upper_enforced_limits,
name, outer_name, PFV)

upper_viol_array = flow_array[upper_viol_lazy_idx] - upper_enforced_limits[upper_viol_lazy_idx]
self._calculate_total_and_monitored_violations(upper_viol_array, upper_viol_lazy_idx, monitored_indices,
flow_variable, flow_array, index_names, upper_enforced_limits,
name, outer_name, PFV)
## viol_lazy_idx will hold the lines we're adding
## this iteration -- don't want to add lines
## that are already in the monitored set

## viol_lazy_idx will hold the lines we're adding
## this iteration -- don't want to add lines
## that are already in the monitored set
# eliminate lines in the monitored set
upper_viol_lazy_idx = set(upper_viol_lazy_idx).difference(monitored_indices)
upper_viol_lazy_idx = np.fromiter(upper_viol_lazy_idx, dtype='int64', count=len(upper_viol_lazy_idx))

# eliminate lines in the monitored set
upper_viol_lazy_idx = list(set(upper_viol_lazy_idx).difference(monitored_indices))
upper_viol_lazy_idx_idx = np.nonzero(upper_viol_lazy_array[upper_viol_lazy_idx] > self.min_flow_violation())[0]
upper_viol_lazy_idx = list(upper_viol_lazy_idx[upper_viol_lazy_idx_idx])

self._add_violations( name, outer_name, upper_viol_lazy_array, upper_viol_lazy_idx )
self._add_violations( name, outer_name, upper_viol_lazy_array, upper_viol_lazy_idx )
else:
upper_viol_idx = []

## check lower bound
lower_viol_lazy_array = lower_lazy_limits - flow_array

## get the indices of the violation
## here filter by least violation in violations_store
## in the limit, this will become 0 eventually --
lower_viol_lazy_idx = np.nonzero(lower_viol_lazy_array > self.min_flow_violation())[0]
lower_viol_lazy_idx = np.nonzero(lower_viol_lazy_array > 0.0)[0]

lower_viol_array = lower_enforced_limits[lower_viol_lazy_idx] - flow_array[lower_viol_lazy_idx]
self._calculate_total_and_monitored_violations(lower_viol_array, lower_viol_lazy_idx, monitored_indices,
flow_variable, flow_array, index_names, lower_enforced_limits,
name, outer_name, PFV)
if len(lower_viol_lazy_idx) > 0:
lower_viol_array = lower_enforced_limits[lower_viol_lazy_idx] - flow_array[lower_viol_lazy_idx]
lower_viol_idx = self._calculate_total_and_monitored_violations(lower_viol_array, lower_viol_lazy_idx, monitored_indices,
flow_variable, flow_array, index_names, lower_enforced_limits,
name, outer_name, PFV)

## viol_lazy_idx will hold the lines we're adding
## this iteration -- don't want to add lines
## that are already in the monitored set
## viol_lazy_idx will hold the lines we're adding
## this iteration -- don't want to add lines
## that are already in the monitored set

# eliminate lines in the monitored set
lower_viol_lazy_idx = list(set(lower_viol_lazy_idx).difference(monitored_indices))
# eliminate lines in the monitored set
lower_viol_lazy_idx = set(lower_viol_lazy_idx).difference(monitored_indices)
lower_viol_lazy_idx = np.fromiter(lower_viol_lazy_idx, dtype='int64', count=len(lower_viol_lazy_idx))

lower_viol_lazy_idx_idx = np.nonzero(lower_viol_lazy_array[lower_viol_lazy_idx] > self.min_flow_violation())[0]
lower_viol_lazy_idx = list(lower_viol_lazy_idx[lower_viol_lazy_idx_idx])

self._add_violations( name, outer_name, lower_viol_lazy_array, lower_viol_lazy_idx )
else:
lower_viol_idx = []

self._add_violations( name, outer_name, lower_viol_lazy_array, lower_viol_lazy_idx )
if get_counts:
viol_idx = set(upper_viol_idx)
viol_idx.update(lower_viol_idx)
return len(viol_idx.difference(monitored_indices))
return


def _calculate_total_and_monitored_violations(self, viol_array, viol_lazy_idx, monitored_indices,
Expand Down Expand Up @@ -313,6 +342,8 @@ def _calculate_total_and_monitored_violations(self, viol_array, viol_lazy_idx, m
print(f'model: {pyo.value(flow_variable[element_name])}')
print('')

return viol_idx


## to hold the indicies of the violations
## in the model or block
Expand Down Expand Up @@ -359,7 +390,7 @@ def check_violations(mb, md, PTDF, max_viol_add, time=None, prepend_str=""):
mb._interfaces_monitored, PTDF.interface_keys)

if PTDF.contingencies and \
violations_store.total_violations == 0:
violations_store.total_violations < violations_store.monitored_violations + violations_store.max_viol_add:
## NOTE: checking contingency constraints in general could be very expensive
## we probably want to delay doing so until we have a nearly transmission feasible
## solution
Expand All @@ -378,18 +409,40 @@ def check_violations(mb, md, PTDF, max_viol_add, time=None, prepend_str=""):

## In this way, we avoid (number of contingenies) adds PFV+PFV_delta_c

logger.debug("Checking contingency flows...")
logger.info("Checking contingency flows...")
lazy_contingency_limits_upper = PTDF.lazy_contingency_limits - PFV
lazy_contingency_limits_lower = -PTDF.lazy_contingency_limits - PFV
enforced_contingency_limits_upper = PTDF.enforced_contingency_limits - PFV
enforced_contingency_limits_lower = -PTDF.enforced_contingency_limits - PFV
for cn in PTDF.contingency_compensators:
if time in PTDF.contingency_compensators._order:
cn_iterator = PTDF.contingency_compensators._order[time]
else:
cn_iterator = PTDF.contingency_compensators
total = 0
for c_checked, cn in enumerate(cn_iterator):
PFV_delta = PTDF.calculate_masked_PFV_delta(cn, PFV, VA)
violations_store.check_and_add_violations('contingency', PFV_delta, mb.pfc,
lazy_contingency_limits_upper, enforced_contingency_limits_upper,
lazy_contingency_limits_lower, enforced_contingency_limits_lower,
mb._contingencies_monitored, PTDF.branches_keys_masked,
outer_name = cn, PFV = PFV)
PTDF.contingency_compensators._count[time,cn] = violations_store.check_and_add_violations('contingency', PFV_delta, mb.pfc,
lazy_contingency_limits_upper, enforced_contingency_limits_upper,
lazy_contingency_limits_lower, enforced_contingency_limits_lower,
mb._contingencies_monitored, PTDF.branches_keys_masked,
outer_name = cn, PFV = PFV, get_counts=True)

total += PTDF.contingency_compensators._count[time,cn]
if time in PTDF.contingency_compensators._order:
# give us some choice in which violations we add
if total >= violations_store.max_viol_add:
c_checked += 1
break
if (c_checked+1)%1000 == 0:
logger.info(f"Checked {c_checked+1} contingency flows...")

PTDF.contingency_compensators._order[time] = list(sorted(PTDF.contingency_compensators, key=lambda cn: PTDF.contingency_compensators._count[time,cn], reverse=True))
if len(PTDF.contingency_compensators) == c_checked+1:
logger.info(f"Checked **ALL** {c_checked} contingencies")
else:
logger.info(f"Checked {c_checked} contingencies")
else:
logger.info("Checked no contingencies")

logger.debug(f"branches_monitored: {mb._idx_monitored}\n"
f"interfaces_monitored: {mb._interfaces_monitored}\n"
Expand All @@ -404,6 +457,7 @@ def check_violations(mb, md, PTDF, max_viol_add, time=None, prepend_str=""):

return flows, violations_store.total_violations, violations_store.monitored_violations, viol_lazy


def _generate_flow_monitor_remove_message(flow_type, bn, slack, baseMVA, time):
ret_str = "removing {0} {1} from monitored set".format(flow_type, bn)
if time is not None:
Expand Down
5 changes: 3 additions & 2 deletions egret/common/solver_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,8 @@ def _solve_model(model,
solve_method_options = None,
return_solver = False,
vars_to_load = None,
set_instance = True):
set_instance = True,
):
'''
Create and solve an Egret power system optimization model

Expand Down Expand Up @@ -118,7 +119,6 @@ def _solve_model(model,
-------

'''

results = None

## termination conditions which are acceptable
Expand Down Expand Up @@ -169,6 +169,7 @@ def _solve_model(model,
else:
model.solutions.load_from(results)


if return_solver:
return model, results, solver
return model, results
49 changes: 44 additions & 5 deletions egret/data/ptdf_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,10 @@ def get_interface_ptdf_iterator(self, interface_name):
def calculate_PFV(self,mb):
pass

def _insert_reference_bus(self, bus_array, val):
return np.insert(bus_array, self._busname_to_index_map[self._reference_bus], val)


class VirtualPTDFMatrix(_PTDFManagerBase):
'''
Helper class which *looks like* a PTDF matrix, but doesn't
Expand Down Expand Up @@ -142,6 +146,9 @@ def __init__(self, branches, buses, reference_bus, base_point,
# dense array write buffer
self._bus_sensi_buffer = np.empty((1,len(self.buses_keys_no_ref)), dtype=np.float64)

# helper
self._first_time_contingency = True

def _calculate_ptdf_factorization(self):
logger.info("Calculating PTDF Matrix Factorization")
MLU, B_dA, ref_bus_mask, contingency_compensators, B_dA_I, I = \
Expand Down Expand Up @@ -408,9 +415,6 @@ def get_interface_ptdf_iterator(self, interface_name):
'''
yield from zip(self.buses_keys_no_ref, self._get_interface_row(interface_name))

def _insert_reference_bus(self, bus_array, val):
return np.insert(bus_array, self._busname_to_index_map[self._reference_bus], val)

def _calculate_PFV_delta(self, cn, PFV, VA, masked):
comp = self.contingency_compensators[cn]

Expand Down Expand Up @@ -666,6 +670,8 @@ def __init__(self, branches, buses, reference_bus, base_point,

if interfaces is None:
interfaces = dict()
# no contingency analysis for Full PTDF matrix
self.contingencies = dict()
self._calculate_ptdf_interface(interfaces)

self._set_lazy_limits(ptdf_options)
Expand Down Expand Up @@ -874,7 +880,7 @@ def get_interface_ptdf_iterator(self, interface_name):
def bus_iterator(self):
yield from self.buses_keys

def calculate_PFV(self,mb):
def calculate_masked_PFV(self,mb):
NWV = np.fromiter((value(mb.p_nw[b]) for b in self.buses_keys), float, count=len(self.buses_keys))
NWV += self.phi_adjust_array

Expand All @@ -884,7 +890,40 @@ def calculate_PFV(self,mb):
PFV_I = self.PTDFM_I.dot(NWV)
PFV_I += self.PTDFM_I_const

return PFV, PFV_I
return PFV, PFV_I, None

def calculate_PFV(self,mb):
    """Compute branch and interface flows from the model's net bus withdrawals.

    Returns a three-tuple (branch flows, interface flows, None); the trailing
    None keeps the return shape uniform with the masked/virtual variants,
    which use the third slot for contingency data.
    """
    bus_keys = self.buses_keys
    # Pull net withdrawals out of the Pyomo model and apply the phi adjustment.
    net_withdrawals = np.fromiter((value(mb.p_nw[b]) for b in bus_keys),
                                  float, count=len(bus_keys))
    net_withdrawals += self.phi_adjust_array

    branch_flows = self.PTDFM.dot(net_withdrawals) + self.phase_shift_array
    interface_flows = self.PTDFM_I.dot(net_withdrawals) + self.PTDFM_I_const

    return branch_flows, interface_flows, None

def calculate_LMP(self, mb, dual, bus_balance_constr):
    """Compute locational marginal prices from the solved model's duals.

    LMP = energy component (dual of the bus balance constraint) plus
    congestion components from branch thermal and interface bound duals,
    mapped back to buses through the PTDF transposes. The reference bus
    (absent from the PTDF columns) is re-inserted with the energy price.
    """
    ## NOTE: unmonitored lines cannot contribute to LMPC
    # Branch bound duals; 0. for branches whose constraint isn't in the model.
    PFD = np.fromiter( ( value(dual[mb.ineq_pf_branch_thermal_bounds[bn]])
                         if bn in mb.ineq_pf_branch_thermal_bounds else
                         0. for i,bn in enumerate(self.branches_keys_masked) ),
                       float, count=len(self.branches_keys_masked))

    ## interface contributes to LMP
    # Interface bound duals, same pattern as the branch duals above.
    PFID = np.fromiter( ( value(dual[mb.ineq_pf_interface_bounds[i_n]])
                          if i_n in mb.ineq_pf_interface_bounds else
                          0. for i,i_n in enumerate(self.interface_keys) ),
                        float, count=len(self.interface_keys))
    # Congestion components: map constraint duals back to buses.
    LMPC = -self.PTDFM.T.dot(PFD)
    LMPI = -self.PTDFM_I.T.dot(PFID)

    # Energy component: marginal price at the reference bus.
    LMPE = value(dual[bus_balance_constr])

    LMP = LMPE + LMPC + LMPI

    # Reference bus carries only the energy component (no congestion terms).
    return self._insert_reference_bus(LMP, LMPE)

class PTDFLossesMatrix(PTDFMatrix):

Expand Down
2 changes: 2 additions & 0 deletions egret/model_library/transmission/tx_calc.py
Original file line number Diff line number Diff line change
Expand Up @@ -766,6 +766,8 @@ def __init__(self, compensators, L, U, Pr, Pc):
self._U = U
self._Pr = Pr
self._Pc = Pc
self._order = {}
self._count = {}

def __getitem__(self, key):
    # Dictionary-style access: look up the compensator stored under *key*
    # (presumably a contingency name — verify against callers).
    return self._compensators[key]
Expand Down
Loading