Fix intertimepoint constraint when estimated duration has no variation #44

Open
wants to merge 25 commits into base: develop
Changes from all commits · 25 commits
35cae56
Fix intertimepoint constraints
Aug 26, 2020
855217e
Rename timepoint names from start, pickup, delivery to departure, sta…
Aug 27, 2020
28ad402
Merge pull request #2 from kelo-robotics/feature/rename-timepoints
anenriquez Aug 27, 2020
75705cc
stn: Add get_insertion_points
Sep 9, 2020
4116f2d
stn: Fix get_insertion_points
Sep 16, 2020
35c1b54
Add update_travel_time method
Sep 25, 2020
52427c1
Add update_work_time method
Oct 8, 2020
45375b1
pstn: Replace is not with !=
Oct 16, 2020
4ae7b3e
srea: default msg=0 when debug is inactive. Replace spaces in problem…
Oct 16, 2020
2cae803
Sort edges and nodes before displaying graph
Oct 22, 2020
c23e264
pstn: Remove node number from __str__
Oct 22, 2020
ceff021
stn: Add get_times
Oct 23, 2020
48b7a80
Update remove_node_ids to store executed timepoints in an archived_stn
Nov 3, 2020
0081943
stn: Get insertion points based on latest start time
Nov 3, 2020
5ab3440
stn: from_json add data to new node only if the node has data
Nov 3, 2020
c6448ea
stn: Check if nodes have data in get_times method
Nov 3, 2020
d64d6cd
Update get_completion_time to use lower bound instead of upper bound
Nov 11, 2020
fbf3eb9
fpc: Remove logging
Nov 16, 2020
126f9ed
get_times: Return is_executed
Dec 4, 2020
9136eec
stn: Use sorted version of nodes and edges to retrieve information
Jan 12, 2021
210a127
stn: Fix get_earliest_task_id to return the earliest task also if it …
Jan 12, 2021
f999ca1
stn: Fix get_tasks
Feb 26, 2021
b5b238b
Fix update_task, get_node_by_type and get_task_position for tasks wit…
Mar 3, 2021
d4d9be5
stn: Add check to get_time
Mar 25, 2021
ff7f95d
Remove oldpoints before running srea algorithm
Apr 16, 2021
26 changes: 22 additions & 4 deletions stn/config/config.py
@@ -1,9 +1,11 @@
-from stn.stn import STN
+import copy
+
+from stn.methods.dsc_lp import DSC_LP
+from stn.methods.fpc import get_minimal_network
+from stn.methods.srea import srea
 from stn.pstn.pstn import PSTN
+from stn.stn import STN
 from stn.stnu.stnu import STNU
-from stn.methods.srea import srea
-from stn.methods.fpc import get_minimal_network
-from stn.methods.dsc_lp import DSC_LP
 
 
 class STNFactory(object):
@@ -77,13 +79,29 @@ def srea_algorithm(stn):
 
     :param stn: stn (object)
     """
+    archived_stn = PSTN()
+    stn = copy.deepcopy(stn)
+    archived_stn = stn.remove_old_timepoints(archived_stn)
+
     result = srea(stn, debug=True)
 
     if result is None:
         return
     risk_metric, dispatchable_graph = result
 
     dispatchable_graph.risk_metric = risk_metric
 
+    for i in archived_stn.nodes():
+        if i != 0 and 'data' in archived_stn.nodes[i]:
+            dispatchable_graph.add_node(i, data=archived_stn.nodes[i]['data'])
+            dispatchable_graph.add_edge(i, 0, weight=archived_stn[i][0]['weight'], is_executed=True)
+            dispatchable_graph.add_edge(0, i, weight=archived_stn[0][i]['weight'], is_executed=True)
+
+            if archived_stn.has_edge(i, i + 1):
+                dispatchable_graph.add_constraint(i, i + 1, -archived_stn[i + 1][i]['weight'],
+                                                  archived_stn[i][i + 1]['weight'])
+                dispatchable_graph.execute_edge(i, i + 1)
+
     return dispatchable_graph
 
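Note on the change above: srea_algorithm now archives already-executed timepoints before solving and re-attaches them afterwards, so the dispatchable graph keeps the executed part of the schedule. A minimal sketch of this archive-and-restore pattern on plain networkx digraphs (solve_with_archive and the is_executed node attribute are illustrative assumptions, not this repo's API):

import networkx as nx

def solve_with_archive(stn, solve):
    # Move executed timepoints (and their edges to/from the zero timepoint 0)
    # into an archive graph before solving the reduced STN.
    archive = nx.DiGraph()
    executed = [n for n in list(stn.nodes) if n != 0 and stn.nodes[n].get("is_executed")]
    for n in executed:
        archive.add_node(n, **stn.nodes[n])
        archive.add_edge(n, 0, **stn[n][0])
        archive.add_edge(0, n, **stn[0][n])
        stn.remove_node(n)

    dispatchable = solve(stn)
    if dispatchable is None:
        return None

    # Re-attach the archived timepoints so the result stays complete.
    for n in archive.nodes:
        if n != 0:
            dispatchable.add_node(n, **archive.nodes[n])
            dispatchable.add_edge(n, 0, **archive[n][0])
            dispatchable.add_edge(0, n, **archive[0][n])
    return dispatchable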
6 changes: 1 addition & 5 deletions stn/methods/fpc.py
@@ -1,20 +1,16 @@
-import logging
-import networkx as nx
 import copy
 
+import networkx as nx
 
 """ Achieves full path consistency (fpc) by applying the Floyd Warshall algorithm to the STN"""
 
 
 def get_minimal_network(stn):
 
-    logger = logging.getLogger('stn.fpc')
     minimal_network = copy.deepcopy(stn)
 
     shortest_path_array = nx.floyd_warshall(stn)
     if stn.is_consistent(shortest_path_array):
         # Get minimal stn by updating the edges of the stn to reflect the shortest path distances
         minimal_network.update_edges(shortest_path_array)
         return minimal_network
-    else:
-        logger.debug("The minimal network is inconsistent. STP could not be solved")
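Note: get_minimal_network relies on the standard STN consistency criterion: the Floyd-Warshall distance matrix is consistent iff no node can reach itself with negative total weight. is_consistent itself is not shown in this diff; a minimal sketch of that check, assuming networkx's dict-of-dicts result:

def is_consistent(shortest_path_array):
    # A negative self-distance means a negative cycle, i.e. an
    # unsatisfiable set of temporal constraints.
    return all(shortest_path_array[n][n] >= 0 for n in shortest_path_array)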
10 changes: 5 additions & 5 deletions stn/methods/srea.py
@@ -55,7 +55,7 @@ def setUpLP(stn, decouple):
     bounds = {}
     deltas = {}
 
-    prob = pulp.LpProblem('PSTN Robust Execution LP', pulp.LpMaximize)
+    prob = pulp.LpProblem('PSTN_Robust_Execution_LP', pulp.LpMaximize)
 
     for (i, j) in stn.edges():
         weight = stn.get_edge_weight(i, j)
@@ -103,7 +103,7 @@ def setUpLP(stn, decouple):
     return (bounds, deltas, prob)
 
 
-def srea(inputstn,
+def srea(stn,
          debug=False,
          debugLP=False,
          returnAlpha=True,
@@ -112,7 +112,7 @@ def srea(inputstn,
          ub=0.999):
 
     """ Runs the SREA algorithm on an input STN
-    @param inputstn The STN that we are running SREA on
+    @param stn The STN that we are running SREA on
    @param debug Print optional status messages about alpha levels
    @param debugLP Print optional status messages about each run of the LP
    @param lb The starting lower bound on alpha for the binary search
@@ -122,8 +122,6 @@ def srea(inputstn,
        or None if there is no solution
     """
 
-    stn = copy.deepcopy(inputstn)
-
     # dictionary of alphas for binary search
     alphas = {i: i / 1000.0 for i in range(1001)}
 
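Note: the alphas dictionary above discretizes the risk level into 0.001 steps for SREA's binary search. A generic sketch of such a search, assuming feasibility is monotone in alpha and that the lowest feasible level is wanted (feasible stands in for the LP solve; both assumptions are illustrative, not taken from this diff):

def search_lowest_feasible_alpha(feasible, lb=0, ub=1000):
    # Bisect the 0.000 .. 1.000 grid; keep the lowest alpha whose LP solves.
    best = None
    while lb <= ub:
        mid = (lb + ub) // 2
        if feasible(mid / 1000.0):
            best = mid / 1000.0
            ub = mid - 1
        else:
            lb = mid + 1
    return best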
@@ -267,6 +265,8 @@ def srea_LP(inputstn,
         prob.writeLP('STN.lp')
         pulp.LpSolverDefault.msg = 10
 
+    pulp.LpSolverDefault.msg = 0
+
     # Based on https://stackoverflow.com/questions/27406858/pulp-solver-error
     # Sometimes pulp throws an exception instead of returning a problem with unfeasible status
     try:
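Note: the comment above refers to PuLP sometimes raising pulp.PulpSolverError instead of returning an infeasible status. A minimal sketch of the guard it motivates (solve_lp_safely is illustrative):

import pulp

def solve_lp_safely(prob):
    # Treat a solver exception the same as an infeasible status.
    try:
        prob.solve()
    except pulp.PulpSolverError:
        return None
    if pulp.LpStatus[prob.status] != "Optimal":
        return None
    return prob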
2 changes: 1 addition & 1 deletion stn/node.py
@@ -9,7 +9,7 @@ def __init__(self, task_id, node_type, is_executed=False, **kwargs):
         if isinstance(task_id, str):
             task_id = from_str(task_id)
         self.task_id = task_id
-        # The node can be of node_type zero_timepoint, start, pickup or delivery
+        # The node can be of node_type zero_timepoint, departure, start or finish
         self.node_type = node_type
         self.is_executed = is_executed
         self.action_id = kwargs.get("action_id")
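Note: after the rename, each task is represented by three consecutive timepoints (departure, start, finish). Assuming node ids are assigned consecutively from 1, as the remove_node_ids comment in stn/pstn/pstn.py states, the id arithmetic in update_travel_time reduces to 3p - 2 for the task at 1-based position p (timepoint_ids below is illustrative):

def timepoint_ids(position):
    # 2 * position + (position - 2) == 3 * position - 2
    departure_node_id = 3 * position - 2
    return departure_node_id, departure_node_id + 1, departure_node_id + 2

assert timepoint_ids(1) == (1, 2, 3)   # first task: departure, start, finish
assert timepoint_ids(2) == (4, 5, 6)   # second task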
77 changes: 67 additions & 10 deletions stn/pstn/pstn.py
@@ -46,7 +46,7 @@ def __init__(self):
 
     def __str__(self):
         to_print = ""
-        for (i, j, data) in self.edges.data():
+        for (i, j, data) in sorted(self.edges.data()):
             if self.has_edge(j, i) and i < j:
                 # Constraints with the zero timepoint
                 if i == 0:
@@ -93,7 +93,7 @@ def add_constraint(self, i, j, wji=0.0, wij=MAX_FLOAT, distribution=""):
         """
 
         # The constraint is contingent if it has a probability distribution
-        is_contingent = distribution is not ""
+        is_contingent = distribution != ""
 
         super().add_constraint(i, j, wji, wij)
 
@@ -115,9 +115,9 @@ def get_contingent_constraints(self):
     def add_intertimepoints_constraints(self, constraints, task):
         """ Adds constraints between the timepoints of a task
         Constraints between:
-        - start and pickup (contingent)
-        - pickup and delivery (contingent)
-        - delivery and next task (if any) (requirement)
+        - departure and start (contingent)
+        - start and finish (contingent)
+        - finish and next task (if any) (requirement)
         Args:
             constraints (list) : list of tuples that defines the pair of nodes between which a new constraint should be added
             Example:
@@ -128,7 +128,7 @@
         """
         for (i, j) in constraints:
             self.logger.debug("Adding constraint: %s ", (i, j))
-            if self.nodes[i]['data'].node_type == "start":
+            if self.nodes[i]['data'].node_type == "departure":
                 distribution = self.get_travel_time_distribution(task)
                 if distribution.endswith("_0.0"):  # the distribution has no variation (stdev is 0)
                     # Make the constraint a requirement constraint
@@ -137,14 +137,71 @@
                 else:
                     self.add_constraint(i, j, distribution=distribution)
 
-            elif self.nodes[i]['data'].node_type == "pickup":
+            elif self.nodes[i]['data'].node_type == "start":
                 distribution = self.get_work_time_distribution(task)
-                self.add_constraint(i, j, distribution=distribution)
+                if distribution.endswith("_0.0"):  # the distribution has no variation (stdev is 0)
+                    # Make the constraint a requirement constraint
+                    mean = float(distribution.split("_")[1])
+                    self.add_constraint(i, j, mean, mean)
+                else:
+                    self.add_constraint(i, j, distribution=distribution)
 
-            elif self.nodes[i]['data'].node_type == "delivery":
-                # wait time between finish of one task and start of the next one. Fixed to [0, inf]
+            elif self.nodes[i]['data'].node_type == "finish":
+                # wait time between finish of one task and departure of the next one. Fixed to [0, inf]
                 self.add_constraint(i, j)
 
+    def update_travel_time(self, task):
+        position = self.get_task_position(task.task_id)
+        departure_node_id = 2 * position + (position-2)
+        start_node_id = departure_node_id + 1
+        distribution = self.get_travel_time_distribution(task)
+
+        if self.has_edge(departure_node_id, start_node_id):
+            if distribution.endswith("_0.0"):  # the distribution has no variation (stdev is 0)
+                # Make the constraint a requirement constraint
+                mean = float(distribution.split("_")[1])
+                self.add_constraint(departure_node_id, start_node_id, mean, mean)
+            else:
+                self.add_constraint(departure_node_id, start_node_id, distribution=distribution)
+
+    def update_work_time(self, task):
+        position = self.get_task_position(task.task_id)
+        departure_node_id = 2 * position + (position-2)
+        start_node_id = departure_node_id + 1
+        finish_node_id = start_node_id + 1
+        distribution = self.get_work_time_distribution(task)
+
+        if self.has_edge(start_node_id, finish_node_id):
+            if distribution.endswith("_0.0"):  # the distribution has no variation (stdev is 0)
+                # Make the constraint a requirement constraint
+                mean = float(distribution.split("_")[1])
+                self.add_constraint(start_node_id, finish_node_id, mean, mean)
+            else:
+                self.add_constraint(start_node_id, finish_node_id, distribution=distribution)
+
+    def remove_node_ids(self, node_ids, archived_stn=None):
+        # Assumes that the node_ids are in consecutive order from node_id 1 onwards
+        for i in node_ids:
+            if archived_stn:
+                # Start adding nodes after the last node_id in archived_stn
+                start_node_id = list(archived_stn.nodes())[-1]
+                if start_node_id == 0:  # skip the zero_timepoint
+                    start_node_id = 1
+                archived_stn.add_node(start_node_id, data=self.nodes[i]['data'])
+                archived_stn.add_edge(start_node_id, 0, weight=self[i][0]['weight'], is_executed=True)
+                archived_stn.add_edge(0, start_node_id, weight=self[0][i]['weight'], is_executed=True)
+
+                if self.has_edge(i, i+1):
+                    if 'is_contingent' in self[i][i+1]:
+                        archived_stn.add_constraint(start_node_id, start_node_id+1, -self[i+1][i]['weight'], self[i][i+1]['weight'], self[i][i+1]['distribution'])
+                    else:
+                        archived_stn.add_constraint(start_node_id, start_node_id+1, -self[i+1][i]['weight'], self[i][i+1]['weight'])
+                else:
+                    # Add dummy node
+                    archived_stn.add_node(start_node_id+1)
+            self.remove_node(i)
+        return archived_stn
+
     @staticmethod
     def get_travel_time_distribution(task):
         travel_time = task.get_edge("travel_time")
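Note: the core fix hinges on the distribution string format, which the code implies is "<name>_<mean>_<stdev>": a trailing "_0.0" means the estimated duration has no variation, so the contingent edge collapses to a fixed requirement constraint [mean, mean]. A sketch of that decision under the assumed format (requirement_bounds is illustrative):

def requirement_bounds(distribution):
    # e.g. "N_4.5_0.0" -> (4.5, 4.5); "N_4.5_1.2" -> None (keep it contingent)
    name, mean, stdev = distribution.split("_")
    if float(stdev) == 0.0:
        return float(mean), float(mean)
    return None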