
Commit 7d4aa90
minor changes to scripting, just looking at AM convergence (look at highway_assign.py line 139)
lachlan-git committed Jun 20, 2024
1 parent 202cb68 commit 7d4aa90
Showing 3 changed files with 44 additions and 113 deletions.
4 changes: 3 additions & 1 deletion scripts/compare_skims.py
@@ -16,6 +16,8 @@ def read_matrix_as_long_df(path: Path, run_name):
     index_lables = list(range(am_time.shape[0]))
     return pd.DataFrame(am_time, index=index_lables, columns=index_lables).stack().rename(run_name).to_frame()

+a = read_matrix_as_long_df(r"D:\TEMP\TM2.2.1.1-New_network_rerun\TM2.2.1.1_new_taz\skim_matrices\highway\HWYSKMAM_taz.omx", "test")
+#%%
 all_skims = []
 for skim_matrix_path in network_fid_path.rglob("*AM_taz.omx"):
     print(skim_matrix_path)
@@ -25,7 +27,7 @@ def read_matrix_as_long_df(path: Path, run_name):
 all_skims = pd.concat(all_skims, axis=1)
 # %%
 #%%%
-all_skims.to_csv(r"Z:\MTC\US0024934.9168\Task_3_runtime_improvements\3.1_network_fidelity\run_result\consolidated\skims.csv")
+all_skims.to_csv(r"Z:\MTC\US0024934.9168\Task_3_runtime_improvements\3.1_network_fidelity\output_summaries\skim_data\skims.csv")
 # %%
 # %%
 import geopandas as gpd
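For context on the hunk above, here is a minimal sketch of what the full read_matrix_as_long_df helper plausibly looks like. The openmatrix package and the "TIMEAU" table name are assumptions; the commit only shows the indexing and stacking at the top of the diff.

import numpy as np
import openmatrix as omx
import pandas as pd
from pathlib import Path

def read_matrix_as_long_df(path: Path, run_name: str) -> pd.DataFrame:
    # Open the OMX skim file and read one skim table as a dense array.
    # "TIMEAU" is a placeholder; the commit does not show which table is used.
    with omx.open_file(str(path)) as f:
        am_time = np.array(f["TIMEAU"])
    index_lables = list(range(am_time.shape[0]))
    # Stack the square OD matrix into one long column named after the run,
    # so skims from several runs can be concatenated side by side.
    return pd.DataFrame(am_time, index=index_lables, columns=index_lables).stack().rename(run_name).to_frame()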
151 changes: 40 additions & 111 deletions scripts/compile_model_runs.py
@@ -18,7 +18,8 @@
 # print("writing")
 # input[["#link_id", "geometry"]].to_file(output_dir / "test_geom.geojson")

-scenarios_to_consolidate = (12, 14)
+scenarios_to_consolidate = (11, 12, 13, 14, 15)
+runs_to_consolidate = (3, 4)
 #%%

 def read_file_and_tag(path: Path, columns_to_filter = ("@ft", "VOLAU", "@capacity", "run_number", "scenario_number", "#link_id", "geometry")) -> pd.DataFrame:
@@ -30,6 +31,8 @@ def read_file_and_tag(path: Path, columns_to_filter = ("@ft", "VOLAU", "@capacit

     run = file.parent.parent.stem
     run_number = int(run.split("_")[-1])
+    if run_number not in runs_to_consolidate:
+        return None

     return_gdf = gpd.read_file(path, engine="pyogrio")

@@ -136,11 +139,45 @@ def combine_tables(dfs, columns_same):
     / "all_data_wide.geojson")


+#%%
+num_iter = {
+    (3,11): 3,
+    (3,12): 10,
+    (3,13): 10,
+    (3,14): 19,
+    (3,15): 4,
+    (4,12): 20
+}
+#%%
+all_links_no_none = [links for links in all_links if (links is not None)] #and (links["#link_id"].is_unique)]
+for df in all_links_no_none:
+    df["saturation"] = df["VOLAU"] / df["@capacity"]
+ft6_sat = [(link["run_number"].iloc[0], link["scenario_number"].iloc[0], (link.loc[link["@ft"] == 6, "saturation"] > 1).mean()) for link in all_links_no_none]
+
+y = [val for val in num_iter.values()]
+x = [x[-1] for x in ft6_sat]
+col = [val[0] for val in num_iter.keys()]
+
+#%%
+import matplotlib.pyplot as plt
+plt.scatter(x, y, c=col)

+# Calculate the trendline
+z = np.polyfit(x, y, 1)
+p = np.poly1d(z)

+# Plot the trendline
+plt.plot(x, p(x), color='red')
+
+plt.xlabel('proportion of ft 6 with saturation > 1')
+plt.ylabel('number of iterations to solve')
+plt.title('Number of iterations to solve (relative gap = 0.05)')
+plt.show()
+#%%
 import matplotlib.pyplot as plt
+data = [links_wide_table[col] for col in links_wide_table.iloc[:, 2:].columns]

-fig = plt.boxplot(links_wide_table, y="total_bill")
+fig = plt.boxplot(data)
 fig.show()

 # --------------------------------------------------------------------------
@@ -162,112 +199,4 @@ def get_link_counts(df: pd.DataFrame):

 pd.concat(
     [get_link_counts(df) for df in all_links]
-).sort_values(by=["run_number", "scenario_number"])
-# #%%
-# import geopandas as gpd
-# import pandas as pd
-# from pathlib import Path
-# from tqdm import tqdm
-
-# input_dir = Path(r"Z:\MTC\US0024934.9168\Task_3_runtime_improvements\3.1_network_fidelity\run_result")
-# output_dir = input_dir / "consolidated"
-
-
-# # in_file = next(input_dir.rglob('emme_links.shp'))
-# # print("reading", in_file)
-# # input2 = gpd.read_file(in_file, engine="pyogrio", use_arrow=True)
-# # #%%
-# # print("writing")
-# # input[["#link_id", "geometry"]].to_file(output_dir / "test_geom.geojson")
-
-# #%%
-
-# def process_frame(return_gdf: pd.DataFrame, path: Path, columns_to_filter = ("@ft", "VOLAU", "@capacity", "run_number", "scenario_number", "#link_id")) -> pd.DataFrame:
-
-#     scenario = file.parent.stem
-#     scenario_number = int(scenario.split("_")[-1])
-
-#     run = file.parent.parent.stem
-#     run_number = int(run.split("_")[-1])
-
-#     # return_gdf = #gpd.read_file(path, engine="pyogrio")
-
-#     return_gdf["scenario"] = scenario
-#     return_gdf["scenario_number"] = scenario_number
-#     return_gdf["run"] = run
-#     return_gdf["run_number"] = run_number
-
-#     return_gdf = return_gdf[list(columns_to_filter)]
-
-#     # assert return_gdf["#link_id"].is_unique
-
-#     return return_gdf
-# #%%
-# geom_file = next(input_dir.rglob('run_*/Scenario*/emme_links.shp'))
-# geometry = gpd.read_file(geom_file, engine="pyogrio")
-# #%%
-# # geometry[["#link_id", "geometry"]].to_file(output_dir / "test_geom.geojson")
-
-# print("Reading Links...", end="")
-# all_links = {}
-# x = 0
-# for file in tqdm(input_dir.rglob('run_*/Scenario*/emme_links.shp')):
-#     print(file)
-#     all_links[file] = gpd.read_file(file)
-
-
-# #%%
-# processed_values = {path: process_frame(df, path) for path, df in all_links.items()}
-
-# temp_iter = iter(processed_values)
-# wide_data =
-# for path, frame in process_name.values():
-
-
-# #%%
-# links_iter = iter(all_links)
-# for frame in
-# # %%
-# links_table = pd.concat(all_links)
-# #%%
-# links_table.to_file("links_attr.csv", index=False)
-# # #%%
-# # print("reading Nodes...", end="")
-# # all_nodes = []
-# # for file in tqdm(input_dir.rglob('emme_nodes.shp')):
-# #     all_nodes.append(read_file_and_tag(file))
-
-# # nodes_table = pd.concat(all_nodes)
-# # nodes_table = gpd.GeoDataFrame(nodes_table, geometry="geometry", crs=all_links[0].crs)
-# # print("done")
-
-# # print("outputting files...")
-
-# links_table.to_file(output_dir/"links.geojson")
-# nodes_table.to_file(output_dir/"nodes.geojson")
-
-
-# #%%
-# from pathlib import Path
-# import os
-# import geopandas as gpd
-# import pandas as pd
-# #%%
-# output_paths_to_consolidate = Path(r"D:\TEMP\output_summaries\ss")
-# all_files = []
-# for file in output_paths_to_consolidate.glob("*_roadway_network.geojson"):
-#     run_name = file.name[0:5]
-#     print(run_name)
-#     specific_run = gpd.read_file(file)
-#     specific_run["run_number"] = run_name
-#     all_files.append(specific_run)
-# #%%
-# all_files = pd.concat(all_files)
-# #%%
-
-# all_files.drop(columns="geometry").to_csv(output_paths_to_consolidate / "data.csv")
-# #%%
-# to_be_shape = all_files[["geometry", "model_link_id"]].drop_duplicates()
-# print("outputting")
-# to_be_shape.to_file(output_paths_to_consolidate / "geom_package")
-# # %%
+).sort_values(by=["run_number", "scenario_number"])
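A note on the new analysis block above: x, y and col are built positionally, so the scatter plot relies on num_iter's insertion order matching the order in which the link tables were read, and np.polyfit is called without a visible numpy import (presumably numpy is imported earlier in the script). Below is a small sketch of a key-based pairing that cannot fall out of alignment; the saturation shares are illustrative placeholders, not results from these runs.

num_iter = {
    (3, 11): 3,
    (3, 12): 10,
    (3, 13): 10,
    (3, 14): 19,
    (3, 15): 4,
    (4, 12): 20,
}
# (run_number, scenario_number, share of ft-6 links with saturation > 1);
# the shares are made-up examples.
ft6_sat = [(3, 11, 0.02), (3, 12, 0.18), (3, 13, 0.17),
           (3, 14, 0.35), (3, 15, 0.05), (4, 12, 0.40)]

# Join on the (run, scenario) key instead of relying on list order.
pairs = [(sat, num_iter[(run, scen)], run)
         for run, scen, sat in ft6_sat if (run, scen) in num_iter]
x, y, col = (list(t) for t in zip(*pairs))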
2 changes: 1 addition & 1 deletion tm2py/components/network/highway/highway_assign.py
@@ -136,7 +136,7 @@ def run(self):
             demand.run()
         else:
             self.highway_emmebank.zero_matrix
-        for time in self.time_period_names:
+        for time in ["AM"]: #self.time_period_names:
             scenario = self.highway_emmebank.scenario(time)
             with self._setup(scenario, time):
                 iteration = self.controller.iteration
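The change above pins the assignment loop to the AM period while AM convergence is being studied; the other time periods are simply skipped. If the toggle were to stay, a switchable variant might look like the sketch below. This is not tm2py code; the helper and its override argument are hypothetical.

from typing import Optional, Sequence

def periods_to_assign(all_periods: Sequence[str],
                      override: Optional[Sequence[str]] = None) -> list:
    # Use the override (e.g. ["AM"] for a convergence test) when given,
    # otherwise fall back to every configured time period.
    return list(override) if override else list(all_periods)

print(periods_to_assign(["EA", "AM", "MD", "PM", "EV"]))          # full run
print(periods_to_assign(["EA", "AM", "MD", "PM", "EV"], ["AM"]))  # AM-only test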
