Update observed.py
update taz to district bridge and update reduce_traffic_counts
vamerlynck committed Jan 11, 2024
1 parent 0898497 commit b61c151
Showing 1 changed file with 17 additions and 17 deletions.
34 changes: 17 additions & 17 deletions tm2py/acceptance/observed.py
@@ -22,7 +22,7 @@ class Observed:
     census_tract_centroids_gdf: gpd.GeoDataFrame
 
     RELEVANT_PEMS_OBSERVED_YEARS_LIST = [2014, 2015, 2016]
-    RELEVANT_BRIDGE_TRANSACTIONS_YEARS_LIST = [2014, 2015, 2016]  # ch
+    RELEVANT_BRIDGE_TRANSACTIONS_YEARS_LIST = [2014, 2015, 2016]
     RELEVANT_PEMS_VEHICLE_CLASSES_FOR_LARGE_TRUCK = [6, 7, 8, 9, 10, 11, 12]
 
     ohio_rmse_standards_df = pd.DataFrame(
@@ -220,6 +220,7 @@ def _join_standard_route_id(self, input_df: pd.DataFrame) -> pd.DataFrame:
         df["survey_agency"] = df["survey_agency"].map(
             self.c.canonical_agency_names_dict
         )
+
         join_df = df[~df["survey_agency"].isin(self.c.rail_operators_vector)].copy()
 
         join_all_df = join_df.copy()
@@ -329,6 +330,7 @@ def reduce_on_board_survey(self, read_file_from_disk=True):
         temp_df = in_df[
             (in_df["weekpart"].isna()) | (in_df["weekpart"] != "WEEKEND")
         ].copy()
+
         temp_df["time_period"] = temp_df["day_part"].map(time_period_dict)
         temp_df["route"] = np.where(
             temp_df["operator"].isin(self.c.rail_operators_vector),
@@ -553,11 +555,11 @@ def _make_district_to_district_transit_flows_by_technology(self):
         o_df = self.reduced_transit_spatial_flow_df.copy()
         o_df = o_df[o_df["time_period"] == "am"].copy()
 
-        tm1_district_dict = self.c.taz_to_district_df.set_index("taz_tm1")[
-            "district_tm1"
+        tm2_district_dict = self.c.taz_to_district_df.set_index("taz_tm2")[
+            "district_tm2"
         ].to_dict()
-        o_df["orig_district"] = o_df["orig_taz"].map(tm1_district_dict)
-        o_df["dest_district"] = o_df["dest_taz"].map(tm1_district_dict)
+        o_df["orig_district"] = o_df["orig_taz"].map(tm2_district_dict)
+        o_df["dest_district"] = o_df["dest_taz"].map(tm2_district_dict)
 
         for prefix in self.c.transit_technology_abbreviation_dict.keys():
             o_df["{}".format(prefix.lower())] = (
@@ -796,7 +798,6 @@ def reduce_bridge_transactions(self, read_file_from_disk=True):
         return_df.to_pickle(os.path.join(file_root, out_file))
 
         self.observed_bridge_transactions_df = return_df
-
         return
 
     def reduce_traffic_counts(self, read_file_from_disk=True):
@@ -872,20 +873,19 @@ def _reduce_pems_counts(self, read_file_from_disk=True):
             self.c.pems_to_link_crosswalk_df,
             out_df,
             how="left",
-            left_on="pems_station_id",
-            right_on="station_id",
-        ).drop(columns=["pems_station_id"])
+            on="station_id",
+        )
 
-        out_df = self._join_tm2_node_ids(out_df)
+        out_df = self._join_tm2_node_ids(out_df)
 
         # take median across multiple stations on same link
         median_df = (
             out_df.groupby(
                 [
-                    "A",
-                    "B",
-                    "emme_a_node_id",
-                    "emme_b_node_id",
+                    "emme_a_node_id",
+                    "emme_b_node_id",
+                    "model_link_id",
                     "time_period",
                     "vehicle_class",
                 ]
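With station_id now present on both frames, the crosswalk join above collapses from a left_on/right_on pair plus a column drop to a single shared-key merge. A small sketch with invented stand-in frames:

    import pandas as pd

    # Hypothetical stand-ins for pems_to_link_crosswalk_df and the PeMS counts frame.
    crosswalk_df = pd.DataFrame({"station_id": [1, 2], "model_link_id": [10, 20]})
    counts_df = pd.DataFrame({"station_id": [1, 2], "observed_flow": [500.0, 750.0]})

    # One shared key: no left_on/right_on pair and no duplicate ID column to drop afterward.
    out_df = pd.merge(crosswalk_df, counts_df, how="left", on="station_id")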
@@ -894,15 +894,15 @@
             .reset_index()
         )
         join_df = out_df[
-            ["A", "B", "observed_flow", "station_id", "type", "vehicle_class"]
+            ["emme_a_node_id", "emme_b_node_id", "model_link_id", "time_period", "station_id", "type", "vehicle_class"]
         ].copy()
         return_df = pd.merge(
             median_df,
             join_df,
             how="left",
-            on=["A", "B", "observed_flow", "vehicle_class"],
+            on=["emme_a_node_id", "emme_b_node_id", "model_link_id", "time_period", "vehicle_class"],
         ).reset_index(drop=True)
 
+        return_df = return_df.rename(columns={"model_link_id": "standard_link_id"})
         return_df = self._join_ohio_standards(return_df)
         return_df = self._identify_key_arterials_and_bridges(return_df)
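The reworked keys identify a link by its Emme node pair and standard link ID rather than A/B, and observed_flow no longer appears in the merge-back keys, so station metadata re-attaches even when the median matches no single station's count. A toy sketch of the median-then-merge-back pattern (column values invented):

    import pandas as pd

    keys = ["emme_a_node_id", "emme_b_node_id", "model_link_id", "time_period", "vehicle_class"]

    # Two stations observed on the same link and time period.
    out_df = pd.DataFrame(
        {
            "emme_a_node_id": [1, 1],
            "emme_b_node_id": [2, 2],
            "model_link_id": [10, 10],
            "time_period": ["am", "am"],
            "vehicle_class": [0, 0],
            "station_id": [111, 222],
            "observed_flow": [500.0, 700.0],
        }
    )

    # Median across stations sharing a link; 600.0 here matches neither station,
    # which is why observed_flow cannot serve as a merge-back key.
    median_df = out_df.groupby(keys)["observed_flow"].median().reset_index()

    join_df = out_df[keys + ["station_id"]].copy()
    return_df = pd.merge(median_df, join_df, how="left", on=keys).reset_index(drop=True)
    return_df = return_df.rename(columns={"model_link_id": "standard_link_id"})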
