From 42498505535588de5feb551492ac2a47e8b06795 Mon Sep 17 00:00:00 2001
From: Ashish Kulshrestha
Date: Fri, 26 Jan 2024 09:57:14 -0800
Subject: [PATCH] Updates to acceptance criteria scripts

---
 tm2py/acceptance/acceptance.py |  67 ++-------
 tm2py/acceptance/canonical.py  |   6 +-
 tm2py/acceptance/observed.py   |  22 +--
 tm2py/acceptance/simulated.py  | 260 +++++++++++++++++++++++----------
 4 files changed, 207 insertions(+), 148 deletions(-)

diff --git a/tm2py/acceptance/acceptance.py b/tm2py/acceptance/acceptance.py
index 07805d90..4d1244af 100644
--- a/tm2py/acceptance/acceptance.py
+++ b/tm2py/acceptance/acceptance.py
@@ -315,6 +316,8 @@ def _make_transit_network_comparisons(self):
             .reset_index()
         )
 
+        sim_df = self._fix_technology_labels(sim_df, "technology")
+
         rail_df = pd.merge(
             sim_df,
             obs_df,
@@ -334,6 +337,8 @@ def _make_transit_network_comparisons(self):
             )
         ].copy()
 
+        sim_df = self._fix_technology_labels(sim_df, "technology")
+
         non_df = pd.merge(
             sim_df,
             obs_df,
@@ -342,7 +347,7 @@ def _make_transit_network_comparisons(self):
             right_on=["standard_line_name", "daily_line_name", "time_period"],
         )
 
-        boards_df = pd.concat([rail_df, non_df])
+        boards_df = pd.concat([rail_df, non_df], axis="rows", ignore_index=True)
 
         boards_df["operator"] = np.where(
             boards_df["operator"].isnull(),
@@ -357,22 +362,23 @@ def _make_transit_network_comparisons(self):
 
         # step 3 -- create a daily shape
         df = pd.DataFrame(self.s.simulated_transit_segments_gdf).copy()
-        am_shape_df = df[~(df["line"].str.contains("pnr_"))].reset_index().copy()
+        am_shape_df = df[~(df["LINE_ID"].str.contains("pnr_"))].reset_index().copy()
         am_shape_df = self.c.aggregate_line_names_across_time_of_day(
-            am_shape_df, "line"
+            am_shape_df, "LINE_ID"
        )
         b_df = (
             am_shape_df.groupby("daily_line_name")
-            .agg({"line": "first"})
+            .agg({"LINE_ID": "first"})
             .reset_index()
             .copy()
         )
         c_df = pd.DataFrame(
             self.s.simulated_transit_segments_gdf[
-                self.s.simulated_transit_segments_gdf["line"].isin(b_df["line"])
+                self.s.simulated_transit_segments_gdf["LINE_ID"].isin(b_df["LINE_ID"])
             ].copy()
         )
-        daily_shape_df = pd.merge(c_df, b_df, how="left", on="line")
+        daily_shape_df = pd.merge(c_df, b_df, how="left", on="LINE_ID")
+        daily_shape_df = daily_shape_df.rename(columns={"INODE": "emme_a_node_id", "JNODE": "emme_b_node_id"})
 
         # step 4 -- join the shapes to the boardings
         # for daily, join boardings to shape, as I care about the boardings more than the daily shapes
@@ -393,16 +399,16 @@ def _make_transit_network_comparisons(self):
         am_join_df = pd.merge(
             am_shape_df,
             boards_df[boards_df["time_period"] == "am"],
-            how="left",
+            how="outer",
             right_on=["line_name", "daily_line_name"],
-            left_on=["line", "daily_line_name"],
+            left_on=["LINE_ID", "daily_line_name"],
         )
         am_join_df["model_line_id"] = am_join_df["line_name"]
         am_join_df["time_period"] = np.where(
             am_join_df["time_period"].isnull(), "am", am_join_df["time_period"]
         )
         am_join_df = self.s._get_operator_name_from_line_name(
-            am_join_df, "line", "temp_operator"
+            am_join_df, "LINE_ID", "temp_operator"
         )
         am_join_df["operator"] = np.where(
             am_join_df["operator"].isnull()
@@ -531,48 +537,7 @@ def _make_home_work_flow_comparisons(self):
 
     def _make_zero_vehicle_household_comparisons(self):
 
-        # prepare simulated data
-        a_df = (
-            pd.merge(
-                self.s.simulated_zero_vehicle_hhs_df,
-                self.s.simulated_maz_data_df[["MAZ_ORIGINAL", "MAZSEQ"]],
-                left_on="maz",
-                right_on="MAZSEQ",
-                how="left",
-            )
-            .drop(columns=["maz", "MAZSEQ"])
-            .rename(columns={"MAZ_ORIGINAL": "maz"})
-        )
-
-        # TODO: probably better if this is done in Simulated, to avoid using Canonical in this class
-        a_df = pd.merge(
-            a_df,
-            self.c.census_2010_to_maz_crosswalk_df,
-            how="left",
-            on="maz",
-        )
-
-        a_df["product"] = a_df["simulated_zero_vehicle_share"] * a_df["maz_share"]
-
-        b_df = (
-            a_df.groupby("blockgroup")
-            .agg({"product": "sum", "simulated_households": "sum"})
-            .reset_index()
-            .rename(columns={"product": "simulated_zero_vehicle_share"})
-        )
-        b_df["tract"] = b_df["blockgroup"].astype("str").str.slice(stop=-1)
-        b_df["product"] = (
-            b_df["simulated_zero_vehicle_share"] * b_df["simulated_households"]
-        )
-
-        c_df = (
-            b_df.groupby("tract")
-            .agg({"product": "sum", "simulated_households": "sum"})
-            .reset_index()
-        )
-        c_df["simulated_zero_vehicle_share"] = (
-            c_df["product"] / c_df["simulated_households"]
-        )
+        c_df = self.s.reduced_simulated_zero_vehicle_hhs_df
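+        # the MAZ -> block group -> tract share reduction formerly inlined here
+        # now lives in Simulated (see _reduce_simulated_zero_vehicle_households below)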
 
         # prepare the observed data
         join_df = self.o.census_2017_zero_vehicle_hhs_df.copy()

diff --git a/tm2py/acceptance/canonical.py b/tm2py/acceptance/canonical.py
index 9f545b9a..6d1b0045 100644
--- a/tm2py/acceptance/canonical.py
+++ b/tm2py/acceptance/canonical.py
@@ -225,11 +225,7 @@ def _read_pems_to_link_crosswalk(self) -> pd.DataFrame:
 
         df = pd.read_csv(os.path.join(file_root, in_file))
 
-        df["pems_station_id"] = df["station"].astype(str) + "_" + df["direction"]
-
-        assert df["pems_station_id"].is_unique  # validate crosswalk - correct location?
-
-        df = df[["pems_station_id", "A", "B"]]
+        df = df[["station_id", "A", "B"]]
 
         self.pems_to_link_crosswalk_df = df
 
diff --git a/tm2py/acceptance/observed.py b/tm2py/acceptance/observed.py
index 9a736565..67d1e64d 100644
--- a/tm2py/acceptance/observed.py
+++ b/tm2py/acceptance/observed.py
@@ -22,7 +22,7 @@ class Observed:
     census_tract_centroids_gdf: gpd.GeoDataFrame
 
     RELEVANT_PEMS_OBSERVED_YEARS_LIST = [2014, 2015, 2016]
-    RELEVANT_BRIDGE_TRANSACTIONS_YEARS_LIST = [2014, 2015, 2016]  # ch
+    RELEVANT_BRIDGE_TRANSACTIONS_YEARS_LIST = [2014, 2015, 2016]
     RELEVANT_PEMS_VEHICLE_CLASSES_FOR_LARGE_TRUCK = [6, 7, 8, 9, 10, 11, 12]
 
     ohio_rmse_standards_df = pd.DataFrame(
@@ -325,7 +325,7 @@ def reduce_on_board_survey(self, read_file_from_disk=True):
                 dtype=self.reduced_transit_on_board_df.dtypes.to_dict(),
             )
         else:
-            in_df = pd.read_feather(os.path.join(file_root, in_file))
+            in_df = pd.read_csv(os.path.join(file_root, in_file))
             temp_df = in_df[
                 (in_df["weekpart"].isna()) | (in_df["weekpart"] != "WEEKEND")
             ].copy()
@@ -553,11 +553,11 @@ def _make_district_to_district_transit_flows_by_technology(self):
         o_df = self.reduced_transit_spatial_flow_df.copy()
         o_df = o_df[o_df["time_period"] == "am"].copy()
 
-        tm1_district_dict = self.c.taz_to_district_df.set_index("taz_tm1")[
-            "district_tm1"
+        tm2_district_dict = self.c.taz_to_district_df.set_index("taz_tm2")[
+            "district_tm2"
         ].to_dict()
-        o_df["orig_district"] = o_df["orig_taz"].map(tm1_district_dict)
-        o_df["dest_district"] = o_df["dest_taz"].map(tm1_district_dict)
+        o_df["orig_district"] = o_df["orig_taz"].map(tm2_district_dict)
+        o_df["dest_district"] = o_df["dest_taz"].map(tm2_district_dict)
 
         for prefix in self.c.transit_technology_abbreviation_dict.keys():
             o_df["{}".format(prefix.lower())] = (
@@ -872,9 +872,8 @@ def _reduce_pems_counts(self, read_file_from_disk=True):
             self.c.pems_to_link_crosswalk_df,
             out_df,
             how="left",
-            left_on="pems_station_id",
-            right_on="station_id",
-        ).drop(columns=["pems_station_id"])
+            on="station_id",
+        )
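+        # the crosswalk now carries the PeMS station_id directly (see the
+        # canonical.py change above), so no synthesized station-direction key is needed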
 
         out_df = self._join_tm2_node_ids(out_df)
 
@@ -894,15 +893,16 @@ def _reduce_pems_counts(self, read_file_from_disk=True):
             .reset_index()
         )
         join_df = out_df[
-            ["A", "B", "observed_flow", "station_id", "type", "vehicle_class"]
+            ["emme_a_node_id", "emme_b_node_id", "time_period", "station_id", "type", "vehicle_class"]
         ].copy()
 
         return_df = pd.merge(
             median_df,
             join_df,
             how="left",
-            on=["A", "B", "observed_flow", "vehicle_class"],
+            on=["emme_a_node_id", "emme_b_node_id", "time_period", "vehicle_class"],
         ).reset_index(drop=True)
 
         return_df = self._join_ohio_standards(return_df)
         return_df = self._identify_key_arterials_and_bridges(return_df)
 
diff --git a/tm2py/acceptance/simulated.py b/tm2py/acceptance/simulated.py
index fbf53f48..9345d610 100644
--- a/tm2py/acceptance/simulated.py
+++ b/tm2py/acceptance/simulated.py
@@ -38,10 +38,12 @@ class Simulated:
     simulated_transit_segments_gdf: gpd.GeoDataFrame
     simulated_transit_access_df: pd.DataFrame
     simulated_zero_vehicle_hhs_df: pd.DataFrame
+    reduced_simulated_zero_vehicle_hhs_df: pd.DataFrame
     simulated_station_to_station_df: pd.DataFrame
     simulated_transit_demand_df: pd.DataFrame
     simulated_transit_tech_in_vehicle_times_df: pd.DataFrame
     simulated_transit_district_to_district_by_tech_df: pd.DataFrame
+    am_segment_simulated_boardings_df: pd.DataFrame
 
     HEAVY_RAIL_NETWORK_MODE_DESCR: str
     COMMUTER_RAIL_NETWORK_MODE_DESCR: str
@@ -118,19 +120,16 @@ def __init__(
         self._validate()
 
     def _validate(self):
-
         self._make_transit_mode_dict()
         self._make_simulated_maz_data()
-
         self._read_standard_transit_stops()
         self._read_standard_transit_shapes()
         self._read_standard_transit_routes()
         self._read_standard_node()
-
         self._read_transit_demand()
         self._make_transit_technology_in_vehicle_table_from_skims()
         self._make_district_to_district_transit_summaries()
-
+        self._reduce_simulated_transit_by_segment()
         self._reduce_simulated_transit_boardings()
         self._reduce_simulated_transit_shapes()
         self._reduce_simulated_home_work_flows()
@@ -144,11 +143,46 @@ def _validate(self):
         assert sorted(
             self.simulated_home_work_flows_df.work_county.unique().tolist()
         ) == sorted(self.c.county_names_list)
-
         self._reduce_simulated_roadway_assignment_outcomes()
 
         return
 
+    def _add_model_link_id(self):
+        file_root = self.scenario_dict["scenario"]["root_dir"]
+        df = pd.read_csv(os.path.join(file_root, "acceptance", "crosswalks", "transit_link_id_mapping_am.csv"))
+
+        df = df.rename(
+            columns={
+                "LINK_ID": "#link_id",
+            }
+        )
+
+        return df
+
+    def _reduce_simulated_transit_by_segment(self):
+        file_prefix = "transit_segment_"
+        file_root = self.scenario_dict["scenario"]["root_dir"]
+
+        time_period = "am"  # am only
+
+        df = pd.read_csv(
+            os.path.join(file_root, "output_summaries", file_prefix + time_period + ".csv"),
+            low_memory=False,
+        )
+
+        a_df = df[~(df["line"].str.contains("pnr_"))].reset_index().copy()
+
+        # remove nodes from line name and add node fields
+        a_df["line_long"] = a_df["line"].copy()
+        temp = a_df["line_long"].str.split(pat="-", expand=True)
+        a_df["LINE_ID"] = temp[0]
+        a_df = a_df.rename(columns={"i_node": "INODE", "j_node": "JNODE"})
+        a_df = a_df[~(a_df["JNODE"] == "None")].reset_index().copy()
+        a_df["JNODE"] = a_df["JNODE"].astype("float").astype("Int64")
+        df = a_df[["LINE_ID", "line", "INODE", "JNODE", "board"]]
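+        # e.g. a segment row whose line is "30_JONES-1234-5678" (hypothetical
+        # name) yields LINE_ID "30_JONES"; INODE/JNODE come from i_node/j_node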
+
+        self.am_segment_simulated_boardings_df = df
+
     def _get_operator_name_from_line_name(
         self, input_df: pd.DataFrame, input_column_name: str, output_column_name: str
     ) -> pd.DataFrame:
@@ -170,8 +204,18 @@ def _reduce_simulated_transit_shapes(self):
         gdf["first_row_in_line"] = gdf.groupby("LINE_ID").cumcount() == 0
 
-        # Compute v/c ratio, excluding pnr dummy routes
         df = pd.DataFrame(gdf.drop(columns=["geometry"]))
+
+        # Add am boards
+        a_df = self.am_segment_simulated_boardings_df
+
+        df = pd.merge(
+            df,
+            a_df,
+            how="left",
+            on=["LINE_ID", "INODE", "JNODE"],
+        )
+
+        # Compute v/c ratio, excluding pnr dummy routes
         a_df = df[~(df["LINE_ID"].str.contains("pnr_"))].reset_index().copy()
         a_df["am_segment_capacity_total"] = (
             a_df["capt"] * self.model_morning_capacity_factor
@@ -204,8 +248,17 @@ def _reduce_simulated_transit_shapes(self):
             how="left",
         )
 
+        crosswalk_df = self._add_model_link_id()
+
+        b_gdf = pd.merge(
+            a_gdf,
+            crosswalk_df,
+            on=["INODE", "JNODE", "LINE_ID"],
+            how="left",
+        )
+
         self.simulated_transit_segments_gdf = pd.merge(
-            a_gdf,
+            b_gdf,
             a_df[
                 [
                     "LINE_ID",
                     "INODE",
                     "JNODE",
                     "am_segment_capacity_total",
                     "am_segment_capacity_seated",
                     "am_segment_vc_ratio_total",
                     "am_segment_vc_ratio_seated",
+                    "board",
                 ]
             ],
             on=["LINE_ID", "INODE", "JNODE"],
             how="left",
         )
 
@@ -252,7 +306,7 @@ def _join_coordinates_to_stations(self, input_df, input_column_name):
 
     def _read_standard_node(self):
         root_dir = self.scenario_dict["scenario"]["root_dir"]
-        in_file = os.path.join(root_dir,"inputs","network","standard","node.geojson")
+        in_file = os.path.join(root_dir, "inputs", "trn", "standard", "v12_node.geojson")
         gdf = gpd.read_file(in_file, driver = "GEOJSON")
 
         self.standard_nodes_gdf = gdf
 
         return
 
@@ -262,7 +316,7 @@ def _read_standard_transit_stops(self):
         root_dir = self.scenario_dict["scenario"]["root_dir"]
-        in_file = os.path.join(root_dir, "inputs", "network", "standard", "v12_stops.txt")
+        in_file = os.path.join(root_dir, "inputs", "trn", "standard", "v12_stops.txt")
 
         df = pd.read_csv(in_file)
 
@@ -273,7 +327,7 @@ def _read_standard_transit_shapes(self):
         root_dir = self.scenario_dict["scenario"]["root_dir"]
-        in_file = os.path.join(root_dir, "inputs", "network", "standard", "v12_shapes.txt")
+        in_file = os.path.join(root_dir, "inputs", "trn", "standard", "v12_shapes.txt")
 
         df = pd.read_csv(in_file)
 
@@ -284,7 +338,7 @@ def _read_standard_transit_routes(self):
         root_dir = self.scenario_dict["scenario"]["root_dir"]
-        in_file = os.path.join(root_dir, "inputs", "network", "standard", "v12_routes.txt")
+        in_file = os.path.join(root_dir, "inputs", "trn", "standard", "v12_routes.txt")
 
         df = pd.read_csv(in_file)
 
@@ -344,7 +398,7 @@ def _make_simulated_maz_data(self):
         df = pd.read_csv(in_file)
 
-        index_file = os.path.join(root_dir,"inputs","highway", "mtc_final_network_zone_seq.csv")
+        index_file = os.path.join(root_dir, "inputs", "landuse", "mtc_final_network_zone_seq.csv")
         index_df = pd.read_csv(index_file)
 
         join_df = index_df.rename(columns={"N": "MAZ_ORIGINAL"})[
@@ -381,14 +435,14 @@ def _reduce_simulated_rail_access_summaries(self):
         rail_df = df[
             df["mdesc"].isin(
                 [
-                    self.HEAVY_RAIL_NETWORK_MODE_DESCR,  # check
-                    self.COMMUTER_RAIL_NETWORK_MODE_DESCR,
+                    "HVY",
+                    "COM",
                 ]
             )
         ].copy()
 
         a_df = rail_df["line"].str.split(pat="_", expand=True).copy()
-        rail_df["operator"] = a_df[1]  # check
+        rail_df["operator"] = a_df[1]
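+        # line names are underscore-delimited with the operator code in the
+        # second token; it is mapped to canonical agency names just below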
         rail_df["operator"] = rail_df["operator"].map(
             self.c.canonical_agency_names_dict
         )
@@ -461,7 +515,7 @@ def _reduce_simulated_rail_access_summaries(self):
             right_on=["boarding"],
         ).reset_index(drop=True)
 
-        out_df = (
+        running_df = (
             join_df.groupby(
                 [
                     "operator",
@@ -474,13 +528,13 @@ def _reduce_simulated_rail_access_summaries(self):
             .agg({"boardings": "sum"})
             .reset_index()
         )
-        out_df = out_df.rename(
+        running_df = running_df.rename(
             columns={"boardings": "simulated_boardings", "i_node": "boarding"}
         )
-        out_df["time_period"] = time_period
+        running_df["time_period"] = time_period
 
-        out_df = out_df.append(out_df)
+        out_df = pd.concat([out_df, running_df], axis="rows", ignore_index=True)
 
         self.simulated_transit_access_df = out_df
 
@@ -488,8 +542,8 @@ def _reduce_simulated_rail_access_summaries(self):
         return
 
     def _reduce_simulated_station_to_station(self):
-        if self.model_time_periods is None:
-            self._get_model_time_periods()
+        # if self.model_time_periods is None:
+        #     self._get_model_time_periods()
 
         root_dir = self.scenario_dict["scenario"]["root_dir"]
 
@@ -540,7 +594,7 @@ def _reduce_simulated_station_to_station(self):
                     "simulated",
                 ],
             )
-            df = df.append(subject_df).reset_index(drop=True)
+            df = pd.concat([df, subject_df], axis="rows", ignore_index=True)
 
         df["boarding"] = df["boarding"].astype(int)
         df["alighting"] = df["alighting"].astype(int)
@@ -565,6 +619,8 @@ def _join_tm2_mode_codes(self, input_df):
 
         df = self.c.gtfs_to_tm2_mode_codes_df.copy()
 
+        df = df.drop_duplicates(subset=["tm2_mode"], keep="first")  # temporary fix (10.29)
+
         return_df = pd.merge(
             input_df,
             df,
@@ -586,7 +642,7 @@ def _reduce_simulated_transit_boardings(self):
                 os.path.join(file_root, "output_summaries", file_prefix + time_period + ".csv")
             )
             df["time_period"] = time_period
-            c_df = c_df.append(df)
+            c_df = pd.concat([c_df, df], axis="rows", ignore_index=True)
 
         c_df = self._join_tm2_mode_codes(c_df)
         c_df["operator"] = c_df["operator"].map(self.c.canonical_agency_names_dict)
@@ -656,6 +712,50 @@ def _reduce_simulated_zero_vehicle_households(self):
             .copy()
         )
 
+        # prepare simulated data
+        a_df = (
+            pd.merge(
+                self.simulated_zero_vehicle_hhs_df,
+                self.simulated_maz_data_df[["MAZ_ORIGINAL", "MAZSEQ"]],
+                left_on="maz",
+                right_on="MAZSEQ",
+                how="left",
+            )
+            .drop(columns=["maz", "MAZSEQ"])
+            .rename(columns={"MAZ_ORIGINAL": "maz"})
+        )
+
+        a_df = pd.merge(
+            a_df,
+            self.c.census_2010_to_maz_crosswalk_df,
+            how="left",
+            on="maz",
+        )
+
+        a_df["product"] = a_df["simulated_zero_vehicle_share"] * a_df["maz_share"]
+
+        b_df = (
+            a_df.groupby("blockgroup")
+            .agg({"product": "sum", "simulated_households": "sum"})
+            .reset_index()
+            .rename(columns={"product": "simulated_zero_vehicle_share"})
+        )
+        b_df["tract"] = b_df["blockgroup"].astype("str").str.slice(stop=-1)
+        b_df["product"] = (
+            b_df["simulated_zero_vehicle_share"] * b_df["simulated_households"]
+        )
+
+        c_df = (
+            b_df.groupby("tract")
+            .agg({"product": "sum", "simulated_households": "sum"})
+            .reset_index()
+        )
+        c_df["simulated_zero_vehicle_share"] = (
+            c_df["product"] / c_df["simulated_households"]
+        )
+
+        self.reduced_simulated_zero_vehicle_hhs_df = c_df
+
         return
 
@@ -801,8 +901,8 @@ def _make_transit_mode_dict(self):  # check
         # TODO: this will be in model_config in TM2.2
         transit_mode_dict.update({"p": "pnr"})
 
-        self.HEAVY_RAIL_NETWORK_MODE_DESCR = transit_mode_dict["h"]
-        self.COMMUTER_RAIL_NETWORK_MODE_DESCR = transit_mode_dict["r"]
+        # self.HEAVY_RAIL_NETWORK_MODE_DESCR = transit_mode_dict["h"]
+        # self.COMMUTER_RAIL_NETWORK_MODE_DESCR = transit_mode_dict["r"]
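+        # rail filtering now uses the literal network mode descriptions "HVY"
+        # and "COM" directly (see _reduce_simulated_rail_access_summaries above)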
 
         access_mode_dict = {
             transit_mode_dict["w"]: self.c.WALK_ACCESS_WORD,
@@ -847,7 +947,7 @@ def _make_transit_technology_in_vehicle_table_from_skims(self):
             path_list, self.model_time_periods
         ):
             filename = os.path.join(
-                skim_dir, "trnskm{}_{}.omx".format(time_period, path)
+                skim_dir, "trnskm{}_{}.omx".format(time_period.upper(), path)
             )
             if os.path.exists(filename):
                 omx_handle = omx.open_file(filename)
@@ -860,7 +960,7 @@ def _make_transit_technology_in_vehicle_table_from_skims(self):
                     omx_handle[matrix_name], matrix_name
                 )
                 ivt_df = ivt_df[ivt_df[matrix_name] > 0.0].copy()
-                ivt_df.rename(columns={ivt_df.columns[2]: "IVT"}, inplace = True)
+                ivt_df.rename(columns={ivt_df.columns[2]: "ivt"}, inplace=True)
 
                 # Transfers to get boardings from trips
@@ -870,7 +970,7 @@ def _make_transit_technology_in_vehicle_table_from_skims(self):
                     omx_handle[matrix_name], matrix_name
                 )
                 boards_df = boards_df[boards_df[matrix_name] > 0.0].copy()
-                boards_df.rename(columns={boards_df.columns[2]: "BOARDS"}, inplace = True)
+                boards_df.rename(columns={boards_df.columns[2]: "boards"}, inplace=True)
 
                 path_time_df = pd.merge(
                     ivt_df, boards_df, on=["origin", "destination"], how="left"
                 )
@@ -886,12 +986,14 @@ def _make_transit_technology_in_vehicle_table_from_skims(self):
                     omx_handle[matrix_name], matrix_name
                 )
                 df = df[df[matrix_name] > 0.0].copy()
-                df.rename(columns={df.columns[2]: tech.lower()}, inplace = True)
-                #col_name = "{}".format(tech.lower())
-                #df[col_name] = df[matrix_name]
-                #df = df.drop(columns=[matrix_name]).copy()
-                path_time_df = pd.merge(
-                    path_time_df, df, how="left", on=["origin", "destination"]
+                col_name = "{}".format(tech.lower())
+                df[col_name] = df[matrix_name]
+                df = df.drop(columns=[matrix_name]).copy()
+                path_time_df = pd.merge(
+                    path_time_df,
+                    df,
+                    how="left",
+                    on=["origin", "destination"],
                 )
 
             if running_df is None:
@@ -917,8 +1019,8 @@ def _read_transit_demand(self):
             "KNR_TRN_WLK",
             "WLK_TRN_KNR",
         ]
-        #dem_dir = os.path.join(self.scenario_dict["scenario"]["root_dir"], "demand_matrices")
-        dem_dir = '//corp.pbwan.net/us/CentralData/DCCLDA00/Standard/sag/projects/MTC/Acceptance_Criteria/temp/temp_acceptance/demand_matrices'
+        dem_dir = os.path.join(self.scenario_dict["scenario"]["root_dir"], "demand_matrices", "transit")
 
         out_df = pd.DataFrame()
         for time_period in self.model_time_periods:
@@ -951,18 +1053,17 @@ def _read_transit_demand(self):
 
         return
 
-    def _make_dataframe_from_omx(self, input_mtx: omx, column_name: str):
+    def _make_dataframe_from_omx(self, input_mtx: omx, core_name: str):
         """_summary_
 
         Args:
             input_mtx (omx): _description_
-            column_name (str): _description_
+            core_name (str): _description_
         """
-        df = pd.DataFrame(input_mtx)
-        df = df.add_prefix("dest_")
-        df["id"] = df.index
-        df = pd.wide_to_long(df, "dest_", "id", "destination")
-        df = df.reset_index().rename(columns={"dest_": column_name, "id": "origin"})
+        a = np.array(input_mtx)
+
+        df = pd.DataFrame(a)
+        # unstack() yields (column label, row label) pairs; assuming the usual
+        # OMX convention of rows as origins, level_0 is the destination zone
+        df = df.unstack().reset_index().rename(
+            columns={"level_0": "destination", "level_1": "origin", 0: core_name}
+        )
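+        # illustration: a 2x2 matrix [[a, b], [c, d]] with rows as origins
+        # becomes (origin 1, destination 1, a), (1, 2, b), (2, 1, c),
+        # (2, 2, d) once the one-based zone offset below is applied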
 
         df["origin"] = df["origin"] + 1
         df["destination"] = df["destination"] + 1
 
@@ -970,31 +1071,31 @@ def _make_district_to_district_transit_summaries(self):
 
-        taz_district_dict = self.c.taz_to_district_df.set_index("taz_tm1")[
-            "district_tm1"
+        taz_district_dict = self.c.taz_to_district_df.set_index("taz_tm2")[
+            "district_tm2"
         ].to_dict()
 
         s_dem_df = self.simulated_transit_demand_df.copy()
         s_path_df = self.simulated_transit_tech_in_vehicle_times_df.copy()
 
         s_dem_sum_df = (
-            s_dem_df.groupby(["origin", "destination"])
+            s_dem_df.groupby(["origin", "destination", "time_period"])
             .agg({"simulated_flow": "sum"})
             .reset_index()
         )
         s_df = s_dem_sum_df.merge(
             s_path_df,
-            left_on=["origin", "destination"],
-            right_on=["origin", "destination"],
+            left_on=["origin", "destination", "time_period"],
+            right_on=["origin", "destination", "time_period"],
        )
 
         for tech in self.c.transit_technology_abbreviation_dict.keys():
             column_name = "simulated_{}_flow".format(tech.lower())
             s_df[column_name] = (
                 s_df["simulated_flow"]
-                * s_df["BOARDS"]
+                * s_df["boards"]
                 * s_df["{}".format(tech.lower())]
-                / s_df["IVT"]
+                / s_df["ivt"]
             )
 
         s_df["orig_district"] = s_df["origin"].map(taz_district_dict)
@@ -1022,7 +1123,7 @@ def _make_district_to_district_transit_summaries(self):
 
         return
 
     def _reduce_simulated_traffic_flow(self):
-
+        # TODO: AK - check this
         file_root = self.scenario_dict["scenario"]["root_dir"]
 
         time_of_day_df = pd.DataFrame()
         for time_period in self.model_time_periods:
             emme_scenario = self.network_shapefile_names_dict[time_period]
             gdf = gpd.read_file(
-                os.path.join(file_root + "emme_project/Media/" + emme_scenario + "/" + "emme_links.shp")
+                os.path.join(file_root, "emme_project", "Media", emme_scenario, "emme_links.shp")
             )
             df = gdf[
                 ["ID", "@flow_da", "@flow_lrgt", "@flow_sr2", "@flow_sr3", "@flow_trk", "@flow_dato", "@flow_lrg0","@flow_sr2t", "@flow_sr3t", "@flow_trkt"]
             ]
             df = df.rename(columns={"ID": "model_link_id"})
             df["time_period"] = time_period
-            time_of_day_df = time_of_day_df.append(
-                df
-            )
+            time_of_day_df = pd.concat([time_of_day_df, df], axis="rows", ignore_index=True)
 
-            time_of_day_df["simulated_flow_auto"] = time_of_day_df[
-                ["@flow_da", "@flow_sr2", "@flow_sr3","@flow_dato","@flow_sr2t", "@flow_sr3t"]
-            ].sum(
-                axis=1
-            )
-
-            time_of_day_df["simulated_flow_truck"] = time_of_day_df[
-                ["@flow_trk", "@flow_trkt"]
-            ].sum(
-                axis=1
-            )
-            time_of_day_df["simulated_flow"] = time_of_day_df[
-                ["simulated_flow_auto", "simulated_flow_truck", "@flow_lrgt", "@flow_lrg0"]
-            ].sum(axis=1)
-
-            all_day_df = (
-                time_of_day_df.groupby(
-                    ["model_link_id"]
-                )
-                .sum()
-                .reset_index()
-            )
-            all_day_df["time_period"] = self.c.ALL_DAY_WORD
-
-            out_df = pd.concat([time_of_day_df, all_day_df], axis="rows", ignore_index=True)
+        # sum flows across occupancy/toll classes once all time periods are
+        # loaded, outside the read loop
+        time_of_day_df["simulated_flow_auto"] = time_of_day_df[
+            ["@flow_da", "@flow_sr2", "@flow_sr3", "@flow_dato", "@flow_sr2t", "@flow_sr3t"]
+        ].sum(
+            axis=1
+        )
+        time_of_day_df["simulated_flow_truck"] = time_of_day_df[
+            ["@flow_trk", "@flow_trkt"]
+        ].sum(
+            axis=1
+        )
+        time_of_day_df["simulated_flow"] = time_of_day_df[
+            ["simulated_flow_auto", "simulated_flow_truck", "@flow_lrgt", "@flow_lrg0"]
+        ].sum(axis=1)
+
+        all_day_df = (
+            time_of_day_df.groupby(
+                ["model_link_id"]
+            )
+            .sum()
+            .reset_index()
+        )
+        all_day_df["time_period"] = self.c.ALL_DAY_WORD
+
+        out_df = pd.concat([time_of_day_df, all_day_df], axis="rows", ignore_index=True)
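+        # all-day rows are per-link sums over the time-period rows, appended
+        # with time_period set to the canonical all-day label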
 
         out_df = out_df[
             [
@@ -1092,7 +1190,7 @@ def _reduce_simulated_roadway_assignment_outcomes(self):
         shape_period = "am"
         emme_scenario = self.network_shapefile_names_dict[shape_period]
         shape_gdf = gpd.read_file(
-            os.path.join(file_root +"emme_project/Media/" + emme_scenario + "/" + "emme_links.shp")
+            os.path.join(file_root, "emme_project", "Media", emme_scenario, "emme_links.shp")
         )
         self.simulated_roadway_am_shape_gdf = (
             shape_gdf[["INODE", "JNODE", "#link_id", "geometry"]]
@@ -1115,7 +1213,7 @@ def _reduce_simulated_roadway_assignment_outcomes(self):
         else:
             emme_scenario = self.network_shapefile_names_dict[t]
             gdf = gpd.read_file(
-                os.path.join(file_root + "emme_project/Media/" + emme_scenario + "/" + "emme_links.shp")
+                os.path.join(file_root, "emme_project", "Media", emme_scenario, "emme_links.shp")
             )
             df = pd.DataFrame(gdf)[