
Add codespell, ruff and fixes
g4brielvs committed Feb 16, 2024
1 parent c37af73 commit 0676d79
Showing 19 changed files with 2,468 additions and 1,620 deletions.
46 changes: 38 additions & 8 deletions .pre-commit-config.yaml
@@ -1,9 +1,39 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: check-yaml
args: [--unsafe]
- id: end-of-file-fixer
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
- id: check-ast
- id: check-json
- id: detect-aws-credentials
args: [--allow-missing-credentials]
- id: detect-private-key
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.39.0
hooks:
- id: markdownlint
name: Markdownlint
files: \.(md|mdown|markdown)$
args: [
"--disable=MD013", # line-length
"--disable=MD033", # no-inline-html
]
- repo: https://github.com/codespell-project/codespell
rev: v2.2.6
hooks:
- id: codespell
name: codespell
description: Checks for common misspellings in text files
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.2.1
hooks:
- id: ruff
types_or: [python, pyi, jupyter]
args: [--fix]
- id: ruff-format
types_or: [python, pyi, jupyter]
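
For orientation, and not part of the commit itself: the new hooks correspond to standalone command-line tools, so their effect can be reproduced outside pre-commit. A minimal Python sketch, assuming codespell and ruff are installed in the active environment (the commands mirror the hook ids above; check=False is used because these linters signal findings through a non-zero exit code):

import subprocess

# Run the same checks the new hooks add; each tool exits non-zero on findings.
for cmd in (
    ["codespell"],                    # common-misspelling check (codespell hook)
    ["ruff", "check", "--fix", "."],  # lint with autofix (ruff hook, args: [--fix])
    ["ruff", "format", "."],          # formatter (ruff-format hook)
):
    subprocess.run(cmd, check=False)

Equivalently, running "pre-commit run --all-files" applies every hook configured above in one pass.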
60 changes: 41 additions & 19 deletions Tutorials/EXAMPLE Finding_links_between_pairs.ipynb
@@ -15,8 +15,8 @@
"metadata": {},
"outputs": [],
"source": [
"import os, sys, time\n",
"import rasterio\n",
"import os\n",
"import sys\n",
"\n",
"import networkx as nx\n",
"import geopandas as gpd\n",
@@ -45,8 +45,12 @@
"# Define input data\n",
"pth = \"./\"\n",
"# Read in cleaned pickle from earlier analysis and convert to time\n",
"G = nx.read_gpickle(os.path.join(pth, 'tutorial_outputs', r'iceland_network_clean.pickle'))\n",
"G_time = gn.convert_network_to_time(G, distance_tag = 'length', road_col = 'infra_type', factor = 1000)\n",
"G = nx.read_gpickle(\n",
" os.path.join(pth, \"tutorial_outputs\", r\"iceland_network_clean.pickle\")\n",
")\n",
"G_time = gn.convert_network_to_time(\n",
" G, distance_tag=\"length\", road_col=\"infra_type\", factor=1000\n",
")\n",
"# Define origins and destinations files\n",
"rek_grid_file = os.path.join(pth, \"tutorial_data\", \"rek_grid.shp\")\n",
"rek_pop_grid_file = rek_grid_file.replace(\".shp\", \"_pop.shp\")\n",
@@ -64,10 +68,22 @@
"outputs": [],
"source": [
"# calculate the origins and destinations by snapping to the road network\n",
"origins_df = gn.pandana_snap_c(G_time, rek_grid, source_crs='epsg:4326', target_crs='epsg:4326',add_dist_to_node_col = True)\n",
"origins = list(set(origins_df['NN']))\n",
"destinations_df = gn.pandana_snap_c(G_time, in_churches, source_crs='epsg:4326', target_crs='epsg:4326',add_dist_to_node_col = True)\n",
"destinations = list(set(destinations_df['NN']))"
"origins_df = gn.pandana_snap_c(\n",
" G_time,\n",
" rek_grid,\n",
" source_crs=\"epsg:4326\",\n",
" target_crs=\"epsg:4326\",\n",
" add_dist_to_node_col=True,\n",
")\n",
"origins = list(set(origins_df[\"NN\"]))\n",
"destinations_df = gn.pandana_snap_c(\n",
" G_time,\n",
" in_churches,\n",
" source_crs=\"epsg:4326\",\n",
" target_crs=\"epsg:4326\",\n",
" add_dist_to_node_col=True,\n",
")\n",
"destinations = list(set(destinations_df[\"NN\"]))"
]
},
{
@@ -161,10 +177,12 @@
}
],
"source": [
"obj_nodes = nx.shortest_path(G_time, source=origins[0], target=destinations[0], weight=\"time\")\n",
"obj_nodes = nx.shortest_path(\n",
" G_time, source=origins[0], target=destinations[0], weight=\"time\"\n",
")\n",
"print(origins[0])\n",
"print(destinations[0])\n",
"obj_nodes # this is a list of the nodes that connected make the shortest path from the origin to the destination"
"obj_nodes # this is a list of the nodes that connected make the shortest path from the origin to the destination"
]
},
{
@@ -601,14 +619,18 @@
"oIdx = 0\n",
"for org in origins:\n",
" oIdx = oIdx + 1\n",
" print(f'{oIdx} of {len(origins)}')\n",
" print(f\"{oIdx} of {len(origins)}\")\n",
" for dest in destinations:\n",
" obj_nodes = nx.shortest_path(G_time, source=org, target=dest, weight=\"time\")\n",
" all_edges = []\n",
" for idx in range(0, len(obj_nodes) - 1):\n",
" start_node = obj_nodes[idx]\n",
" end_node = obj_nodes[idx + 1]\n",
" cur_edge = edges_gdf.loc[(edges_gdf['stnode'] == start_node) & (edges_gdf['endnode'] == end_node), 'geometry'].iloc[0]\n",
" cur_edge = edges_gdf.loc[\n",
" (edges_gdf[\"stnode\"] == start_node)\n",
" & (edges_gdf[\"endnode\"] == end_node),\n",
" \"geometry\",\n",
" ].iloc[0]\n",
" all_edges.append(cur_edge)\n",
" all_connections.append([start_node, end_node, cur_edge])\n",
" all_res.append([org, dest, MultiLineString(all_edges)])"
@@ -621,8 +643,8 @@
"outputs": [],
"source": [
"# Write all connections to file\n",
"all_results = pd.DataFrame(all_res, columns=['O','D','geometry'])\n",
"all_results.to_csv(os.path.join(pth, \"tutorial_data\",\"all_OD_links.csv\"))"
"all_results = pd.DataFrame(all_res, columns=[\"O\", \"D\", \"geometry\"])\n",
"all_results.to_csv(os.path.join(pth, \"tutorial_data\", \"all_OD_links.csv\"))"
]
},
{
@@ -632,12 +654,12 @@
"outputs": [],
"source": [
"# Tabulate usage of individual links and write to file\n",
"all_conn = pd.DataFrame(all_connections, columns=['start','node','geometry'])\n",
"all_connections_count = pd.DataFrame(all_conn.groupby(['start','node']).count())\n",
"all_conn = pd.DataFrame(all_connections, columns=[\"start\", \"node\", \"geometry\"])\n",
"all_connections_count = pd.DataFrame(all_conn.groupby([\"start\", \"node\"]).count())\n",
"all_connections_count.reset_index(inplace=True)\n",
"all_connections_first = pd.DataFrame(all_conn.groupby(['start','node']).first())\n",
"all_connections_first = pd.DataFrame(all_conn.groupby([\"start\", \"node\"]).first())\n",
"all_connections_first.reset_index(inplace=True)\n",
"all_connections_first['count'] = all_connections_count['geometry']"
"all_connections_first[\"count\"] = all_connections_count[\"geometry\"]"
]
},
{
@@ -646,7 +668,7 @@
"metadata": {},
"outputs": [],
"source": [
"all_connections_first.to_csv(os.path.join(pth, \"tutorial_data\",\"OD_links_usage.csv\"))"
"all_connections_first.to_csv(os.path.join(pth, \"tutorial_data\", \"OD_links_usage.csv\"))"
]
},
{
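
The pattern this notebook diff reformats — walking each origin–destination shortest path and tallying how often each directed link is used — can be sketched self-contained with networkx alone. The toy graph and values below are illustrative, not from the tutorial data:

import networkx as nx
from collections import Counter

# Toy directed network with travel times, standing in for G_time above.
G = nx.DiGraph()
G.add_weighted_edges_from(
    [(1, 2, 1.0), (2, 3, 2.0), (1, 3, 5.0), (3, 4, 1.0)], weight="time"
)

edge_use = Counter()
for org in (1, 2):        # origins
    for dest in (3, 4):   # destinations
        path = nx.shortest_path(G, source=org, target=dest, weight="time")
        # Each consecutive node pair along the path is one traversed link.
        for start_node, end_node in zip(path, path[1:]):
            edge_use[(start_node, end_node)] += 1

print(edge_use.most_common())  # most heavily used links first

This mirrors the groupby count on (stnode, endnode) pairs that the notebook writes to OD_links_usage.csv.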
27 changes: 15 additions & 12 deletions Tutorials/EXAMPLE Fixing your road network.ipynb
@@ -13,8 +13,8 @@
"metadata": {},
"outputs": [],
"source": [
"import os, sys\n",
"import time\n",
"import os\n",
"import sys\n",
"import networkx as nx\n",
"import osmnx as ox"
]
@@ -36,11 +36,11 @@
"metadata": {},
"outputs": [],
"source": [
"pth = \"./\" # change this path to your working folder\n",
"data_pth = os.path.join(pth, 'tutorial_outputs')\n",
"pth = \"./\" # change this path to your working folder\n",
"data_pth = os.path.join(pth, \"tutorial_outputs\")\n",
"\n",
"# read back your graph from step 1 from you saved pickle\n",
"G = nx.read_gpickle(os.path.join(data_pth, 'iceland_network_clean.pickle'))"
"G = nx.read_gpickle(os.path.join(data_pth, \"iceland_network_clean.pickle\"))"
]
},
{
@@ -50,7 +50,10 @@
"outputs": [],
"source": [
"# note the use of sorted to sort by number of edges\n",
"list_of_subgraphs = [G.subgraph(c).copy() for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True)]"
"list_of_subgraphs = [\n",
" G.subgraph(c).copy()\n",
" for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True)\n",
"]"
]
},
{
@@ -158,8 +161,8 @@
],
"source": [
"# plotting functions only work if the graphs have a name and a crs attribute\n",
"G_largest.graph['crs'] = 'epsg:32646'\n",
"G_largest.graph['name'] = 'Iceland'\n",
"G_largest.graph[\"crs\"] = \"epsg:32646\"\n",
"G_largest.graph[\"name\"] = \"Iceland\"\n",
"\n",
"# largest connected subgraph\n",
"ox.plot_graph(G_largest, fig_width=10, edge_linewidth=1, node_size=7)"
@@ -193,8 +196,8 @@
}
],
"source": [
"G_second_largest.graph['crs'] = 'epsg:32646'\n",
"G_second_largest.graph['name'] = 'Iceland'\n",
"G_second_largest.graph[\"crs\"] = \"epsg:32646\"\n",
"G_second_largest.graph[\"name\"] = \"Iceland\"\n",
"\n",
"# second largest connected subgraph\n",
"ox.plot_graph(G_second_largest, fig_width=10, edge_linewidth=1, node_size=7)"
@@ -216,7 +219,7 @@
],
"source": [
"edges_largest = gn.edge_gdf_from_graph(G_largest)\n",
"edges_largest.to_csv(os.path.join(data_pth, 'edges_largest.csv'))"
"edges_largest.to_csv(os.path.join(data_pth, \"edges_largest.csv\"))"
]
},
{
@@ -226,7 +229,7 @@
"outputs": [],
"source": [
"edges_second = gn.edge_gdf_from_graph(G_second_largest)\n",
"edges_second.to_csv(os.path.join(data_pth, 'edges_second.csv'))"
"edges_second.to_csv(os.path.join(data_pth, \"edges_second.csv\"))"
]
},
{
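
As a compact illustration of the component-extraction step this notebook performs — sorting strongly connected components by size and keeping the largest — here is a self-contained sketch on a toy directed graph (the names mirror the notebook; the data is made up):

import networkx as nx

# Toy directed graph: nodes 1-3 form one strongly connected component, 4-5 another.
G = nx.DiGraph([(1, 2), (2, 1), (2, 3), (3, 1), (4, 5), (5, 4)])

# Sort components by size so index 0 is the largest, as in the notebook.
components = sorted(nx.strongly_connected_components(G), key=len, reverse=True)
G_largest = G.subgraph(components[0]).copy()
G_second_largest = G.subgraph(components[1]).copy()

print(G_largest.number_of_nodes(), G_second_largest.number_of_nodes())  # 3 2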
60 changes: 41 additions & 19 deletions Tutorials/EXAMPLE Gravity Calculations.ipynb
@@ -6,7 +6,8 @@
"metadata": {},
"outputs": [],
"source": [
"import os, sys, time\n",
"import os\n",
"import sys\n",
"import rasterio\n",
"\n",
"import networkx as nx\n",
@@ -34,8 +35,12 @@
"# Define input data\n",
"pth = \"./\"\n",
"# Read in cleaned pickle from earlier analysis and convert to time\n",
"G = nx.read_gpickle(os.path.join(pth, 'tutorial_outputs', r'iceland_network_clean.pickle'))\n",
"G_time = gn.convert_network_to_time(G, distance_tag = 'length', road_col = 'infra_type', factor = 1000)\n",
"G = nx.read_gpickle(\n",
" os.path.join(pth, \"tutorial_outputs\", r\"iceland_network_clean.pickle\")\n",
")\n",
"G_time = gn.convert_network_to_time(\n",
" G, distance_tag=\"length\", road_col=\"infra_type\", factor=1000\n",
")\n",
"# Define origins and destinations\n",
"rek_grid_file = os.path.join(pth, \"tutorial_data\", \"rek_grid.shp\")\n",
"rek_pop_grid_file = rek_grid_file.replace(\".shp\", \"_pop.shp\")\n",
@@ -67,14 +72,14 @@
"if not os.path.exists(rek_pop_grid_file):\n",
" population_data = \"R:\\GLOBAL\\POP&DEMO\\LandScan_2012\\lspop2012.tif\"\n",
"\n",
" in_grid = gpd.read_file(rek_grid_file) \n",
" in_grid = gpd.read_file(rek_grid_file)\n",
" in_pop = rasterio.open(population_data)\n",
" in_grid = in_grid.to_crs(in_pop.crs)\n",
"\n",
" geoms = ([x.x, x.y] for x in in_grid['geometry'])\n",
" geoms = ([x.x, x.y] for x in in_grid[\"geometry\"])\n",
" vals = in_pop.sample(geoms)\n",
" in_grid['Pop'] = [x[0]+1 for x in vals]\n",
" \n",
" in_grid[\"Pop\"] = [x[0] + 1 for x in vals]\n",
"\n",
" in_grid.to_file(rek_pop_grid_file)"
]
},
@@ -93,7 +98,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Read in the input \n",
"# Read in the input\n",
"rek_grid = gpd.read_file(rek_pop_grid_file)\n",
"in_churches = gpd.read_file(churches_file)\n",
"in_churches = in_churches.to_crs(rek_grid.crs)"
@@ -106,10 +111,22 @@
"outputs": [],
"source": [
"# calculate the origins and destinations by snapping to the road network\n",
"origins_df = gn.pandana_snap_c(G_time, rek_grid, source_crs='epsg:4326', target_crs='epsg:4326',add_dist_to_node_col = True)\n",
"origins = list(set(origins_df['NN']))\n",
"destinations_df = gn.pandana_snap_c(G_time, in_churches, source_crs='epsg:4326', target_crs='epsg:4326',add_dist_to_node_col = True)\n",
"destinations = list(set(destinations_df['NN']))"
"origins_df = gn.pandana_snap_c(\n",
" G_time,\n",
" rek_grid,\n",
" source_crs=\"epsg:4326\",\n",
" target_crs=\"epsg:4326\",\n",
" add_dist_to_node_col=True,\n",
")\n",
"origins = list(set(origins_df[\"NN\"]))\n",
"destinations_df = gn.pandana_snap_c(\n",
" G_time,\n",
" in_churches,\n",
" source_crs=\"epsg:4326\",\n",
" target_crs=\"epsg:4326\",\n",
" add_dist_to_node_col=True,\n",
")\n",
"destinations = list(set(destinations_df[\"NN\"]))"
]
},
{
@@ -118,7 +135,7 @@
"metadata": {},
"outputs": [],
"source": [
"OD = gn.calculate_OD(G_time, origins, destinations, fail_value = 9999999)"
"OD = gn.calculate_OD(G_time, origins, destinations, fail_value=9999999)"
]
},
{
@@ -129,9 +146,12 @@
"source": [
"# The calculate_od_raw contains functions for performing OD and gravity calculations\n",
"import GOSTnets.calculate_od_raw as calcOD\n",
"\n",
"# For this calculation the origins are all weighted equally with a value of 1\n",
"gravity = calcOD.calculate_gravity(OD, oWeight=[1] * OD.shape[0], dWeight = destinations_df['Pop'])\n",
"gravity['NN'] = origins"
"gravity = calcOD.calculate_gravity(\n",
" OD, oWeight=[1] * OD.shape[0], dWeight=destinations_df[\"Pop\"]\n",
")\n",
"gravity[\"NN\"] = origins"
]
},
{
@@ -152,10 +172,12 @@
"outputs": [],
"source": [
"# Now let's introduce the population numbers into the origins\n",
"origins = origins_df['NN']\n",
"destinations = destinations_df['NN']\n",
"OD = gn.calculate_OD(G_time, origins, destinations, fail_value = 9999999)\n",
"gravity = calcOD.calculate_gravity(OD, oWeight=origins_df['Pop'], dWeight = destinations_df['Pop'])"
"origins = origins_df[\"NN\"]\n",
"destinations = destinations_df[\"NN\"]\n",
"OD = gn.calculate_OD(G_time, origins, destinations, fail_value=9999999)\n",
"gravity = calcOD.calculate_gravity(\n",
" OD, oWeight=origins_df[\"Pop\"], dWeight=destinations_df[\"Pop\"]\n",
")"
]
},
{
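
The internals of calcOD.calculate_gravity are not shown in this diff. Purely as a hedged illustration of the generic gravity idea it builds on — origin weight times destination weight, discounted by a power of travel cost; GOSTnets' actual decay function and parameters may differ — a small NumPy sketch with toy numbers:

import numpy as np

# Toy OD travel-time matrix: rows are origins, columns are destinations.
OD = np.array([[10.0, 30.0],
               [20.0, 15.0]])
o_weight = np.array([1.0, 1.0])      # equal origin weights, as in the first call above
d_weight = np.array([100.0, 50.0])   # e.g. destination populations

# Classic gravity form: attraction decays with the square of travel cost.
gravity = (o_weight[:, None] * d_weight[None, :]) / OD**2
access = gravity.sum(axis=1)         # one accessibility score per origin
print(access)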
(diff for the remaining 15 changed files not shown)

