diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ff69dcb..429b04a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -5,110 +5,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v2
- - name: Setup Anaconda
- uses: conda-incubator/setup-miniconda@v2
- with:
- auto-update-conda: true
- auto-activate-base: true
- miniconda-version: 'latest'
- python-version: 3.11
- environment-file: environment.yml
- activate-environment: networks
- - name: Install latex dependencies
- run: |
- sudo apt-get -qq update
- sudo apt-get install -y \
- texlive-latex-recommended \
- texlive-latex-extra \
- texlive-fonts-recommended \
- texlive-fonts-extra \
- texlive-xetex \
- latexmk \
- xindy \
- dvipng \
- cm-super \
- msttcorefonts
- - name: Set up Julia
- uses: julia-actions/setup-julia@v1
- with:
- version: 1.9
- - name: Install IJulia and Setup Project
- shell: bash
- run: |
- julia -e 'using Pkg; ENV["PYTHON"]="/usr/share/miniconda3"; Pkg.add(["JuMP","GLPK","PyPlot","IJulia"]); using PyPlot'
- # - name: Install latex dependencies
- # run: |
- # sudo apt-get -qq update
- # sudo apt-get install -y \
- # texlive-latex-recommended \
- # texlive-latex-extra \
- # texlive-fonts-recommended \
- # texlive-fonts-extra \
- # texlive-xetex \
- # latexmk \
- # xindy \
- # dvipng \
- # cm-super \
- # msttcorefonts
- # - name: Display Conda Environment Versions
- # shell: bash -l {0}
- # run: conda list
- # - name: Display Pip Versions
- # shell: bash -l {0}
- # run: pip list
- # - name: Download "build" folder (cache)
- # uses: dawidd6/action-download-artifact@v2
- # with:
- # workflow: cache.yml
- # branch: main
- # name: build-cache
- # path: _build
- # # Build Assets (Download Notebooks and PDF via LaTeX)
- # - name: Build PDF from LaTeX
- # shell: bash -l {0}
- # run: |
- # jb build lectures --builder pdflatex --path-output ./ -n --keep-going
- # mkdir -p _build/html/_pdf
- # cp -u _build/latex/*.pdf _build/html/_pdf
- # - name: Build Download Notebooks (sphinx-tojupyter)
- # shell: bash -l {0}
- # run: |
- # jb build lectures --path-output ./ --builder=custom --custom-builder=jupyter
- # mkdir -p _build/html/_notebooks
- # cp -u _build/jupyter/*.ipynb _build/html/_notebooks
- # Build HTML (Website)
- - name: Build HTML
- shell: bash -l {0}
- run: |
- jb build code_book --path-output ./
- - name: Upload Execution Reports
- uses: actions/upload-artifact@v2
- if: failure()
- with:
- name: execution-reports
- path: _build/html/reports
- - name: Custom Front Page
- shell: bash -l {0}
- run: |
- cp -r frontpage/assets _build/html/
- cp -r frontpage/images _build/html/
- cp frontpage/index.html _build/html/
- - name: Book PDF
- shell: bash -l {0}
- run: |
- mkdir -p _build/html/_downloads
- cp pdf/networks.pdf _build/html/_downloads/
- cp pdf/networks_chinese.pdf _build/html/_downloads/
- - name: Save Build as Artifact
- uses: actions/upload-artifact@v1
- with:
- name: _build
- path: _build
+ uses: actions/checkout@v4
- name: Preview Deploy to Netlify
- uses: nwtgck/actions-netlify@v1.1
+ uses: nwtgck/actions-netlify@v2
with:
- publish-dir: '_build/html/'
+ publish-dir: 'website'
production-branch: main
github-token: ${{ secrets.GITHUB_TOKEN }}
deploy-message: "Preview Deploy from GitHub Actions"
diff --git a/code_book/downloads/networks-book.pdf b/book/networks-book.pdf
similarity index 100%
rename from code_book/downloads/networks-book.pdf
rename to book/networks-book.pdf
diff --git a/code_book/_config.yml b/code_book/_config.yml
deleted file mode 100644
index 0e923b4..0000000
--- a/code_book/_config.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Book settings
-title: |
- Economic Networks
- Theory and Computation
-author: John Stachurski and Thomas J. Sargent
-only_build_toc_files: true
-
-execute:
- timeout: 100
-
-sphinx:
- extra_extensions: ["sphinx_design"]
- config:
- html_js_files:
- - https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js
- html_theme: quantecon_book_theme
- html_theme_options:
- header_organisation_url: https://quantecon.org
- header_organisation: QuantEcon
- repository_url: https://github.com/quantecon/book-networks
- twitter: quantecon
- twitter_logo_url: https://assets.quantecon.org/img/qe-twitter-logo.png
- og_logo_url: https://assets.quantecon.org/img/qe-og-logo.png
- description: |
- This website is the companion site for Economic Networks: Theory and Computation written by John Stachurski and Thomas J. Sargent
- keywords: Python, QuantEcon, Quantitative Economics, Economics, Networks, Tom J. Sargent, John Stachurski
- # google_analytics_id: UA-54984338-10
- mathjax3_config:
- tex:
- macros:
- "Exp" : ["\\operatorname{Exp}"]
- "Binomial" : ["\\operatorname{Binomial}"]
- "Poisson" : ["\\operatorname{Poisson}"]
- "BB" : ["\\mathbb{B}"]
- "EE" : ["\\mathbb{E}"]
- "PP" : ["\\mathbb{P}"]
- "RR" : ["\\mathbb{R}"]
- "NN" : ["\\mathbb{N}"]
- "ZZ" : ["\\mathbb{Z}"]
- "dD" : ["\\mathcal{D}"]
- "fF" : ["\\mathcal{F}"]
- "lL" : ["\\mathcal{L}"]
- "linop" : ["\\mathcal{L}(\\mathbb{B})"]
- "linopell" : ["\\mathcal{L}(\\ell_1)"]
-
-latex:
- latex_engine: "xelatex"
- latex_documents:
- targetname: book.tex
\ No newline at end of file
diff --git a/code_book/_toc.yml b/code_book/_toc.yml
deleted file mode 100644
index 1405efd..0000000
--- a/code_book/_toc.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-format: jb-book
-root: intro
-chapters:
-- url: https://networks.quantecon.org
- title: Home
-- file: ch_intro
-- file: ch_production
-- file: ch_opt
-- file: ch_opt_julia
-- file: ch_mcs
-- file: ch_fpms
-- file: appendix
-- file: pkg_funcs
-- file: status
diff --git a/code_book/appendix.md b/code_book/appendix.md
deleted file mode 100644
index 9e4909d..0000000
--- a/code_book/appendix.md
+++ /dev/null
@@ -1,547 +0,0 @@
----
-jupytext:
- cell_metadata_filter: -all
- formats: md:myst
- text_representation:
- extension: .md
- format_name: myst
- format_version: 0.13
- jupytext_version: 1.10.3
-kernelspec:
- display_name: Python 3
- language: python
- name: python3
----
-
-# Appendix Code
-
-We begin with some imports
-
-```{code-cell}
-import numpy as np
-import matplotlib.pyplot as plt
-import quantecon_book_networks
-from mpl_toolkits.mplot3d.axes3d import Axes3D, proj3d
-from matplotlib import cm
-export_figures = False
-quantecon_book_networks.config("matplotlib")
-```
-
-## Functions
-
-### One-to-one and onto functions on $(0,1)$
-
-We start by defining the domain and our one-to-one and onto function examples.
-
-
-```{code-cell}
-x = np.linspace(0, 1, 100)
-titles = 'one-to-one', 'onto'
-labels = '$f(x)=1/2 + x/2$', '$f(x)=4x(1-x)$'
-funcs = lambda x: 1/2 + x/2, lambda x: 4 * x * (1-x)
-```
-
-The figure can now be produced as follows.
-
-```{code-cell}
-fig, axes = plt.subplots(1, 2, figsize=(10, 4))
-for f, ax, lb, ti in zip(funcs, axes, labels, titles):
- ax.set_xlim(0, 1)
- ax.set_ylim(0, 1.01)
- ax.plot(x, f(x), label=lb)
- ax.set_title(ti, fontsize=12)
- ax.legend(loc='lower center', fontsize=12, frameon=False)
- ax.set_xticks((0, 1))
- ax.set_yticks((0, 1))
-
-if export_figures:
- plt.savefig("figures/func_types_1.pdf")
-plt.show()
-```
-
-### Some functions are bijections
-
-This figure can be produced in a similar manner to 6.1.
-
-```{code-cell}
-x = np.linspace(0, 1, 100)
-titles = 'constant', 'bijection'
-labels = '$f(x)=1/2$', '$f(x)=1-x$'
-funcs = lambda x: 1/2 + 0 * x, lambda x: 1-x
-
-fig, axes = plt.subplots(1, 2, figsize=(10, 4))
-for f, ax, lb, ti in zip(funcs, axes, labels, titles):
- ax.set_xlim(0, 1)
- ax.set_ylim(0, 1.01)
- ax.plot(x, f(x), label=lb)
- ax.set_title(ti, fontsize=12)
- ax.legend(loc='lower center', fontsize=12, frameon=False)
- ax.set_xticks((0, 1))
- ax.set_yticks((0, 1))
-
-if export_figures:
- plt.savefig("figures/func_types_2.pdf")
-plt.show()
-```
-
-## Fixed Points
-
-### Graph and fixed points of $G \colon x \mapsto 2.125/(1 + x^{-4})$
-
-We begin by defining the domain and the function.
-
-```{code-cell}
-xmin, xmax = 0.0000001, 2
-xgrid = np.linspace(xmin, xmax, 200)
-g = lambda x: 2.125 / (1 + x**(-4))
-```
-
-Next we define our fixed points.
-
-```{code-cell}
-fps_labels = ('$x_\ell$', '$x_m$', '$x_h$' )
-fps = (0.01, 0.94, 1.98)
-coords = ((40, 80), (40, -40), (-40, -80))
-```
-
-Finally we can produce the figure.
-
-```{code-cell}
-fig, ax = plt.subplots(figsize=(6.5, 6))
-
-ax.set_xlim(xmin, xmax)
-ax.set_ylim(xmin, xmax)
-
-ax.plot(xgrid, g(xgrid), 'b-', lw=2, alpha=0.6, label='$G$')
-ax.plot(xgrid, xgrid, 'k-', lw=1, alpha=0.7, label='$45^o$')
-
-ax.legend(fontsize=14)
-
-ax.plot(fps, fps, 'ro', ms=8, alpha=0.6)
-
-for (fp, lb, coord) in zip(fps, fps_labels, coords):
- ax.annotate(lb,
- xy=(fp, fp),
- xycoords='data',
- xytext=coord,
- textcoords='offset points',
- fontsize=16,
- arrowprops=dict(arrowstyle="->"))
-
-if export_figures:
- plt.savefig("figures/three_fixed_points.pdf")
-plt.show()
-```
-
-## Complex Numbers
-
-### The complex number $(a, b) = r e^{i \phi}$
-
-We start by abbreviating some useful values and functions.
-
-```{code-cell}
-π = np.pi
-zeros = np.zeros
-ones = np.ones
-fs = 18
-```
-
-Next we set our parameters.
-
-```{code-cell}
-r = 2
-φ = π/3
-x = r * np.cos(φ)
-x_range = np.linspace(0, x, 1000)
-φ_range = np.linspace(0, φ, 1000)
-```
-
-Finally we produce the plot.
-
-```{code-cell}
-fig = plt.figure(figsize=(7, 7))
-ax = plt.subplot(111, projection='polar')
-
-ax.plot((0, φ), (0, r), marker='o', color='b', alpha=0.5) # Plot r
-ax.plot(zeros(x_range.shape), x_range, color='b', alpha=0.5) # Plot x
-ax.plot(φ_range, x / np.cos(φ_range), color='b', alpha=0.5) # Plot y
-ax.plot(φ_range, ones(φ_range.shape) * 0.15, color='green') # Plot φ
-
-ax.margins(0) # Let the plot starts at origin
-
-ax.set_rmax(2)
-ax.set_rticks((1, 2)) # Less radial ticks
-ax.set_rlabel_position(-88.5) # Get radial labels away from plotted line
-
-ax.text(φ, r+0.04 , r'$(a, b) = (1, \sqrt{3})$', fontsize=fs) # Label z
-ax.text(φ+0.4, 1 , '$r = 2$', fontsize=fs) # Label r
-ax.text(0-0.4, 0.5, '$1$', fontsize=fs) # Label x
-ax.text(0.5, 1.2, '$\sqrt{3}$', fontsize=fs) # Label y
-ax.text(0.3, 0.25, '$\\varphi = \\pi/3$', fontsize=fs) # Label θ
-
-xT=plt.xticks()[0]
-xL=['0',
- r'$\frac{\pi}{4}$',
- r'$\frac{\pi}{2}$',
- r'$\frac{3\pi}{4}$',
- r'$\pi$',
- r'$\frac{5\pi}{4}$',
- r'$\frac{3\pi}{2}$',
- r'$\frac{7\pi}{4}$']
-
-plt.xticks(xT, xL, fontsize=fs+2)
-ax.grid(True)
-
-if export_figures:
- plt.savefig("figures/complex_number.pdf")
-plt.show()
-```
-
-## Convergence
-
-### Convergence of a sequence to the origin in $\mathbb{R}^3$
-
-We define our transformation matrix, initial point, and number of iterations.
-
-```{code-cell}
-θ = 0.1
-A = ((np.cos(θ), - np.sin(θ), 0.0001),
- (np.sin(θ), np.cos(θ), 0.001),
- (np.sin(θ), np.cos(θ), 1))
-
-A = 0.98 * np.array(A)
-p = np.array((1, 1, 1))
-n = 200
-```
-
-Now we can produce the plot by repeatedly transforming our point with the transformation matrix and plotting each resulting point.
-
-```{code-cell}
-fig = plt.figure(figsize=(6, 6))
-ax = fig.add_subplot(111, projection='3d')
-ax.view_init(elev=20, azim=-40)
-
-ax.set_xlim((-1.5, 1.5))
-ax.set_ylim((-1.5, 1.5))
-ax.set_xticks((-1,0,1))
-ax.set_yticks((-1,0,1))
-
-for i in range(n):
- x, y, z = p
- ax.plot([x], [y], [z], 'o', ms=4, color=cm.jet_r(i / n))
- p = A @ p
-
-if export_figures:
- plt.savefig("figures/euclidean_convergence_1.pdf")
-plt.show()
-```
-
-## Linear Algebra
-
-### The span of vectors $u$, $v$, $w$ in $\mathbb{R}$
-
-We begin by importing the `FancyArrowPatch` class and extending it.
-
-```{code-cell}
-from matplotlib.patches import FancyArrowPatch
-
-class Arrow3D(FancyArrowPatch):
- def __init__(self, xs, ys, zs, *args, **kwargs):
- FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
- self._verts3d = xs, ys, zs
-
- def do_3d_projection(self, renderer=None):
- xs3d, ys3d, zs3d = self._verts3d
- xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)
- self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
-
- return np.min(zs)
-
- def draw(self, renderer):
- xs3d, ys3d, zs3d = self._verts3d
- xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)
- self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
- FancyArrowPatch.draw(self, renderer)
-```
-
-Next we generate our vectors $u$, $v$, $w$, ensuring linear dependence.
-
-```{code-cell}
-α, β = 0.2, 0.1
-def f(x, y):
- return α * x + β * y
-
-# Vector locations, by coordinate
-x_coords = np.array((3, 3, -3.5))
-y_coords = np.array((4, -4, 3.0))
-z_coords = f(x_coords, y_coords)
-
-vecs = [np.array((x, y, z)) for x, y, z in zip(x_coords, y_coords, z_coords)]
-```
-
-Next we define the spanning plane.
-
-```{code-cell}
-x_min, x_max = -5, 5
-y_min, y_max = -5, 5
-
-grid_size = 20
-xr2 = np.linspace(x_min, x_max, grid_size)
-yr2 = np.linspace(y_min, y_max, grid_size)
-x2, y2 = np.meshgrid(xr2, yr2)
-z2 = f(x2, y2)
-```
-
-Finally we generate the plot.
-
-```{code-cell}
-fig = plt.figure(figsize=(12, 7))
-ax = plt.axes(projection ='3d')
-ax.view_init(elev=10., azim=-80)
-
-ax.set(xlim=(x_min, x_max),
- ylim=(x_min, x_max),
- zlim=(x_min, x_max),
- xticks=(0,), yticks=(0,), zticks=(0,))
-
-# Draw the axes
-gs = 3
-z = np.linspace(x_min, x_max, gs)
-x = np.zeros(gs)
-y = np.zeros(gs)
-ax.plot(x, y, z, 'k-', lw=2, alpha=0.5)
-ax.plot(z, x, y, 'k-', lw=2, alpha=0.5)
-ax.plot(y, z, x, 'k-', lw=2, alpha=0.5)
-
-# Draw the vectors
-for v in vecs:
- a = Arrow3D([0, v[0]],
- [0, v[1]],
- [0, v[2]],
- mutation_scale=20,
- lw=1,
- arrowstyle="-|>",
- color="b")
- ax.add_artist(a)
-
-
-for v, label in zip(vecs, ('u', 'v', 'w')):
- v = v * 1.1
- ax.text(v[0], v[1], v[2],
- f'${label}$',
- fontsize=14)
-
-# Draw the plane
-grid_size = 20
-xr2 = np.linspace(x_min, x_max, grid_size)
-yr2 = np.linspace(y_min, y_max, grid_size)
-x2, y2 = np.meshgrid(xr2, yr2)
-z2 = f(x2, y2)
-
-ax.plot_surface(x2, y2, z2, rstride=1, cstride=1, cmap=cm.jet,
- linewidth=0, antialiased=True, alpha=0.2)
-
-
-if export_figures:
- plt.savefig("figures/span1.pdf")
-plt.show()
-```
-
-## Linear Maps Are Matrices
-
-### Equivalence of the onto and one-to-one properties (for linear maps)
-
-This plot is produced similarly to Figures 6.1 and 6.2.
-
-```{code-cell}
-fig, axes = plt.subplots(1, 2, figsize=(10, 4))
-
-x = np.linspace(-2, 2, 10)
-
-titles = 'non-bijection', 'bijection'
-labels = '$f(x)=0 x$', '$f(x)=0.5 x$'
-funcs = lambda x: 0*x, lambda x: 0.5 * x
-
-for ax, f, lb, ti in zip(axes, funcs, labels, titles):
-
- # Set the axes through the origin
- for spine in ['left', 'bottom']:
- ax.spines[spine].set_position('zero')
- for spine in ['right', 'top']:
- ax.spines[spine].set_color('none')
- ax.set_yticks((-1, 1))
- ax.set_ylim((-1, 1))
- ax.set_xlim((-1, 1))
- ax.set_xticks((-1, 1))
- y = f(x)
- ax.plot(x, y, '-', linewidth=4, label=lb, alpha=0.6)
- ax.text(-0.8, 0.5, ti, fontsize=14)
- ax.legend(loc='lower right', fontsize=12)
-
-if export_figures:
- plt.savefig("figures/func_types_3.pdf")
-plt.show()
-```
-
-## Convexity and Polyhedra
-
-### A polyhedron $P$ represented as intersecting halfspaces
-
-Inequalities are of the form
-
-$$ a x + b y \leq c $$
-
-To plot the halfspace we plot the line
-
-$$ y = c/b - a/b x $$
-
-and then fill in the halfspace using `fill_between` on points $x, y, \hat y$,
-where $\hat y$ is either `y_min` or `y_max`.
-
-```{code-cell}
-fig, ax = plt.subplots()
-plt.axis('off')
-
-x = np.linspace(-10, 14, 200)
-y_min, y_max = -2, 3
-
-a1, b1, c1 = 1.0, 8.0, -5.0
-y = c1 / b1 - (a1 / b1) * x
-ax.plot(x, y, label='$a_1 x_1 + b_1 x_2 = c_1$')
-ax.fill_between(x, y, y_max, alpha=0.2)
-
-
-a2, b2, c2 = 0.5, 0.75, 1.5
-y = c2 / b2 - (a2 / b2) * x
-ax.plot(x, y, label='$a_2 x_1 + b_2 x_2 = c_2$')
-ax.fill_between(x, y, y_min, alpha=0.2)
-
-
-a3, b3, c3 = -1.0, 1.0, 1.5
-y = c3 / b3 - (a3 / b3) * x
-ax.plot(x, y, label='$a_3 x_1 + b_3 x_2 = c_3$')
-ax.fill_between(x, y, y_min, alpha=0.2)
-
-ax.plot((0.23,), (1.82,), 'ko')
-ax.plot((-1.95,), (-0.4,), 'ko')
-ax.plot((4.8,), (-1.2,), 'ko')
-
-
-ax.annotate('$P$', xy=(0, 0), fontsize=12)
-
-ax.set_ylim(y_min, y_max)
-if export_figures:
- plt.savefig("figures/polyhedron1.pdf")
-plt.show()
-```
-
-## Saddle Points and Duality
-
-```{code-cell}
-fig = plt.figure(figsize=(8.5, 6))
-
-## Top left Plot
-
-ax = fig.add_subplot(221, projection='3d')
-
-plot_args = {'rstride': 1, 'cstride': 1, 'cmap':"viridis",
- 'linewidth': 0.4, 'antialiased': True, "alpha":0.75,
- 'vmin': -1, 'vmax': 1}
-
-x, y = np.mgrid[-1:1:31j, -1:1:31j]
-z = x**2 - y**2
-
-ax.plot_surface(x, y, z, **plot_args)
-
-ax.view_init(azim=245, elev=20)
-
-ax.set_xlim(-1, 1)
-ax.set_ylim(-1, 1)
-ax.set_zlim(-1, 1)
-
-ax.set_xticks([0])
-ax.set_xticklabels([r"$x^*$"], fontsize=16)
-ax.set_yticks([0])
-ax.set_yticklabels([r"$\theta^*$"], fontsize=16)
-
-
-ax.set_zticks([])
-ax.set_zticklabels([])
-
-ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
-ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
-ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
-
-ax.set_zlabel("$L(x,\\theta)$", fontsize=14)
-
-## Top Right Plot
-
-ax = fig.add_subplot(222)
-
-
-plot_args = {'cmap':"viridis", 'antialiased': True, "alpha":0.6,
- 'vmin': -1, 'vmax': 1}
-
-x, y = np.mgrid[-1:1:31j, -1:1:31j]
-z = x**2 - y**2
-
-ax.contourf(x, y, z, **plot_args)
-
-ax.plot([0], [0], 'ko')
-
-ax.set_xlim(-1, 1)
-ax.set_ylim(-1, 1)
-
-plt.xticks([ 0],
- [r"$x^*$"], fontsize=16)
-
-plt.yticks([0],
- [r"$\theta^*$"], fontsize=16)
-
-ax.hlines(0, -1, 1, color='k', ls='-', lw=1)
-ax.vlines(0, -1, 1, color='k', ls='-', lw=1)
-
-coords=(-35, 30)
-ax.annotate(r'$L(x, \theta^*)$',
- xy=(-0.5, 0),
- xycoords="data",
- xytext=coords,
- textcoords="offset points",
- fontsize=12,
- arrowprops={"arrowstyle" : "->"})
-
-coords=(35, 30)
-ax.annotate(r'$L(x^*, \theta)$',
- xy=(0, 0.25),
- xycoords="data",
- xytext=coords,
- textcoords="offset points",
- fontsize=12,
- arrowprops={"arrowstyle" : "->"})
-
-## Bottom Left Plot
-
-ax = fig.add_subplot(223)
-
-x = np.linspace(-1, 1, 100)
-ax.plot(x, -x**2, label='$\\theta \mapsto L(x^*, \\theta)$')
-ax.set_ylim((-1, 1))
-ax.legend(fontsize=14)
-ax.set_xticks([])
-ax.set_yticks([])
-
-## Bottom Right Plot
-
-ax = fig.add_subplot(224)
-
-x = np.linspace(-1, 1, 100)
-ax.plot(x, x**2, label='$x \mapsto L(x, \\theta^*)$')
-ax.set_ylim((-1, 1))
-ax.legend(fontsize=14, loc='lower right')
-ax.set_xticks([])
-ax.set_yticks([])
-
-if export_figures:
- plt.savefig("figures/saddle_1.pdf")
-plt.show()
-```
diff --git a/code_book/ch_fpms.md b/code_book/ch_fpms.md
deleted file mode 100644
index d033658..0000000
--- a/code_book/ch_fpms.md
+++ /dev/null
@@ -1,178 +0,0 @@
----
-jupytext:
- cell_metadata_filter: -all
- formats: md:myst
- text_representation:
- extension: .md
- format_name: myst
-kernelspec:
- display_name: Python 3
- language: python
- name: python3
----
-
-# Chapter 5 - Nonlinear Interactions (Python Code)
-
-```{code-cell}
-:tags: [hide-output]
-
-! pip install --upgrade quantecon_book_networks
-```
-
-We begin with some imports
-
-```{code-cell}
-import quantecon as qe
-import quantecon_book_networks
-import quantecon_book_networks.input_output as qbn_io
-import quantecon_book_networks.plotting as qbn_plt
-import quantecon_book_networks.data as qbn_data
-export_figures = False
-```
-
-```{code-cell}
-import numpy as np
-import networkx as nx
-import matplotlib.pyplot as plt
-from matplotlib import cm
-quantecon_book_networks.config("matplotlib")
-```
-
-## Financial Networks
-
-### Equity-Cross Holdings
-
-Here we define a class for modelling a financial network where firms are linked by share cross-holdings,
-and there are failure costs as described by [Elliott et al. (2014)](https://www.aeaweb.org/articles?id=10.1257/aer.104.10.3115).
-
-```{code-cell}
-class FinNet:
-
- def __init__(self, n=100, c=0.72, d=1, θ=0.5, β=1.0, seed=1234):
-
- self.n, self.c, self.d, self.θ, self.β = n, c, d, θ, β
- np.random.seed(seed)
-
- self.e = np.ones(n)
- self.C, self.C_hat = self.generate_primitives()
- self.A = self.C_hat @ np.linalg.inv(np.identity(n) - self.C)
- self.v_bar = self.A @ self.e
- self.t = np.full(n, θ)
-
- def generate_primitives(self):
-
- n, c, d = self.n, self.c, self.d
- B = np.zeros((n, n))
- C = np.zeros_like(B)
-
- for i in range(n):
- for j in range(n):
- if i != j and np.random.rand() < d/(n-1):
- B[i,j] = 1
-
- for i in range(n):
- for j in range(n):
- k = np.sum(B[:,j])
- if k > 0:
- C[i,j] = c * B[i,j] / k
-
- C_hat = np.identity(n) * (1 - c)
-
- return C, C_hat
-
- def T(self, v):
- Tv = self.A @ (self.e - self.β * np.where(v < self.t, 1, 0))
- return Tv
-
- def compute_equilibrium(self):
- i = 0
- v = self.v_bar
- error = 1
- while error > 1e-10:
- print(f"number of failing firms is ", np.sum(v < self.θ))
- new_v = self.T(v)
- error = np.max(np.abs(new_v - v))
- v = new_v
- i = i+1
-
- print(f"Terminated after {i} iterations")
- return v
-
- def map_values_to_colors(self, v, j):
- cols = cm.plasma(qbn_io.to_zero_one(v))
- if j != 0:
- for i in range(len(v)):
- if v[i] < self.t[i]:
- cols[i] = 0.0
- return cols
-```
-
-Now we create a financial network.
-
-```{code-cell}
-fn = FinNet(n=100, c=0.72, d=1, θ=0.3, β=1.0)
-```
-
-And compute its equilibrium.
-
-```{code-cell}
-fn.compute_equilibrium()
-```
-
-### Waves of bankruptcies in a financial network
-
-Now we visualise the network after different numbers of iterations.
-
-For convenience we will first define a function to plot the graphs of the financial network.
-
-```{code-cell}
-def plot_fin_graph(G, ax, node_color_list):
-
- n = G.number_of_nodes()
-
- node_pos_dict = nx.spring_layout(G, k=1.1)
- edge_colors = []
-
- for i in range(n):
- for j in range(n):
- edge_colors.append(node_color_list[i])
-
-
- nx.draw_networkx_nodes(G,
- node_pos_dict,
- node_color=node_color_list,
- edgecolors='grey',
- node_size=100,
- linewidths=2,
- alpha=0.8,
- ax=ax)
-
- nx.draw_networkx_edges(G,
- node_pos_dict,
- edge_color=edge_colors,
- alpha=0.4,
- ax=ax)
-```
-
-Now we will iterate by applying the operator $T$ to the vector of firm values $v$ and produce the plots.
-
-```{code-cell}
-G = nx.from_numpy_array(np.array(fn.C), create_using=nx.DiGraph)
-v = fn.v_bar
-
-k = 15
-d = 3
-fig, axes = plt.subplots(int(k/d), 1, figsize=(10, 12))
-
-for i in range(k):
- if i % d == 0:
- ax = axes[int(i/d)]
- ax.set_title(f"iteration {i}")
-
- plot_fin_graph(G, ax, fn.map_values_to_colors(v, i))
- v = fn.T(v)
-if export_figures:
- plt.savefig("figures/fin_network_sims_1.pdf")
-plt.show()
-```
-
diff --git a/code_book/ch_intro.md b/code_book/ch_intro.md
deleted file mode 100644
index 775b781..0000000
--- a/code_book/ch_intro.md
+++ /dev/null
@@ -1,788 +0,0 @@
----
-jupytext:
- cell_metadata_filter: -all
- formats: md:myst
- text_representation:
- extension: .md
- format_name: myst
- format_version: 0.13
- jupytext_version: 1.15.1
-kernelspec:
- display_name: Python 3 (ipykernel)
- language: python
- name: python3
----
-
-# Chapter 1 - Introduction (Python Code)
-
-```{code-cell} ipython3
-:tags: [hide-output]
-! pip install --upgrade quantecon_book_networks kaleido
-```
-
-We begin by importing the `quantecon` package as well as some functions and data that have been packaged for release with this text.
-
-```{code-cell} ipython3
-import quantecon as qe
-import quantecon_book_networks
-import quantecon_book_networks.input_output as qbn_io
-import quantecon_book_networks.data as qbn_data
-import quantecon_book_networks.plotting as qbn_plot
-ch1_data = qbn_data.introduction()
-default_figsize = (6, 4)
-export_figures = False
-```
-
-Next we import some common python libraries.
-
-```{code-cell} ipython3
-import numpy as np
-import pandas as pd
-import networkx as nx
-import statsmodels.api as sm
-import matplotlib.pyplot as plt
-import matplotlib.cm as cm
-import matplotlib.ticker as ticker
-from mpl_toolkits.mplot3d.art3d import Poly3DCollection
-import matplotlib.patches as mpatches
-import plotly.graph_objects as go
-quantecon_book_networks.config("matplotlib")
-```
-
-## Motivation
-
-### International trade in crude oil 2021
-
-We begin by loading a `NetworkX` directed graph object that represents international trade in crude oil.
-
-```{code-cell} ipython3
-DG = ch1_data["crude_oil"]
-```
-
-Next we transform the data to prepare it for display as a Sankey diagram.
-
-```{code-cell} ipython3
-nodeid = {}
-for ix,nd in enumerate(DG.nodes()):
- nodeid[nd] = ix
-
-# Links
-source = []
-target = []
-value = []
-for src,tgt in DG.edges():
- source.append(nodeid[src])
- target.append(nodeid[tgt])
- value.append(DG[src][tgt]['weight'])
-```
-
-Finally we produce our plot.
-
-```{code-cell} ipython3
-fig = go.Figure(data=[go.Sankey(
- node = dict(
- pad = 15,
- thickness = 20,
- line = dict(color = "black", width = 0.5),
- label = list(nodeid.keys()),
- color = "blue"
- ),
- link = dict(
- source = source,
- target = target,
- value = value
- ))])
-
-
-fig.update_layout(title_text="Crude Oil", font_size=10, width=600, height=800)
-if export_figures:
- fig.write_image("figures/crude_oil_2021.pdf")
-fig.show(renderer='svg')
-```
-
-### International trade in commercial aircraft during 2019
-
-For this plot we will use a cleaned dataset from
-[Harvard, CID Dataverse](https://dataverse.harvard.edu/dataverse/atlas).
-
-```{code-cell} ipython3
-DG = ch1_data['aircraft_network']
-pos = ch1_data['aircraft_network_pos']
-```
-
-We begin by calculating some features of our graph using the `NetworkX` and
-the `quantecon_book_networks` packages.
-
-```{code-cell} ipython3
-centrality = nx.eigenvector_centrality(DG)
-node_total_exports = qbn_io.node_total_exports(DG)
-edge_weights = qbn_io.edge_weights(DG)
-```
-
-Now we convert our graph features to plot features.
-
-```{code-cell} ipython3
-node_pos_dict = pos
-
-node_sizes = qbn_io.normalise_weights(node_total_exports,10000)
-edge_widths = qbn_io.normalise_weights(edge_weights,10)
-
-node_colors = qbn_io.colorise_weights(list(centrality.values()),color_palette=cm.viridis)
-node_to_color = dict(zip(DG.nodes,node_colors))
-edge_colors = []
-for src,_ in DG.edges:
- edge_colors.append(node_to_color[src])
-```
-
-Finally we produce the plot.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=(10, 10))
-ax.axis('off')
-
-nx.draw_networkx_nodes(DG,
- node_pos_dict,
- node_color=node_colors,
- node_size=node_sizes,
- linewidths=2,
- alpha=0.6,
- ax=ax)
-
-nx.draw_networkx_labels(DG,
- node_pos_dict,
- ax=ax)
-
-nx.draw_networkx_edges(DG,
- node_pos_dict,
- edge_color=edge_colors,
- width=edge_widths,
- arrows=True,
- arrowsize=20,
- ax=ax,
- node_size=node_sizes,
- connectionstyle='arc3,rad=0.15')
-
-if export_figures:
- plt.savefig("figures/commercial_aircraft_2019_1.pdf")
-plt.show()
-```
-
-## Spectral Theory
-
-### Spectral Radii
-
-Here we provide code for computing the spectral radius of a matrix.
-
-```{code-cell} ipython3
-def spec_rad(M):
- """
- Compute the spectral radius of M.
- """
- return np.max(np.abs(np.linalg.eigvals(M)))
-```
-
-```{code-cell} ipython3
-M = np.array([[1,2],[2,1]])
-spec_rad(M)
-```
-
-This function is available in the `quantecon_book_networks` package, along with
-several other functions for used repeatedly in the text. Source code for
-these functions can be seen [here](pkg_funcs).
-
-```{code-cell} ipython3
-qbn_io.spec_rad(M)
-```
-
-## Probability
-
-### The unit simplex in $\mathbb{R}^3$
-
-Here we define a function for plotting the unit simplex.
-
-```{code-cell} ipython3
-def unit_simplex(angle):
-
- fig = plt.figure(figsize=(10, 8))
- ax = fig.add_subplot(111, projection='3d')
-
- vtx = [[0, 0, 1],
- [0, 1, 0],
- [1, 0, 0]]
-
- tri = Poly3DCollection([vtx], color='darkblue', alpha=0.3)
- tri.set_facecolor([0.5, 0.5, 1])
- ax.add_collection3d(tri)
-
- ax.set(xlim=(0, 1), ylim=(0, 1), zlim=(0, 1),
- xticks=(1,), yticks=(1,), zticks=(1,))
-
- ax.set_xticklabels(['$(1, 0, 0)$'], fontsize=16)
- ax.set_yticklabels([f'$(0, 1, 0)$'], fontsize=16)
- ax.set_zticklabels([f'$(0, 0, 1)$'], fontsize=16)
-
- ax.xaxis.majorTicks[0].set_pad(15)
- ax.yaxis.majorTicks[0].set_pad(15)
- ax.zaxis.majorTicks[0].set_pad(35)
-
- ax.view_init(30, angle)
-
- # Move axis to origin
- ax.xaxis._axinfo['juggled'] = (0, 0, 0)
- ax.yaxis._axinfo['juggled'] = (1, 1, 1)
- ax.zaxis._axinfo['juggled'] = (2, 2, 0)
-
- ax.grid(False)
-
- return ax
-```
-
-We can now produce the plot.
-
-```{code-cell} ipython3
-unit_simplex(50)
-if export_figures:
- plt.savefig("figures/simplex_1.pdf")
-plt.show()
-```
-
-### Independent draws from Student’s t and Normal distributions
-
-Here we illustrate the occurrence of "extreme" events in heavy tailed distributions.
-We start by generating 1,000 samples from a normal distribution and a Student's t distribution.
-
-```{code-cell} ipython3
-from scipy.stats import t
-n = 1000
-np.random.seed(123)
-
-s = 2
-n_data = np.random.randn(n) * s
-
-t_dist = t(df=1.5)
-t_data = t_dist.rvs(n)
-```
-
-When we plot our samples, we see the Student's t distribution frequently
-generates samples many standard deviations from the mean.
-
-```{code-cell} ipython3
-fig, axes = plt.subplots(1, 2, figsize=(8, 3.4))
-
-for ax in axes:
- ax.set_ylim((-50, 50))
- ax.plot((0, n), (0, 0), 'k-', lw=0.3)
-
-ax = axes[0]
-ax.plot(list(range(n)), t_data, linestyle='', marker='o', alpha=0.5, ms=4)
-ax.vlines(list(range(n)), 0, t_data, 'k', lw=0.2)
-ax.get_xaxis().set_major_formatter(
- ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
-ax.set_title(f"Student t draws", fontsize=11)
-
-ax = axes[1]
-ax.plot(list(range(n)), n_data, linestyle='', marker='o', alpha=0.5, ms=4)
-ax.vlines(list(range(n)), 0, n_data, lw=0.2)
-ax.get_xaxis().set_major_formatter(
- ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
-ax.set_title(f"$N(0, \sigma^2)$ with $\sigma = {s}$", fontsize=11)
-
-plt.tight_layout()
-if export_figures:
- plt.savefig("figures/heavy_tailed_draws.pdf")
-plt.show()
-```
-
-### CCDF plots for the Pareto and Exponential distributions
-
-When the Pareto tail property holds, the CCDF is eventually log linear. Here
-we illustrates this using a Pareto distribution. For comparison, an exponential
-distribution is also shown. First we define our domain and the Pareto and
-Exponential distributions.
-
-```{code-cell} ipython3
-x = np.linspace(1, 10, 500)
-```
-
-```{code-cell} ipython3
-α = 1.5
-def Gp(x):
- return x**(-α)
-```
-
-```{code-cell} ipython3
-λ = 1.0
-def Ge(x):
- return np.exp(-λ * x)
-```
-
-We then plot our distribution on a log-log scale.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=default_figsize)
-
-ax.plot(np.log(x), np.log(Gp(x)), label="Pareto")
-ax.plot(np.log(x), np.log(Ge(x)), label="Exponential")
-
-ax.legend(fontsize=12, frameon=False, loc="lower left")
-ax.set_xlabel("$\ln x$", fontsize=12)
-ax.set_ylabel("$\ln G(x)$", fontsize=12)
-
-if export_figures:
- plt.savefig("figures/ccdf_comparison_1.pdf")
-plt.show()
-```
-
-### Empirical CCDF plots for largest firms (Forbes)
-
-Here we show that the distribution of firm sizes has a Pareto tail. We start
-by loading the `forbes_global_2000` dataset.
-
-```{code-cell} ipython3
-dfff = ch1_data['forbes_global_2000']
-```
-
-We calculate values of the empirical CCDF.
-
-```{code-cell} ipython3
-data = np.asarray(dfff['Market Value'])[0:500]
-y_vals = np.empty_like(data, dtype='float64')
-n = len(data)
-for i, d in enumerate(data):
- # record fraction of sample above d
- y_vals[i] = np.sum(data >= d) / n
-```
-
-Now we fit a linear trend line (on the log-log scale).
-
-```{code-cell} ipython3
-x, y = np.log(data), np.log(y_vals)
-results = sm.OLS(y, sm.add_constant(x)).fit()
-b, a = results.params
-```
-
-Finally we produce our plot.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=(7.3, 4))
-
-ax.scatter(x, y, alpha=0.3, label="firm size (market value)")
-ax.plot(x, x * a + b, 'k-', alpha=0.6, label=f"slope = ${a: 1.2f}$")
-
-ax.set_xlabel('log value', fontsize=12)
-ax.set_ylabel("log prob.", fontsize=12)
-ax.legend(loc='lower left', fontsize=12)
-
-if export_figures:
- plt.savefig("figures/empirical_powerlaw_plots_firms_forbes.pdf")
-plt.show()
-```
-
-## Graph Theory
-
-### Zeta and Pareto distributions
-
-We begin by defining the Zeta and Pareto distributions.
-
-```{code-cell} ipython3
-γ = 2.0
-α = γ - 1
-```
-
-```{code-cell} ipython3
-def z(k, c=2.0):
- return c * k**(-γ)
-
-k_grid = np.arange(1, 10+1)
-```
-
-```{code-cell} ipython3
-def p(x, c=2.0):
- return c * x**(-γ)
-
-x_grid = np.linspace(1, 10, 200)
-```
-
-Then we can produce our plot
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=default_figsize)
-ax.plot(k_grid, z(k_grid), '-o', label='zeta distribution with $\gamma=2$')
-ax.plot(x_grid, p(x_grid), label='density of Pareto with tail index $\\alpha$')
-ax.legend(fontsize=12)
-ax.set_yticks((0, 1, 2))
-if export_figures:
- plt.savefig("figures/zeta_1.pdf")
-plt.show()
-```
-
-### NetworkX digraph plot
-
-We start by creating a graph object and populating it with edges.
-
-```{code-cell} ipython3
-G_p = nx.DiGraph()
-
-edge_list = [
- ('p', 'p'),
- ('m', 'p'), ('m', 'm'), ('m', 'r'),
- ('r', 'p'), ('r', 'm'), ('r', 'r')
-]
-
-for e in edge_list:
- u, v = e
- G_p.add_edge(u, v)
-```
-
-Now we can plot our graph.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots()
-nx.spring_layout(G_p, seed=4)
-nx.draw_spring(G_p, ax=ax, node_size=500, with_labels=True,
- font_weight='bold', arrows=True, alpha=0.8,
- connectionstyle='arc3,rad=0.25', arrowsize=20)
-if export_figures:
- plt.savefig("figures/networkx_basics_1.pdf")
-plt.show()
-```
-
-The `DiGraph` object has methods that calculate in-degree and out-degree of vertices.
-
-```{code-cell} ipython3
-G_p.in_degree('p')
-```
-
-```{code-cell} ipython3
-G_p.out_degree('p')
-```
-
-Additionally, the `NetworkX` package supplies functions for testing
-communication and strong connectedness, as well as to compute strongly
-connected components.
-
-```{code-cell} ipython3
-G = nx.DiGraph()
-G.add_edge(1, 1)
-G.add_edge(2, 1)
-G.add_edge(2, 3)
-G.add_edge(3, 2)
-list(nx.strongly_connected_components(G))
-```
-
-Like `NetworkX`, the Python library `quantecon`
-provides access to some graph-theoretic algorithms.
-
-In the case of QuantEcon's `DiGraph` object, an instance is created via the adjacency matrix.
-
-```{code-cell} ipython3
-A = ((1, 0, 0),
- (1, 1, 1),
- (1, 1, 1))
-A = np.array(A) # Convert to NumPy array
-G = qe.DiGraph(A)
-
-G.strongly_connected_components
-```
-
-### International private credit flows by country
-
-We begin by loading an adjacency matrix of international private credit flows
-(in the form of a NumPy array and a list of country labels).
-
-```{code-cell} ipython3
-Z = ch1_data["adjacency_matrix"]["Z"]
-Z_visual= ch1_data["adjacency_matrix"]["Z_visual"]
-countries = ch1_data["adjacency_matrix"]["countries"]
-```
-
-To calculate our graph's properties, we use hub-based eigenvector
-centrality as our centrality measure for this plot.
-
-```{code-cell} ipython3
-centrality = qbn_io.eigenvector_centrality(Z_visual, authority=False)
-```
-
-Now we convert our graph features to plot features.
-
-```{code-cell} ipython3
-node_colors = cm.plasma(qbn_io.to_zero_one_beta(centrality))
-```
-
-Finally we produce the plot.
-
-```{code-cell} ipython3
-X = qbn_io.to_zero_one_beta(Z.sum(axis=1))
-
-fig, ax = plt.subplots(figsize=(8, 10))
-plt.axis("off")
-
-qbn_plot.plot_graph(Z_visual, X, ax, countries,
- layout_type='spring',
- layout_seed=1234,
- node_size_multiple=3000,
- edge_size_multiple=0.000006,
- tol=0.0,
- node_color_list=node_colors)
-
-if export_figures:
- plt.savefig("figures/financial_network_analysis_visualization.pdf")
-plt.show()
-```
-
-### Centrality measures for the credit network
-
-This figure looks at six different centrality measures.
-
-We begin by defining a function for calculating eigenvector centrality.
-
-Hub-based centrality is calculated by default, although authority-based centrality
-can be calculated by setting `authority=True`.
-
-```{code-cell} ipython3
-def eigenvector_centrality(A, k=40, authority=False):
- """
- Computes the dominant eigenvector of A. Assumes A is
- primitive and uses the power method.
-
- """
- A_temp = A.T if authority else A
- n = len(A_temp)
- r = spec_rad(A_temp)
- e = r**(-k) * (np.linalg.matrix_power(A_temp, k) @ np.ones(n))
- return e / np.sum(e)
-```
-
-Here a similar function is defined for calculating Katz centrality.
-
-```{code-cell} ipython3
-def katz_centrality(A, b=1, authority=False):
- """
- Computes the Katz centrality of A, defined as the x solving
-
- x = 1 + b A x (1 = vector of ones)
-
- Assumes that A is square.
-
- If authority=True, then A is replaced by its transpose.
- """
- n = len(A)
- I = np.identity(n)
- C = I - b * A.T if authority else I - b * A
- return np.linalg.solve(C, np.ones(n))
-```
-
-Now we generate an unweighted version of our matrix to help calculate in-degree and out-degree.
-
-```{code-cell} ipython3
-D = qbn_io.build_unweighted_matrix(Z)
-```
-
-We now use the above to calculate the six centrality measures.
-
-```{code-cell} ipython3
-outdegree = D.sum(axis=1)
-ecentral_hub = eigenvector_centrality(Z, authority=False)
-kcentral_hub = katz_centrality(Z, b=1/1_700_000)
-
-indegree = D.sum(axis=0)
-ecentral_authority = eigenvector_centrality(Z, authority=True)
-kcentral_authority = katz_centrality(Z, b=1/1_700_000, authority=True)
-```
-
-Here we provide a helper function that returns a DataFrame for each measure.
-The DataFrame is ordered by that measure and contains color information.
-
-```{code-cell} ipython3
-def centrality_plot_data(countries, centrality_measures):
- df = pd.DataFrame({'code': countries,
- 'centrality':centrality_measures,
- 'color': qbn_io.colorise_weights(centrality_measures).tolist()
- })
- return df.sort_values('centrality')
-```
-
-Finally, we plot the various centrality measures.
-
-```{code-cell} ipython3
-centrality_measures = [outdegree, indegree,
- ecentral_hub, ecentral_authority,
- kcentral_hub, kcentral_authority]
-
-ylabels = ['out degree', 'in degree',
- 'eigenvector hub','eigenvector authority',
- 'Katz hub', 'Katz authority']
-
-ylims = [(0, 20), (0, 20),
- None, None,
- None, None]
-
-
-fig, axes = plt.subplots(3, 2, figsize=(10, 12))
-
-axes = axes.flatten()
-
-for i, ax in enumerate(axes):
- df = centrality_plot_data(countries, centrality_measures[i])
-
- ax.bar('code', 'centrality', data=df, color=df["color"], alpha=0.6)
-
- patch = mpatches.Patch(color=None, label=ylabels[i], visible=False)
- ax.legend(handles=[patch], fontsize=12, loc="upper left", handlelength=0, frameon=False)
-
- if ylims[i] is not None:
- ax.set_ylim(ylims[i])
-
-if export_figures:
- plt.savefig("figures/financial_network_analysis_centrality.pdf")
-plt.show()
-```
-
-### Computing in and out degree distributions
-
-The in-degree distribution evaluated at $k$ is the fraction of nodes in a
-network that have in-degree $k$. The in-degree distribution of a `NetworkX`
-DiGraph can be calculated using the code below.
-
-```{code-cell} ipython3
-def in_degree_dist(G):
- n = G.number_of_nodes()
- iG = np.array([G.in_degree(v) for v in G.nodes()])
- d = [np.mean(iG == k) for k in range(n+1)]
- return d
-```
-
-The out-degree distribution is defined analogously.
-
-```{code-cell} ipython3
-def out_degree_dist(G):
- n = G.number_of_nodes()
- oG = np.array([G.out_degree(v) for v in G.nodes()])
- d = [np.mean(oG == k) for k in range(n+1)]
- return d
-```
-
-### Degree distribution for international aircraft trade
-
-Here we illustrate that the commercial aircraft international trade network is
-approximately scale-free by plotting the degree distribution alongside
-$f(x)=cx-\gamma$ with $c=0.2$ and $\gamma=1.1$.
-
-In this calculation of the degree distribution, performed by the NetworkX
-function `degree_histogram`, directions are ignored and the network is treated
-as an undirected graph.
-
-```{code-cell} ipython3
-def plot_degree_dist(G, ax, loglog=True, label=None):
- "Plot the degree distribution of a graph G on axis ax."
- dd = [x for x in nx.degree_histogram(G) if x > 0]
- dd = np.array(dd) / np.sum(dd) # normalize
- if loglog:
- ax.loglog(dd, '-o', lw=0.5, label=label)
- else:
- ax.plot(dd, '-o', lw=0.5, label=label)
-```
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=default_figsize)
-
-plot_degree_dist(DG, ax, loglog=False, label='degree distribution')
-
-xg = np.linspace(0.5, 25, 250)
-ax.plot(xg, 0.2 * xg**(-1.1), label='power law')
-ax.set_xlim(0.9, 22)
-ax.set_ylim(0, 0.25)
-ax.legend()
-if export_figures:
- plt.savefig("figures/commercial_aircraft_2019_2.pdf")
-plt.show()
-```
-
-### Random graphs
-
-The code to produce the Erdos-Renyi random graph, used below, applies the
-combinations function from the `itertools` library. The function
-`combinations(A, k)` returns a list of all subsets of $A$ of size $k$. For
-example:
-
-```{code-cell} ipython3
-import itertools
-letters = 'a', 'b', 'c'
-list(itertools.combinations(letters, 2))
-```
-
-Below we generate random graphs using the Erdos-Renyi and Barabasi-Albert
-algorithms. Here, for convenience, we will define a function to plot these
-graphs.
-
-```{code-cell} ipython3
-def plot_random_graph(RG,ax):
- node_pos_dict = nx.spring_layout(RG, k=1.1)
-
- centrality = nx.degree_centrality(RG)
- node_color_list = qbn_io.colorise_weights(list(centrality.values()))
-
- edge_color_list = []
- for i in range(n):
- for j in range(n):
- edge_color_list.append(node_color_list[i])
-
- nx.draw_networkx_nodes(RG,
- node_pos_dict,
- node_color=node_color_list,
- edgecolors='grey',
- node_size=100,
- linewidths=2,
- alpha=0.8,
- ax=ax)
-
- nx.draw_networkx_edges(RG,
- node_pos_dict,
- edge_color=edge_colors,
- alpha=0.4,
- ax=ax)
-```
-
-### An instance of an Erdos–Renyi random graph
-
-```{code-cell} ipython3
-n = 100
-p = 0.05
-G_er = qbn_io.erdos_renyi_graph(n, p, seed=1234)
-```
-
-```{code-cell} ipython3
-fig, axes = plt.subplots(1, 2, figsize=(10, 3.2))
-
-axes[0].set_title("Graph visualization")
-plot_random_graph(G_er,axes[0])
-
-axes[1].set_title("Degree distribution")
-plot_degree_dist(G_er, axes[1], loglog=False)
-
-if export_figures:
- plt.savefig("figures/rand_graph_experiments_1.pdf")
-plt.show()
-```
-
-### An instance of a preferential attachment random graph
-
-```{code-cell} ipython3
-n = 100
-m = 5
-G_ba = nx.generators.random_graphs.barabasi_albert_graph(n, m, seed=123)
-```
-
-```{code-cell} ipython3
-fig, axes = plt.subplots(1, 2, figsize=(10, 3.2))
-
-axes[0].set_title("Graph visualization")
-plot_random_graph(G_ba, axes[0])
-
-axes[1].set_title("Degree distribution")
-plot_degree_dist(G_ba, axes[1], loglog=False)
-
-if export_figures:
- plt.savefig("figures/rand_graph_experiments_2.pdf")
-plt.show()
-```
diff --git a/code_book/ch_mcs.md b/code_book/ch_mcs.md
deleted file mode 100644
index 198d542..0000000
--- a/code_book/ch_mcs.md
+++ /dev/null
@@ -1,476 +0,0 @@
----
-jupytext:
- cell_metadata_filter: -all
- formats: md:myst
- text_representation:
- extension: .md
- format_name: myst
- format_version: 0.13
- jupytext_version: 1.10.3
-kernelspec:
- display_name: Python 3
- language: python
- name: python3
----
-
-# Chapter 4 - Markov Chains and Networks (Python Code)
-
-```{code-cell}
----
-tags: [hide-output]
----
-! pip install --upgrade quantecon_book_networks
-```
-
-We begin with some imports.
-
-```{code-cell}
-import quantecon as qe
-import quantecon_book_networks
-import quantecon_book_networks.input_output as qbn_io
-import quantecon_book_networks.plotting as qbn_plt
-import quantecon_book_networks.data as qbn_data
-ch4_data = qbn_data.markov_chains_and_networks()
-export_figures = False
-```
-
-```{code-cell} ipython3
-import numpy as np
-import pandas as pd
-import networkx as nx
-import matplotlib.pyplot as plt
-from matplotlib import cm
-from mpl_toolkits.mplot3d import Axes3D
-from mpl_toolkits.mplot3d.art3d import Poly3DCollection
-quantecon_book_networks.config("matplotlib")
-```
-
-## Example transition matrices
-
-In this chapter two transition matrices are used.
-
-First, a Markov model is estimated in the international growth dynamics study
-of [Quah (1993)](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.142.5504&rep=rep1&type=pdf).
-
-The state is real GDP per capita in a given country relative to the world
-average.
-
-Quah discretizes the possible values to 0–1/4, 1/4–1/2, 1/2–1, 1–2
-and 2–inf, calling these states 1 to 5 respectively. The transitions are over
-a one year period.
-
-```{code-cell}
-P_Q = [
- [0.97, 0.03, 0, 0, 0 ],
- [0.05, 0.92, 0.03, 0, 0 ],
- [0, 0.04, 0.92, 0.04, 0 ],
- [0, 0, 0.04, 0.94, 0.02],
- [0, 0, 0, 0.01, 0.99]
-]
-P_Q = np.array(P_Q)
-codes_Q = ('1', '2', '3', '4', '5')
-```
-
-Second, [Benhabib et al. (2015)](https://www.economicdynamics.org/meetpapers/2015/paper_364.pdf) estimate the following transition matrix for intergenerational social mobility.
-
-The states are percentiles of the wealth distribution.
-
-In particular, states 1, 2,..., 8, correspond to the percentiles 0-20%, 20-40%, 40-60%, 60-80%, 80-90%, 90-95%, 95-99%, 99-100%.
-
-```{code-cell}
-P_B = [
- [0.222, 0.222, 0.215, 0.187, 0.081, 0.038, 0.029, 0.006],
- [0.221, 0.22, 0.215, 0.188, 0.082, 0.039, 0.029, 0.006],
- [0.207, 0.209, 0.21, 0.194, 0.09, 0.046, 0.036, 0.008],
- [0.198, 0.201, 0.207, 0.198, 0.095, 0.052, 0.04, 0.009],
- [0.175, 0.178, 0.197, 0.207, 0.11, 0.067, 0.054, 0.012],
- [0.182, 0.184, 0.2, 0.205, 0.106, 0.062, 0.05, 0.011],
- [0.123, 0.125, 0.166, 0.216, 0.141, 0.114, 0.094, 0.021],
- [0.084, 0.084, 0.142, 0.228, 0.17, 0.143, 0.121, 0.028]
- ]
-
-P_B = np.array(P_B)
-codes_B = ('1', '2', '3', '4', '5', '6', '7', '8')
-```
-
-
-## Markov Chains as Digraphs
-
-### Contour plot of a transition matrix
-
-Here we define a function for producing contour plots of matrices.
-
-```{code-cell}
-def plot_matrices(matrix,
- codes,
- ax,
- font_size=12,
- alpha=0.6,
- colormap=cm.viridis,
- color45d=None,
- xlabel='sector $j$',
- ylabel='sector $i$'):
-
- ticks = range(len(matrix))
-
- levels = np.sqrt(np.linspace(0, 0.75, 100))
-
-
- if color45d != None:
- co = ax.contourf(ticks,
- ticks,
- matrix,
- alpha=alpha, cmap=colormap)
- ax.plot(ticks, ticks, color=color45d)
- else:
- co = ax.contourf(ticks,
- ticks,
- matrix,
- levels,
- alpha=alpha, cmap=colormap)
-
- ax.set_xlabel(xlabel, fontsize=font_size)
- ax.set_ylabel(ylabel, fontsize=font_size)
- ax.set_yticks(ticks)
- ax.set_yticklabels(codes_B)
- ax.set_xticks(ticks)
- ax.set_xticklabels(codes_B)
-
-```
-
-Now we use our function to produce a plot of the transition matrix for
-intergenerational social mobility, $P_B$.
-
-```{code-cell}
-fig, ax = plt.subplots(figsize=(6,6))
-plot_matrices(P_B.transpose(), codes_B, ax, alpha=0.75,
- colormap=cm.viridis, color45d='black',
- xlabel='state at time $t$', ylabel='state at time $t+1$')
-
-if export_figures:
- plt.savefig("figures/markov_matrix_visualization.pdf")
-plt.show()
-```
-
-
-### Wealth percentile over time
-
-Here we compare the mixing of the transition matrix for intergenerational
-social mobility $P_B$ and the transition matrix for international growth
-dynamics $P_Q$.
-
-We begin by creating `quantecon` `MarkovChain` objects with each of our transition
-matrices.
-
-```{code-cell}
-mc_B = qe.MarkovChain(P_B, state_values=range(1, 9))
-mc_Q = qe.MarkovChain(P_Q, state_values=range(1, 6))
-```
-
-Next we define a function to plot simulations of Markov chains.
-
-Two simulations will be run for each `MarkovChain`, one starting at the
-minimum initial value and one at the maximum.
-
-```{code-cell}
-def sim_fig(ax, mc, T=100, seed=14, title=None):
- X1 = mc.simulate(T, init=1, random_state=seed)
- X2 = mc.simulate(T, init=max(mc.state_values), random_state=seed+1)
- ax.plot(X1)
- ax.plot(X2)
- ax.set_xlabel("time")
- ax.set_ylabel("state")
- ax.set_title(title, fontsize=12)
-```
-
-Finally, we produce the figure.
-
-```{code-cell}
-fig, axes = plt.subplots(2, 1, figsize=(6, 4))
-ax = axes[0]
-sim_fig(axes[0], mc_B, title="$P_B$")
-sim_fig(axes[1], mc_Q, title="$P_Q$")
-axes[1].set_yticks((1, 2, 3, 4, 5))
-
-plt.tight_layout()
-if export_figures:
- plt.savefig("figures/benhabib_mobility_mixing.pdf")
-plt.show()
-```
-
-
-### Predicted vs realized cross-country income distributions for 2019
-
-Here we load a `pandas` `DataFrame` of GDP per capita data for countries compared to the global average.
-
-```{code-cell}
-gdppc_df = ch4_data['gdppc_df']
-gdppc_df.head()
-```
-
-Now we assign countries bins, as per Quah (1993).
-
-```{code-cell}
-q = [0, 0.25, 0.5, 1.0, 2.0, np.inf]
-l = [0, 1, 2, 3, 4]
-
-x = pd.cut(gdppc_df.gdppc_r, bins=q, labels=l)
-gdppc_df['interval'] = x
-
-gdppc_df = gdppc_df.reset_index()
-gdppc_df['interval'] = gdppc_df['interval'].astype(float)
-gdppc_df['year'] = gdppc_df['year'].astype(float)
-```
-
-Here we define a function for calculating the cross-country income
-distributions for a given date range.
-
-```{code-cell}
-def gdp_dist_estimate(df, l, yr=(1960, 2019)):
- Y = np.zeros(len(l))
- for i in l:
- Y[i] = df[
- (df['interval'] == i) &
- (df['year'] <= yr[1]) &
- (df['year'] >= yr[0])
- ].count()[0]
-
- return Y / Y.sum()
-```
-
-We calculate the true distribution for 1985.
-
-```{code-cell}
-ψ_1985 = gdp_dist_estimate(gdppc_df,l,yr=(1985, 1985))
-```
-
-Now we use the transition matrix to update the 1985 distribution $t = 2019 - 1985 = 34$
-times to get our predicted 2019 distribution.
-
-```{code-cell}
-ψ_2019_predicted = ψ_1985 @ np.linalg.matrix_power(P_Q, 2019-1985)
-```
-
-Now, calculate the true 2019 distribution.
-
-```{code-cell}
-ψ_2019 = gdp_dist_estimate(gdppc_df,l,yr=(2019, 2019))
-```
-
-Finally we produce the plot.
-
-```{code-cell}
-states = np.arange(0, 5)
-ticks = range(5)
-codes_S = ('1', '2', '3', '4', '5')
-
-fig, ax = plt.subplots(figsize=(6, 4))
-width = 0.4
-ax.plot(states, ψ_2019_predicted, '-o', alpha=0.7, label='predicted')
-ax.plot(states, ψ_2019, '-o', alpha=0.7, label='realized')
-ax.set_xlabel("state")
-ax.set_ylabel("probability")
-ax.set_yticks((0.15, 0.2, 0.25, 0.3))
-ax.set_xticks(ticks)
-ax.set_xticklabels(codes_S)
-
-ax.legend(loc='upper center', fontsize=12)
-if export_figures:
- plt.savefig("figures/quah_gdppc_prediction.pdf")
-plt.show()
-
-```
-
-### Distribution dynamics
-
-Here we define a function for plotting the convergence of marginal
-distributions $\psi$ under a transition matrix $P$ on the unit simplex.
-
-```{code-cell}
-def convergence_plot(ψ, P, n=14, angle=50):
-
- ax = qbn_plt.unit_simplex(angle)
-
- # Convergence plot
-
- P = np.array(P)
-
- ψ = ψ # Initial condition
-
- x_vals, y_vals, z_vals = [], [], []
- for t in range(n):
- x_vals.append(ψ[0])
- y_vals.append(ψ[1])
- z_vals.append(ψ[2])
- ψ = ψ @ P
-
- ax.scatter(x_vals, y_vals, z_vals, c='darkred', s=80, alpha=0.7, depthshade=False)
-
- mc = qe.MarkovChain(P)
- ψ_star = mc.stationary_distributions[0]
- ax.scatter(ψ_star[0], ψ_star[1], ψ_star[2], c='k', s=80)
-
- return ψ
-
-```
-
-Now we define P.
-
-```{code-cell}
-P = (
- (0.9, 0.1, 0.0),
- (0.4, 0.4, 0.2),
- (0.1, 0.1, 0.8)
- )
-```
-
-#### A trajectory from $\psi_0 = (0, 0, 1)$
-
-Here we see the sequence of marginals appears to converge.
-
-```{code-cell}
-ψ_0 = (0, 0, 1)
-ψ = convergence_plot(ψ_0, P)
-if export_figures:
- plt.savefig("figures/simplex_2.pdf")
-plt.show()
-```
-
-#### A trajectory from $\psi_0 = (0, 1/2, 1/2)$
-
-Here we see again that the sequence of marginals appears to converge, and the
-limit appears not to depend on the initial distribution.
-
-```{code-cell}
-ψ_0 = (0, 1/2, 1/2)
-ψ = convergence_plot(ψ_0, P, n=12)
-if export_figures:
- plt.savefig("figures/simplex_3.pdf")
-plt.show()
-```
-
-
-### Distribution projections from $P_B$
-
-Here we define a function for plotting $\psi$ after $n$ iterations of the
-transition matrix $P$. The distribution $\psi_0$ is taken as the uniform
-distribution over the
-state space.
-
-```{code-cell}
-def transition(P, n, ax=None):
-
- P = np.array(P)
- nstates = P.shape[1]
- s0 = np.ones(8) * 1/nstates
- s = s0
-
- for i in range(n):
- s = s @ P
-
- if ax is None:
- fig, ax = plt.subplots()
-
- ax.plot(range(1, nstates+1), s, '-o', alpha=0.6)
- ax.set(ylim=(0, 0.25),
- xticks=((1, nstates)))
- ax.set_title(f"$t = {n}$")
-
- return ax
-```
-
-We now generate the marginal distributions after 0, 1, 2, and 100 iterations for $P_B$.
-
-```{code-cell}
-ns = (0, 1, 2, 100)
-fig, axes = plt.subplots(1, len(ns), figsize=(6, 4))
-
-for n, ax in zip(ns, axes):
- ax = transition(P_B, n, ax=ax)
- ax.set_xlabel("Quantile")
-
-plt.tight_layout()
-if export_figures:
- plt.savefig("figures/benhabib_mobility_dists.pdf")
-plt.show()
-```
-
-
-
-## Asymptotics
-
-### Convergence of the empirical distribution to $\psi^*$
-
-We begin by creating a `MarkovChain` object, taking $P_B$ as the transition matrix.
-
-```{code-cell}
-mc = qe.MarkovChain(P_B)
-```
-
-Next we use the `quantecon` package to calculate the true stationary distribution.
-
-```{code-cell}
-stationary = mc.stationary_distributions[0]
-n = len(mc.P)
-```
-
-Now we define a function to simulate the Markov chain.
-
-```{code-cell}
-def simulate_distribution(mc, T=100):
- # Simulate path
- n = len(mc.P)
- path = mc.simulate_indices(ts_length=T, random_state=1)
- distribution = np.empty(n)
- for i in range(n):
- distribution[i] = np.mean(path==i)
- return distribution
-
-```
-
-We run simulations of length 10, 100, 1,000 and 10,000.
-
-```{code-cell}
-lengths = [10, 100, 1_000, 10_000]
-dists = []
-
-for t in lengths:
- dists.append(simulate_distribution(mc, t))
-```
-
-Now we produce the plots.
-
-We see that the simulated distribution starts to approach the true stationary distribution.
-
-```{code-cell}
-fig, axes = plt.subplots(2, 2, figsize=(9, 6), sharex='all')#, sharey='all')
-
-axes = axes.flatten()
-
-for dist, ax, t in zip(dists, axes, lengths):
-
- ax.plot(np.arange(n)+1 + .25,
- stationary,
- '-o',
- #width = 0.25,
- label='$\\psi^*$',
- alpha=0.75)
-
- ax.plot(np.arange(n)+1,
- dist,
- '-o',
- #width = 0.25,
- label=f'$\\hat \\psi_k$ with $k={t}$',
- alpha=0.75)
-
-
- ax.set_xlabel("state", fontsize=12)
- ax.set_ylabel("prob.", fontsize=12)
- ax.set_xticks(np.arange(n)+1)
- ax.legend(loc='upper right', fontsize=12, frameon=False)
- ax.set_ylim(0, 0.5)
-
-if export_figures:
- plt.savefig("figures/benhabib_ergodicity_1.pdf")
-plt.show()
-```
diff --git a/code_book/ch_opt.md b/code_book/ch_opt.md
deleted file mode 100644
index 2b0dce1..0000000
--- a/code_book/ch_opt.md
+++ /dev/null
@@ -1,416 +0,0 @@
----
-jupytext:
- cell_metadata_filter: -all
- formats: md:myst
- text_representation:
- extension: .md
- format_name: myst
- format_version: 0.13
- jupytext_version: 1.10.3
-kernelspec:
- display_name: Python 3
- language: python
- name: python3
----
-
-# Chapter 3 - Optimal Flows (Python Code)
-
-```{code-cell}
----
-tags: [hide-output]
----
-! pip install --upgrade quantecon_book_networks
-```
-
-We begin with some imports
-
-```{code-cell}
-import quantecon as qe
-import quantecon_book_networks
-import quantecon_book_networks.input_output as qbn_io
-import quantecon_book_networks.plotting as qbn_plt
-import quantecon_book_networks.data as qbn_data
-ch3_data = qbn_data.optimal_flows()
-export_figures = False
-```
-
-```{code-cell}
-import numpy as np
-from scipy.optimize import linprog
-import networkx as nx
-import ot
-import matplotlib.pyplot as plt
-from matplotlib import cm
-from matplotlib.patches import Polygon
-from matplotlib.artist import Artist
-quantecon_book_networks.config("matplotlib")
-```
-
-## Linear Programming and Duality
-
-### Betweenness centrality (by color and node size) for the Florentine families
-
-We load the Florentine Families data from the NetworkX package.
-```{code-cell}
-G = nx.florentine_families_graph()
-```
-
-Next we calculate betweenness centrality.
-
-```{code-cell}
-bc_dict = nx.betweenness_centrality(G)
-```
-
-And we produce the plot.
-
-```{code-cell}
-fig, ax = plt.subplots(figsize=(9.4, 9.4))
-
-plt.axis("off")
-nx.draw_networkx(
- G,
- ax=ax,
- pos=nx.spring_layout(G, seed=1234),
- with_labels=True,
- alpha=.8,
- arrowsize=15,
- connectionstyle="arc3,rad=0.1",
- node_size=[10_000*(size+0.1) for size in bc_dict.values()],
- node_color=[cm.plasma(bc+0.4) for bc in bc_dict.values()],
-)
-if export_figures:
- plt.savefig("figures/betweenness_centrality_1.pdf")
-```
-
-### Revenue maximizing quantities and a Python implementation of linear programming
-
-First we specify our linear program.
-
-```{code-cell}
-A = ((2, 5),
- (4, 2))
-b = (30, 20)
-c = (-3, -4) # minus in order to minimize
-```
-
-And now we use SciPy's linear programing module to solve our linear program.
-
-```{code-cell}
-from scipy.optimize import linprog
-result = linprog(c, A_ub=A, b_ub=b)
-print(result.x)
-```
-
-Here we produce a visualization of what is being done.
-
-```{code-cell}
-fig, ax = plt.subplots(figsize=(8, 4.5))
-plt.rcParams['font.size'] = '14'
-
-# Draw constraint lines
-
-ax.plot(np.linspace(-1, 17.5, 100), 6-0.4*np.linspace(-1, 17.5, 100))
-ax.plot(np.linspace(-1, 5.5, 100), 10-2*np.linspace(-1, 5.5, 100))
-ax.text(10, 2.5, "$2q_1 + 5q_2 \leq 30$")
-ax.text(1.5, 8, "$4q_1 + 2q_2 \leq 20$")
-ax.text(-2, 2, "$q_2 \geq 0$")
-ax.text(2.5, -0.7, "$q_1 \geq 0$")
-
-# Draw the feasible region
-feasible_set = Polygon(np.array([[0, 0],
- [0, 6],
- [2.5, 5],
- [5, 0]]))
-ax.add_artist(feasible_set)
-Artist.set_alpha(feasible_set, 0.2)
-
-# Draw the objective function
-ax.plot(np.linspace(-1, 5.5, 100), 3.875-0.75*np.linspace(-1, 5.5, 100), 'g-')
-ax.plot(np.linspace(-1, 5.5, 100), 5.375-0.75*np.linspace(-1, 5.5, 100), 'g-')
-ax.plot(np.linspace(-1, 5.5, 100), 6.875-0.75*np.linspace(-1, 5.5, 100), 'g-')
-ax.text(5.8, 1, "revenue $ = 3q_1 + 4q_2$")
-
-# Draw the optimal solution
-ax.plot(2.5, 5, "o", color="black")
-ax.text(2.7, 5.2, "optimal solution")
-
-for spine in ['right', 'top']:
- ax.spines[spine].set_color('none')
-
-ax.set_xticks(())
-ax.set_yticks(())
-
-for spine in ['left', 'bottom']:
- ax.spines[spine].set_position('zero')
-
-ax.set_ylim(-1, 8)
-
-if export_figures:
- plt.savefig("figures/linear_programming_1.pdf")
-plt.show()
-```
-
-## Optimal Transport
-
-
-### Transforming one distribution into another
-
-Below we provide code to produce a visualization of transforming one
-distribution into another in one dimension.
-
-```{code-cell}
-σ = 0.1
-
-def ϕ(z):
- return (1 / np.sqrt(2 * σ**2 * np.pi)) * np.exp(-z**2 / (2 * σ**2))
-
-def v(x, a=0.4, b=0.6, s=1.0, t=1.4):
- return a * ϕ(x - s) + b * ϕ(x - t)
-
-fig, ax = plt.subplots(figsize=(10, 4))
-
-x = np.linspace(0.2, 4, 1000)
-ax.plot(x, v(x), label="$\\phi$")
-ax.plot(x, v(x, s=3.0, t=3.3, a=0.6), label="$\\psi$")
-
-ax.legend(loc='upper left', fontsize=12, frameon=False)
-
-ax.arrow(1.8, 1.6, 0.8, 0.0, width=0.01, head_width=0.08)
-ax.annotate('transform', xy=(1.9, 1.9), fontsize=12)
-if export_figures:
- plt.savefig("figures/ot_figs_1.pdf")
-plt.show()
-```
-
-### Function to solve a transport problem via linear programming
-
-Here we define a function to solve optimal transport problems using linear programming.
-
-```{code-cell}
-def ot_solver(phi, psi, c, method='highs-ipm'):
- """
- Solve the OT problem associated with distributions phi, psi
- and cost matrix c.
- Parameters
- ----------
- phi : 1-D array
- Distribution over the source locations.
- psi : 1-D array
- Distribution over the target locations.
- c : 2-D array
- Cost matrix.
- """
- n, m = len(phi), len(psi)
-
- # vectorize c
- c_vec = c.reshape((m * n, 1), order='F')
-
- # Construct A and b
- A1 = np.kron(np.ones((1, m)), np.identity(n))
- A2 = np.kron(np.identity(m), np.ones((1, n)))
- A = np.vstack((A1, A2))
- b = np.hstack((phi, psi))
-
- # Call sover
- res = linprog(c_vec, A_eq=A, b_eq=b, method=method)
-
- # Invert the vec operation to get the solution as a matrix
- pi = res.x.reshape((n, m), order='F')
- return pi
-```
-
-Now we can set up a simple optimal transport problem.
-
-```{code-cell}
-phi = np.array((0.5, 0.5))
-psi = np.array((1, 0))
-c = np.ones((2, 2))
-```
-
-Next we solve using the above function.
-
-```{code-cell}
-ot_solver(phi, psi, c)
-```
-
-We see we get the same result as when using the Python optimal transport
-package.
-
-```{code-cell}
-ot.emd(phi, psi, c)
-```
-
-### An optimal transport problem solved by linear programming
-
-Here we demonstrate a more detailed optimal transport problem. We begin by defining a node class.
-
-```{code-cell}
-class Node:
-
- def __init__(self, x, y, mass, group, name):
-
- self.x, self.y = x, y
- self.mass, self.group = mass, group
- self.name = name
-```
-
-Now we define a function for randomly generating nodes.
-
-```{code-cell}
-from scipy.stats import betabinom
-
-def build_nodes_of_one_type(group='phi', n=100, seed=123):
-
- nodes = []
- np.random.seed(seed)
-
- for i in range(n):
-
- if group == 'phi':
- m = 1/n
- x = np.random.uniform(-2, 2)
- y = np.random.uniform(-2, 2)
- else:
- m = betabinom.pmf(i, n-1, 2, 2)
- x = 0.6 * np.random.uniform(-1.5, 1.5)
- y = 0.6 * np.random.uniform(-1.5, 1.5)
-
- name = group + str(i)
- nodes.append(Node(x, y, m, group, name))
-
- return nodes
-```
-
-We now generate our source and target nodes.
-```{code-cell}
-n_phi = 32
-n_psi = 32
-
-phi_list = build_nodes_of_one_type(group='phi', n=n_phi)
-psi_list = build_nodes_of_one_type(group='psi', n=n_psi)
-
-phi_probs = [phi.mass for phi in phi_list]
-psi_probs = [psi.mass for psi in psi_list]
-```
-
-Now we define our transport costs.
-```{code-cell}
-c = np.empty((n_phi, n_psi))
-for i in range(n_phi):
- for j in range(n_psi):
- x0, y0 = phi_list[i].x, phi_list[i].y
- x1, y1 = psi_list[j].x, psi_list[j].y
- c[i, j] = np.sqrt((x0-x1)**2 + (y0-y1)**2)
-```
-
-We solve our optimal transport problem using the Python optimal transport package.
-
-```{code-cell}
-pi = ot.emd(phi_probs, psi_probs, c)
-```
-
-Finally we produce a graph of our sources, targets, and optimal transport plan.
-
-```{code-cell}
-g = nx.DiGraph()
-g.add_nodes_from([phi.name for phi in phi_list])
-g.add_nodes_from([psi.name for psi in psi_list])
-
-for i in range(n_phi):
- for j in range(n_psi):
- if pi[i, j] > 0:
- g.add_edge(phi_list[i].name, psi_list[j].name, weight=pi[i, j])
-
-node_pos_dict={}
-for phi in phi_list:
- node_pos_dict[phi.name] = (phi.x, phi.y)
-for psi in psi_list:
- node_pos_dict[psi.name] = (psi.x, psi.y)
-
-node_color_list = []
-node_size_list = []
-scale = 8_000
-for phi in phi_list:
- node_color_list.append('blue')
- node_size_list.append(phi.mass * scale)
-for psi in psi_list:
- node_color_list.append('red')
- node_size_list.append(psi.mass * scale)
-
-fig, ax = plt.subplots(figsize=(7, 10))
-plt.axis('off')
-
-nx.draw_networkx_nodes(g,
- node_pos_dict,
- node_color=node_color_list,
- node_size=node_size_list,
- edgecolors='grey',
- linewidths=1,
- alpha=0.5,
- ax=ax)
-
-nx.draw_networkx_edges(g,
- node_pos_dict,
- arrows=True,
- connectionstyle='arc3,rad=0.1',
- alpha=0.6)
-if export_figures:
- plt.savefig("figures/ot_large_scale_1.pdf")
-plt.show()
-```
-
-### Solving linear assignment as an optimal transport problem
-
-Here we set up a linear assignment problem (matching $n$ workers to $n$ jobs).
-
-```{code-cell}
-n = 4
-phi = np.ones(n)
-psi = np.ones(n)
-```
-
-We generate our cost matrix (the cost of training the $i$th worker for the $j$th job)
-
-```{code-cell}
-c = np.random.uniform(size=(n, n))
-```
-
-Finally, we solve our linear assignment problem as a special case of optimal transport.
-
-```{code-cell}
-ot.emd(phi, psi, c)
-```
-
-### Python Spatial Analysis library
-
-Readers interested in computational optimal transport should also consider
-PySAL, the [Python Spatial Analysis library](https://pysal.org/). See, for
-example, https://pysal.org/spaghetti/notebooks/transportation-problem.html.
-
-### The General Flow Problem
-
-Here we solve a simple network flow problem as a linear program. We begin by
-defining the node-edge incidence matrix.
-
-```{code-cell}
-A = (
-( 1, 1, 0, 0),
-(-1, 0, 1, 0),
-( 0, 0, -1, 1),
-( 0, -1, 0, -1)
-)
-```
-
-Now we define exogenous supply and transport costs.
-
-```{code-cell}
-b = (10, 0, 0, -10)
-c = (1, 4, 1, 1)
-```
-
-Finally we solve as a linear program.
-
-```{code-cell}
-result = linprog(c, A_eq=A, b_eq=b, method='highs-ipm')
-print(result.x)
-```
diff --git a/code_book/ch_opt_julia.md b/code_book/ch_opt_julia.md
deleted file mode 100644
index 6cd231f..0000000
--- a/code_book/ch_opt_julia.md
+++ /dev/null
@@ -1,118 +0,0 @@
----
-jupytext:
- cell_metadata_filter: -all
- formats: md:myst
- text_representation:
- extension: .md
- format_name: myst
- format_version: 0.13
- jupytext_version: 1.10.3
-kernelspec:
- display_name: Julia
- language: Julia
- name: julia-1.9
----
-
-
-# Chapter 3 - Optimal Flows (Julia Code)
-
-## Bellman’s Method
-
-Here we demonstrate solving a shortest path problem using Bellman's method.
-
-Our first step is to set up the cost function, which we store as an array
-called `c`. Note that we set `c[i, j] = Inf` when no edge exists from `i` to
-`j`.
-
-```{code-cell}
-:tags: ["remove-output"]
-
-c = fill(Inf, (7, 7))
-c[1, 2], c[1, 3], c[1, 4] = 1, 5, 3
-c[2, 4], c[2, 5] = 9, 6
-c[3, 6] = 2
-c[4, 6] = 4
-c[5, 7] = 4
-c[6, 7] = 1
-c[7, 7] = 0
-```
-
-Next we define the Bellman operator.
-
-```{code-cell}
-:tags: ["remove-output"]
-
-function T(q)
- Tq = similar(q)
- n = length(q)
- for x in 1:n
- Tq[x] = minimum(c[x, :] + q[:])
- end
- return Tq
-end
-```
-
-Now we arbitrarily set $q \equiv 0$, generate the sequence of iterates $T_q$,
-$T^2_q$, $T^3_q$ and plot them. By $T^3_q$ has already converged on $q^∗$.
-
-```{code-cell}
-using PyPlot
-export_figures = false
-fig, ax = plt.subplots(figsize=(6, 4))
-
-n = 7
-q = zeros(n)
-ax.plot(1:n, q)
-ax.set_xlabel("cost-to-go")
-ax.set_ylabel("nodes")
-
-for i in 1:3
- new_q = T(q)
- ax.plot(1:n, new_q, "-o", alpha=0.7, label="iterate $i")
- q = new_q
-end
-
-ax.legend()
-if export_figures == true
- plt.savefig("figures/shortest_path_iter_1.pdf")
-end
-```
-
-## Linear programming
-
-When solving linear programs, one option is to use a domain specific modeling
-language to set out the objective and constraints in the optimization problem.
-Here we demonstrate the Julia package `JuMP`.
-
-```{code-cell}
-using JuMP
-using GLPK
-```
-
-We create our model object and select our solver.
-
-```{code-cell}
-m = Model()
-set_optimizer(m, GLPK.Optimizer)
-```
-
-Now we add variables, constraints and an objective to our model.
-
-```{code-cell}
-:tags: ["remove-output"]
-
-@variable(m, q1 >= 0)
-@variable(m, q2 >= 0)
-@constraint(m, 2q1 + 5q2 <= 30)
-@constraint(m, 4q1 + 2q2 <= 20)
-@objective(m, Max, 3q1 + 4q2)
-```
-
-Finally we solve our linear program.
-
-```{code-cell}
-optimize!(m)
-
-println(value.(q1))
-println(value.(q2))
-```
diff --git a/code_book/ch_production.md b/code_book/ch_production.md
deleted file mode 100644
index c6d7b4a..0000000
--- a/code_book/ch_production.md
+++ /dev/null
@@ -1,416 +0,0 @@
----
-jupytext:
- cell_metadata_filter: -all
- formats: md:myst
- text_representation:
- extension: .md
- format_name: myst
- format_version: 0.13
- jupytext_version: 1.11.5
-kernelspec:
- display_name: Python 3
- language: python
- name: python3
----
-
-# Chapter 2 - Production (Python Code)
-
-```{code-cell} ipython3
----
-tags: [hide-output]
----
-! pip install --upgrade quantecon_book_networks
-```
-
-We begin with some imports.
-
-```{code-cell} ipython3
-import quantecon as qe
-import quantecon_book_networks
-import quantecon_book_networks.input_output as qbn_io
-import quantecon_book_networks.plotting as qbn_plt
-import quantecon_book_networks.data as qbn_data
-ch2_data = qbn_data.production()
-default_figsize = (6, 4)
-export_figures = False
-```
-
-```{code-cell} ipython3
-import numpy as np
-import pandas as pd
-import networkx as nx
-import matplotlib.pyplot as plt
-import matplotlib.cm as cm
-import matplotlib.colors as plc
-from matplotlib import cm
-quantecon_book_networks.config("matplotlib")
-```
-
-## Multisector Models
-
-We start by loading a graph of linkages between 15 US sectors in 2021.
-
-Our graph comes as a list of sector codes, an adjacency matrix of sales between
-the sectors, and a list the total sales of each sector.
-
-In particular, `Z[i,j]` is the sales from industry `i` to industry `j`, and `X[i]` is the the total sales
-of each sector `i`.
-
-```{code-cell} ipython3
-codes = ch2_data["us_sectors_15"]["codes"]
-Z = ch2_data["us_sectors_15"]["adjacency_matrix"]
-X = ch2_data["us_sectors_15"]["total_industry_sales"]
-```
-
-Now we define a function to build coefficient matrices.
-
-Two coefficient matrices are returned. The backward linkage case, where sales
-between sector `i` and `j` are given as a fraction of total sales of sector
-`j`. The forward linkage case, where sales between sector `i` and `j` are
-given as a fraction of total sales of sector `i`.
-
-```{code-cell} ipython3
-def build_coefficient_matrices(Z, X):
- """
- Build coefficient matrices A and F from Z and X via
-
- A[i, j] = Z[i, j] / X[j]
- F[i, j] = Z[i, j] / X[i]
-
- """
- A, F = np.empty_like(Z), np.empty_like(Z)
- n = A.shape[0]
- for i in range(n):
- for j in range(n):
- A[i, j] = Z[i, j] / X[j]
- F[i, j] = Z[i, j] / X[i]
-
- return A, F
-
-A, F = build_coefficient_matrices(Z, X)
-```
-
-### Backward linkages for 15 US sectors in 2021
-
-Here we calculate the hub-based eigenvector centrality of our backward linkage coefficient matrix.
-
-```{code-cell} ipython3
-centrality = qbn_io.eigenvector_centrality(A)
-```
-
-Now we use the `quantecon_book_networks` package to produce our plot.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=(8, 10))
-plt.axis("off")
-color_list = qbn_io.colorise_weights(centrality, beta=False)
-# Remove self-loops
-A1 = A.copy()
-for i in range(A1.shape[0]):
- A1[i][i] = 0
-qbn_plt.plot_graph(A1, X, ax, codes,
- layout_type='spring',
- layout_seed=5432167,
- tol=0.0,
- node_color_list=color_list)
-
-if export_figures:
- plt.savefig("figures/input_output_analysis_15.pdf")
-plt.show()
-```
-
-### Eigenvector centrality of across US industrial sectors
-
-Now we plot a bar chart of hub-based eigenvector centrality by sector.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=default_figsize)
-ax.bar(codes, centrality, color=color_list, alpha=0.6)
-ax.set_ylabel("eigenvector centrality", fontsize=12)
-if export_figures:
- plt.savefig("figures/input_output_analysis_15_ec.pdf")
-plt.show()
-```
-
-### Output multipliers across 15 US industrial sectors
-
-Output multipliers are equal to the authority-based Katz centrality measure of
-the backward linkage coefficient matrix.
-
-Here we calculate authority-based
-Katz centrality using the `quantecon_book_networks` package.
-
-```{code-cell} ipython3
-omult = qbn_io.katz_centrality(A, authority=True)
-```
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=default_figsize)
-omult_color_list = qbn_io.colorise_weights(omult,beta=False)
-ax.bar(codes, omult, color=omult_color_list, alpha=0.6)
-ax.set_ylabel("Output multipliers", fontsize=12)
-if export_figures:
- plt.savefig("figures/input_output_analysis_15_omult.pdf")
-plt.show()
-```
-
-### Forward linkages and upstreamness over US industrial sectors
-
-Upstreamness is the hub-based Katz centrality of the forward linkage
-coefficient matrix.
-
-Here we calculate hub-based Katz centrality.
-
-```{code-cell} ipython3
-upstreamness = qbn_io.katz_centrality(F)
-```
-
-Now we plot the network.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=(8, 10))
-plt.axis("off")
-upstreamness_color_list = qbn_io.colorise_weights(upstreamness,beta=False)
-# Remove self-loops
-for i in range(F.shape[0]):
- F[i][i] = 0
-qbn_plt.plot_graph(F, X, ax, codes,
- layout_type='spring', # alternative layouts: spring, circular, random, spiral
- layout_seed=5432167,
- tol=0.0,
- node_color_list=upstreamness_color_list)
-
-if export_figures:
- plt.savefig("figures/input_output_analysis_15_fwd.pdf")
-plt.show()
-```
-
-### Relative upstreamness of US industrial sectors
-
-Here we produce a barplot of upstreamness.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=default_figsize)
-ax.bar(codes, upstreamness, color=upstreamness_color_list, alpha=0.6)
-ax.set_ylabel("upstreamness", fontsize=12)
-if export_figures:
- plt.savefig("figures/input_output_analysis_15_up.pdf")
-plt.show()
-```
-
-### Hub-based Katz centrality of across 15 US industrial sectors
-
-Next we plot the hub-based Katz centrality of the backward linkage coefficient matrix.
-
-```{code-cell} ipython3
-kcentral = qbn_io.katz_centrality(A)
-```
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=default_figsize)
-kcentral_color_list = qbn_io.colorise_weights(kcentral,beta=False)
-ax.bar(codes, kcentral, color=kcentral_color_list, alpha=0.6)
-ax.set_ylabel("Katz hub centrality", fontsize=12)
-if export_figures:
- plt.savefig("figures/input_output_analysis_15_katz.pdf")
-plt.show()
-```
-
-### The Leontief inverse 𝐿 (hot colors are larger values)
-
-We construct the Leontief inverse matrix from 15 sector adjacency matrix.
-
-```{code-cell} ipython3
-I = np.identity(len(A))
-L = np.linalg.inv(I - A)
-```
-
-Now we produce the plot.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=(6.5, 5.5))
-
-ticks = range(len(L))
-
-levels = np.sqrt(np.linspace(0, 0.75, 100))
-
-co = ax.contourf(ticks,
- ticks,
- L,
- levels,
- alpha=0.85, cmap=cm.plasma)
-
-ax.set_xlabel('sector $j$', fontsize=12)
-ax.set_ylabel('sector $i$', fontsize=12)
-ax.set_yticks(ticks)
-ax.set_yticklabels(codes)
-ax.set_xticks(ticks)
-ax.set_xticklabels(codes)
-
-if export_figures:
- plt.savefig("figures/input_output_analysis_15_leo.pdf")
-plt.show()
-```
-
-### Propagation of demand shocks via backward linkages
-
-We begin by generating a demand shock vector $d$.
-
-```{code-cell} ipython3
-N = len(A)
-np.random.seed(1234)
-d = np.random.rand(N)
-d[6] = 1 # positive shock to agriculture
-```
-
-Now we simulate the demand shock propagating through the economy.
-
-```{code-cell} ipython3
-sim_length = 11
-x = d
-x_vecs = []
-for i in range(sim_length):
- if i % 2 ==0:
- x_vecs.append(x)
- x = A @ x
-```
-
-Finally, we plot the shock propagating through the economy.
-
-```{code-cell} ipython3
-fig, axes = plt.subplots(3, 2, figsize=(8, 10))
-axes = axes.flatten()
-
-for ax, x_vec, i in zip(axes, x_vecs, range(sim_length)):
- if i % 2 != 0:
- pass
- ax.set_title(f"round {i*2}")
- x_vec_cols = qbn_io.colorise_weights(x_vec,beta=False)
- # remove self-loops
- for i in range(len(A)):
- A[i][i] = 0
- qbn_plt.plot_graph(A, X, ax, codes,
- layout_type='spring',
- layout_seed=342156,
- node_color_list=x_vec_cols,
- node_size_multiple=0.00028,
- edge_size_multiple=0.8)
-
-plt.tight_layout()
-if export_figures:
- plt.savefig("figures/input_output_analysis_15_shocks.pdf")
-plt.show()
-```
-
-### Network for 71 US sectors in 2021
-
-We start by loading a graph of linkages between 71 US sectors in 2021.
-
-```{code-cell} ipython3
-codes_71 = ch2_data['us_sectors_71']['codes']
-A_71 = ch2_data['us_sectors_71']['adjacency_matrix']
-X_71 = ch2_data['us_sectors_71']['total_industry_sales']
-```
-
-Next we calculate our graph's properties.
-
-We use hub-based eigenvector centrality as our centrality measure for this plot.
-
-```{code-cell} ipython3
-centrality_71 = qbn_io.eigenvector_centrality(A_71)
-color_list_71 = qbn_io.colorise_weights(centrality_71,beta=False)
-```
-
-Finally we produce the plot.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=(10, 12))
-plt.axis("off")
-# Remove self-loops
-for i in range(A_71.shape[0]):
- A_71[i][i] = 0
-qbn_plt.plot_graph(A_71, X_71, ax, codes_71,
- node_size_multiple=0.0005,
- edge_size_multiple=4.0,
- layout_type='spring',
- layout_seed=5432167,
- tol=0.01,
- node_color_list=color_list_71)
-
-if export_figures:
- plt.savefig("figures/input_output_analysis_71.pdf")
-plt.show()
-```
-
-### Network for 114 Australian industry sectors in 2020
-
-Next we load a graph of linkages between 114 Australian sectors in 2020.
-
-```{code-cell} ipython3
-codes_114 = ch2_data['au_sectors_114']['codes']
-A_114 = ch2_data['au_sectors_114']['adjacency_matrix']
-X_114 = ch2_data['au_sectors_114']['total_industry_sales']
-```
-
-Next we calculate our graph's properties.
-
-We use hub-based eigenvector centrality as our centrality measure for this plot.
-
-```{code-cell} ipython3
-centrality_114 = qbn_io.eigenvector_centrality(A_114)
-color_list_114 = qbn_io.colorise_weights(centrality_114,beta=False)
-```
-
-Finally we produce the plot.
-
-```{code-cell} ipython3
-fig, ax = plt.subplots(figsize=(11, 13.2))
-plt.axis("off")
-# Remove self-loops
-for i in range(A_114.shape[0]):
- A_114[i][i] = 0
-qbn_plt.plot_graph(A_114, X_114, ax, codes_114,
- node_size_multiple=0.008,
- edge_size_multiple=5.0,
- layout_type='spring',
- layout_seed=5432167,
- tol=0.03,
- node_color_list=color_list_114)
-
-if export_figures:
- plt.savefig("figures/input_output_analysis_aus_114.pdf")
-plt.show()
-```
-
-### GDP growth rates and std. deviations (in parentheses) for 8 countries
-
-Here we load a `pandas` DataFrame of GDP growth rates.
-
-```{code-cell} ipython3
-gdp_df = ch2_data['gdp_df']
-gdp_df.head()
-```
-
-Now we plot the growth rates and calculate their standard deviations.
-
-```{code-cell} ipython3
-fig, axes = plt.subplots(5, 2, figsize=(8, 9))
-axes = axes.flatten()
-
-countries = gdp_df.columns
-t = np.asarray(gdp_df.index.astype(float))
-series = [np.asarray(gdp_df[country].astype(float)) for country in countries]
-
-
-for ax, country, gdp_data in zip(axes, countries, series):
-
- ax.plot(t, gdp_data)
- ax.set_title(f'{country} (${gdp_data.std():1.2f}$\%)' )
- ax.set_ylabel('\%')
- ax.set_ylim((-12, 14))
-
-plt.tight_layout()
-if export_figures:
- plt.savefig("figures/gdp_growth.pdf")
-plt.show()
-```
diff --git a/code_book/img/betweenness_centrality_1.png b/code_book/img/betweenness_centrality_1.png
deleted file mode 100644
index 6ec8f49..0000000
Binary files a/code_book/img/betweenness_centrality_1.png and /dev/null differ
diff --git a/code_book/img/code.png b/code_book/img/code.png
deleted file mode 100644
index e1820ad..0000000
Binary files a/code_book/img/code.png and /dev/null differ
diff --git a/code_book/intro.md b/code_book/intro.md
deleted file mode 100644
index f6db647..0000000
--- a/code_book/intro.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-substitutions:
- br: |
- ```{eval-rst}
- .. raw:: html
-
-
- ```
----
-# Economic Networks {{ br }} Theory and Computation (Code Book)
-
-**Authors**: [John Stachurski](https://johnstachurski.net/) and [Thomas J. Sargent](http://www.tomsargent.com/)
-
-```{tableofcontents}
-```
\ No newline at end of file
diff --git a/code_book/pkg_funcs.md b/code_book/pkg_funcs.md
deleted file mode 100644
index 2ed0e5d..0000000
--- a/code_book/pkg_funcs.md
+++ /dev/null
@@ -1,359 +0,0 @@
----
-jupytext:
- cell_metadata_filter: -all
- formats: md:myst
- text_representation:
- extension: .md
- format_name: myst
- format_version: 0.13
- jupytext_version: 1.10.3
-kernelspec:
- display_name: Python 3
- language: python
- name: python3
----
-
-# quantecon_book_networks
-
-## input_output
-
-### node_total_exports
-```{code-cell}
-def node_total_exports(G):
- node_exports = []
- for node1 in G.nodes():
- total_export = 0
- for node2 in G[node1]:
- total_export += G[node1][node2]['weight']
- node_exports.append(total_export)
- return node_exports
-```
-
-### node_total_imports
-```{code-cell}
-def node_total_imports(G):
- node_imports = []
- for node1 in G.nodes():
- total_import = 0
- for node2 in G[node1]:
- total_import += G[node2][node1]['weight']
- node_imports.append(total_import)
- return node_imports
-```
-
-### edge_weights
-```{code-cell}
-def edge_weights(G):
- edge_weights = [G[u][v]['weight'] for u,v in G.edges()]
- return edge_weights
-```
-
-### normalise_weights
-```{code-cell}
-def normalise_weights(weights,scalar=1):
- max_value = np.max(weights)
- return [scalar * (weight / max_value) for weight in weights]
-```
-
-### to_zero_one
-```{code-cell}
-def to_zero_one(x):
- "Map vector x to the zero one interval."
- x = np.array(x)
- x_min, x_max = x.min(), x.max()
- return (x - x_min)/(x_max - x_min)
-```
-
-### to_zero_one_beta
-```{code-cell}
-def to_zero_one_beta(x,
- qrange=[0.25, 0.75],
- beta_para=[0.5, 0.5]):
-
- """
- Nonlinearly map vector x to the zero one interval with beta distribution.
- https://en.wikipedia.org/wiki/Beta_distribution
- """
- x = np.array(x)
- x_min, x_max = x.min(), x.max()
- if beta_para != None:
- a, b = beta_para
- return beta.cdf((x - x_min) /(x_max - x_min), a, b)
- else:
- q1, q2 = qrange
- return (x - x_min) * (q2 - q1) /(x_max - x_min) + q1
-```
-
-### colorise_weights
-```{code-cell}
-import matplotlib.cm as cm
-def colorise_weights(weights,beta=True,color_palette=cm.plasma):
- if beta:
- cp = color_palette(to_zero_one_beta(weights))
- else:
- cp = color_palette(to_zero_one(weights))
- return cp
-```
-
-### spec_rad
-```{code-cell}
-def spec_rad(M):
- """
- Compute the spectral radius of M.
- """
- return np.max(np.abs(np.linalg.eigvals(M)))
-```
-
-### adjacency_matrix_to_graph
-```{code-cell}
-def adjacency_matrix_to_graph(A,
- codes,
- tol=0.0): # clip entries below tol
- """
- Build a networkx graph object given an adjacency matrix
- """
- G = nx.DiGraph()
- N = len(A)
-
- # Add nodes
- for i, code in enumerate(codes):
- G.add_node(code, name=code)
-
- # Add the edges
- for i in range(N):
- for j in range(N):
- a = A[i, j]
- if a > tol:
- G.add_edge(codes[i], codes[j], weight=a)
-
- return G
-```
-
-### eigenvector_centrality
-```{code-cell}
-def eigenvector_centrality(A, k=40, authority=False):
- """
- Computes the dominant eigenvector of A. Assumes A is
- primitive and uses the power method.
- """
- A_temp = A.T if authority else A
- n = len(A_temp)
- r = spec_rad(A_temp)
- e = r**(-k) * (np.linalg.matrix_power(A_temp, k) @ np.ones(n))
- return e / np.sum(e)
-```
-
-### katz_centrality
-```{code-cell}
-def katz_centrality(A, b=1, authority=False):
- """
- Computes the Katz centrality of A, defined as the x solving
-
- x = 1 + b A x (1 = vector of ones)
-
- Assumes that A is square.
-
- If authority=True, then A is replaced by its transpose.
- """
- n = len(A)
- I = np.identity(n)
- C = I - b * A.T if authority else I - b * A
- return np.linalg.solve(C, np.ones(n))
-```
-
-### build_unweighted_matrix
-```{code-cell}
-def build_unweighted_matrix(Z, tol=1e-5):
- """
- return a unweighted adjacency matrix
- """
- return 1*(Z>tol)
-```
-
-### erdos_renyi_graph
-```{code-cell}
-def erdos_renyi_graph(n=100, p=0.5, seed=1234):
- "Returns an Erdős-Rényi random graph."
-
- np.random.seed(seed)
- edges = itertools.combinations(range(n), 2)
- G = nx.Graph()
-
- for e in edges:
- if np.random.rand() < p:
- G.add_edge(*e)
- return G
-```
-
-### build_coefficient_matrices
-```{code-cell}
-def build_coefficient_matrices(Z, X):
- """
- Build coefficient matrices A and F from Z and X via
-
- A[i, j] = Z[i, j] / X[j]
- F[i, j] = Z[i, j] / X[i]
-
- """
- A, F = np.empty_like(Z), np.empty_like(Z)
- n = A.shape[0]
- for i in range(n):
- for j in range(n):
- A[i, j] = Z[i, j] / X[j]
- F[i, j] = Z[i, j] / X[i]
-
- return A, F
-```
-
-## plotting
-
-### plot_graph
-```{code-cell}
-def plot_graph(A,
- X,
- ax,
- codes,
- node_color_list=None,
- node_size_multiple=0.0005,
- edge_size_multiple=14,
- layout_type='circular',
- layout_seed=1234,
- tol=0.03): # clip entries below tol
-
- G = nx.DiGraph()
- N = len(A)
-
- # Add nodes, with weights by sales of the sector
- for i, w in enumerate(X):
- G.add_node(codes[i], weight=w, name=codes[i])
-
- node_sizes = X * node_size_multiple
-
- # Position the nodes
- if layout_type == 'circular':
- node_pos_dict = nx.circular_layout(G)
- elif layout_type == 'spring':
- node_pos_dict = nx.spring_layout(G, seed=layout_seed)
- elif layout_type == 'random':
- node_pos_dict = nx.random_layout(G, seed=layout_seed)
- elif layout_type == 'spiral':
- node_pos_dict = nx.spiral_layout(G)
-
- # Add the edges, along with their colors and widths
- edge_colors = []
- edge_widths = []
- for i in range(N):
- for j in range(N):
- a = A[i, j]
- if a > tol:
- G.add_edge(codes[i], codes[j])
- edge_colors.append(node_color_list[i])
- width = a * edge_size_multiple
- edge_widths.append(width)
-
- # Plot the networks
- nx.draw_networkx_nodes(G,
- node_pos_dict,
- node_color=node_color_list,
- node_size=node_sizes,
- edgecolors='grey',
- linewidths=2,
- alpha=0.6,
- ax=ax)
-
- nx.draw_networkx_labels(G,
- node_pos_dict,
- font_size=10,
- ax=ax)
-
- nx.draw_networkx_edges(G,
- node_pos_dict,
- edge_color=edge_colors,
- width=edge_widths,
- arrows=True,
- arrowsize=20,
- alpha=0.6,
- ax=ax,
- arrowstyle='->',
- node_size=node_sizes,
- connectionstyle='arc3,rad=0.15')
-```
-
-### plot_matrices
-```{code-cell}
-def plot_matrices(matrix,
- codes,
- ax,
- font_size=12,
- alpha=0.6,
- colormap=cm.viridis,
- color45d=None,
- xlabel='sector $j$',
- ylabel='sector $i$'):
-
- ticks = range(len(matrix))
-
- levels = np.sqrt(np.linspace(0, 0.75, 100))
-
-
- if color45d != None:
- co = ax.contourf(ticks,
- ticks,
- matrix,
-# levels,
- alpha=alpha, cmap=colormap)
- ax.plot(ticks, ticks, color=color45d)
- else:
- co = ax.contourf(ticks,
- ticks,
- matrix,
- levels,
- alpha=alpha, cmap=colormap)
-
- #plt.colorbar(co)
-
- ax.set_xlabel(xlabel, fontsize=font_size)
- ax.set_ylabel(ylabel, fontsize=font_size)
- ax.set_yticks(ticks)
- ax.set_yticklabels(codes)
- ax.set_xticks(ticks)
- ax.set_xticklabels(codes)
-```
-
-### unit_simplex
-```{code-cell}
-def unit_simplex(angle):
-
- fig = plt.figure(figsize=(10, 8))
- ax = fig.add_subplot(111, projection='3d')
-
- vtx = [[0, 0, 1],
- [0, 1, 0],
- [1, 0, 0]]
-
- tri = Poly3DCollection([vtx], color='darkblue', alpha=0.3)
- tri.set_facecolor([0.5, 0.5, 1])
- ax.add_collection3d(tri)
-
- ax.set(xlim=(0, 1), ylim=(0, 1), zlim=(0, 1),
- xticks=(1,), yticks=(1,), zticks=(1,))
-
- ax.set_xticklabels(['$(1, 0, 0)$'], fontsize=16)
- ax.set_yticklabels([f'$(0, 1, 0)$'], fontsize=16)
- ax.set_zticklabels([f'$(0, 0, 1)$'], fontsize=16)
-
- ax.xaxis.majorTicks[0].set_pad(15)
- ax.yaxis.majorTicks[0].set_pad(15)
- ax.zaxis.majorTicks[0].set_pad(35)
-
- ax.view_init(30, angle)
-
- # Move axis to origin
- ax.xaxis._axinfo['juggled'] = (0, 0, 0)
- ax.yaxis._axinfo['juggled'] = (1, 1, 1)
- ax.zaxis._axinfo['juggled'] = (2, 2, 0)
-
- ax.grid(False)
-
- return ax
-```
\ No newline at end of file
diff --git a/code_book/status.md b/code_book/status.md
deleted file mode 100644
index 56b9450..0000000
--- a/code_book/status.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-jupytext:
- text_representation:
- extension: .md
- format_name: myst
-kernelspec:
- display_name: Python 3
- language: python
- name: python3
----
-
-# Execution Statistics
-
-This table contains the latest execution statistics.
-
-```{nb-exec-table}
-```
diff --git a/ipynb/appendix.ipynb b/ipynb/appendix.ipynb
new file mode 100644
index 0000000..316c944
--- /dev/null
+++ b/ipynb/appendix.ipynb
@@ -0,0 +1,776 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "35da8276",
+ "metadata": {},
+ "source": [
+ "# Appendix Code\n",
+ "\n",
+ "We begin with some imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "46328226",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np \n",
+ "import matplotlib.pyplot as plt \n",
+ "import quantecon_book_networks \n",
+ "from mpl_toolkits.mplot3d.axes3d import Axes3D, proj3d \n",
+ "from matplotlib import cm \n",
+ "export_figures = False\n",
+ "quantecon_book_networks.config(\"matplotlib\") "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "483e031e",
+ "metadata": {},
+ "source": [
+ "## Functions\n",
+ "\n",
+ "### One-to-one and onto functions on $(0,1)$\n",
+ "\n",
+ "We start by defining the domain and our one-to-one and onto function examples."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0b617e66",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = np.linspace(0, 1, 100)\n",
+ "titles = 'one-to-one', 'onto'\n",
+ "labels = '$f(x)=1/2 + x/2$', '$f(x)=4x(1-x)$'\n",
+ "funcs = lambda x: 1/2 + x/2, lambda x: 4 * x * (1-x)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "40b1bb2d",
+ "metadata": {},
+ "source": [
+ "The figure can now be produced as follows."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "12ba8e7b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n",
+ "for f, ax, lb, ti in zip(funcs, axes, labels, titles):\n",
+ " ax.set_xlim(0, 1)\n",
+ " ax.set_ylim(0, 1.01)\n",
+ " ax.plot(x, f(x), label=lb)\n",
+ " ax.set_title(ti, fontsize=12)\n",
+ " ax.legend(loc='lower center', fontsize=12, frameon=False)\n",
+ " ax.set_xticks((0, 1))\n",
+ " ax.set_yticks((0, 1))\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/func_types_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "fe7452ac",
+ "metadata": {},
+ "source": [
+ "### Some functions are bijections\n",
+ "\n",
+ "This figure can be produced in a similar manner to 6.1."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5907691a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = np.linspace(0, 1, 100)\n",
+ "titles = 'constant', 'bijection'\n",
+ "labels = '$f(x)=1/2$', '$f(x)=1-x$'\n",
+ "funcs = lambda x: 1/2 + 0 * x, lambda x: 1-x\n",
+ "\n",
+ "fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n",
+ "for f, ax, lb, ti in zip(funcs, axes, labels, titles):\n",
+ " ax.set_xlim(0, 1)\n",
+ " ax.set_ylim(0, 1.01)\n",
+ " ax.plot(x, f(x), label=lb)\n",
+ " ax.set_title(ti, fontsize=12)\n",
+ " ax.legend(loc='lower center', fontsize=12, frameon=False)\n",
+ " ax.set_xticks((0, 1))\n",
+ " ax.set_yticks((0, 1))\n",
+ " \n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/func_types_2.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bbfde515",
+ "metadata": {},
+ "source": [
+ "## Fixed Points\n",
+ "\n",
+ "### Graph and fixed points of $G \\colon x \\mapsto 2.125/(1 + x^{-4})$\n",
+ "\n",
+ "We begin by defining the domain and the function."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3110e5cd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "xmin, xmax = 0.0000001, 2\n",
+ "xgrid = np.linspace(xmin, xmax, 200)\n",
+ "g = lambda x: 2.125 / (1 + x**(-4))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6532632d",
+ "metadata": {},
+ "source": [
+ "Next we define our fixed points."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3b9ef820",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fps_labels = ('$x_\\ell$', '$x_m$', '$x_h$' )\n",
+ "fps = (0.01, 0.94, 1.98)\n",
+ "coords = ((40, 80), (40, -40), (-40, -80))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "87f4c5c2",
+ "metadata": {},
+ "source": [
+ "Finally we can produce the figure."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f8f4567d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(6.5, 6))\n",
+ "\n",
+ "ax.set_xlim(xmin, xmax)\n",
+ "ax.set_ylim(xmin, xmax)\n",
+ "\n",
+ "ax.plot(xgrid, g(xgrid), 'b-', lw=2, alpha=0.6, label='$G$')\n",
+ "ax.plot(xgrid, xgrid, 'k-', lw=1, alpha=0.7, label='$45^o$')\n",
+ "\n",
+ "ax.legend(fontsize=14)\n",
+ "\n",
+ "ax.plot(fps, fps, 'ro', ms=8, alpha=0.6)\n",
+ "\n",
+ "for (fp, lb, coord) in zip(fps, fps_labels, coords):\n",
+ " ax.annotate(lb, \n",
+ " xy=(fp, fp),\n",
+ " xycoords='data',\n",
+ " xytext=coord,\n",
+ " textcoords='offset points',\n",
+ " fontsize=16,\n",
+ " arrowprops=dict(arrowstyle=\"->\"))\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/three_fixed_points.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b6b47191",
+ "metadata": {},
+ "source": [
+ "## Complex Numbers\n",
+ "\n",
+ "### The complex number $(a, b) = r e^{i \\phi}$\n",
+ "\n",
+ "We start by abbreviating some useful values and functions."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ee845f66",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "π = np.pi\n",
+ "zeros = np.zeros\n",
+ "ones = np.ones\n",
+ "fs = 18"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4a40f91e",
+ "metadata": {},
+ "source": [
+ "Next we set our parameters."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fa26afd6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "r = 2\n",
+ "φ = π/3\n",
+ "x = r * np.cos(φ)\n",
+ "x_range = np.linspace(0, x, 1000)\n",
+ "φ_range = np.linspace(0, φ, 1000)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d55d19ec",
+ "metadata": {},
+ "source": [
+ "Finally we produce the plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7036969e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = plt.figure(figsize=(7, 7))\n",
+ "ax = plt.subplot(111, projection='polar')\n",
+ "\n",
+ "ax.plot((0, φ), (0, r), marker='o', color='b', alpha=0.5) # Plot r\n",
+ "ax.plot(zeros(x_range.shape), x_range, color='b', alpha=0.5) # Plot x\n",
+ "ax.plot(φ_range, x / np.cos(φ_range), color='b', alpha=0.5) # Plot y\n",
+ "ax.plot(φ_range, ones(φ_range.shape) * 0.15, color='green') # Plot φ\n",
+ "\n",
+ "ax.margins(0) # Let the plot starts at origin\n",
+ "\n",
+ "ax.set_rmax(2)\n",
+ "ax.set_rticks((1, 2)) # Less radial ticks\n",
+ "ax.set_rlabel_position(-88.5) # Get radial labels away from plotted line\n",
+ "\n",
+ "ax.text(φ, r+0.04 , r'$(a, b) = (1, \\sqrt{3})$', fontsize=fs) # Label z\n",
+ "ax.text(φ+0.4, 1 , '$r = 2$', fontsize=fs) # Label r\n",
+ "ax.text(0-0.4, 0.5, '$1$', fontsize=fs) # Label x\n",
+ "ax.text(0.5, 1.2, '$\\sqrt{3}$', fontsize=fs) # Label y\n",
+ "ax.text(0.3, 0.25, '$\\\\varphi = \\\\pi/3$', fontsize=fs) # Label θ\n",
+ "\n",
+ "xT=plt.xticks()[0]\n",
+ "xL=['0',\n",
+ " r'$\\frac{\\pi}{4}$',\n",
+ " r'$\\frac{\\pi}{2}$',\n",
+ " r'$\\frac{3\\pi}{4}$',\n",
+ " r'$\\pi$',\n",
+ " r'$\\frac{5\\pi}{4}$',\n",
+ " r'$\\frac{3\\pi}{2}$',\n",
+ " r'$\\frac{7\\pi}{4}$']\n",
+ "\n",
+ "plt.xticks(xT, xL, fontsize=fs+2)\n",
+ "ax.grid(True)\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/complex_number.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "418ac96b",
+ "metadata": {},
+ "source": [
+ "## Convergence\n",
+ "\n",
+ "### Convergence of a sequence to the origin in $\\mathbb{R}^3$\n",
+ "\n",
+ "We define our transformation matrix, initial point, and number of iterations."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ebbf6747",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "θ = 0.1\n",
+ "A = ((np.cos(θ), - np.sin(θ), 0.0001),\n",
+ " (np.sin(θ), np.cos(θ), 0.001),\n",
+ " (np.sin(θ), np.cos(θ), 1))\n",
+ "\n",
+ "A = 0.98 * np.array(A)\n",
+ "p = np.array((1, 1, 1))\n",
+ "n = 200"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "815fed7d",
+ "metadata": {},
+ "source": [
+ "Now we can produce the plot by repeatedly transforming our point with the transformation matrix and plotting each resulting point."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "14e5a9a7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = plt.figure(figsize=(6, 6))\n",
+ "ax = fig.add_subplot(111, projection='3d')\n",
+ "ax.view_init(elev=20, azim=-40)\n",
+ "\n",
+ "ax.set_xlim((-1.5, 1.5))\n",
+ "ax.set_ylim((-1.5, 1.5))\n",
+ "ax.set_xticks((-1,0,1))\n",
+ "ax.set_yticks((-1,0,1))\n",
+ "\n",
+ "for i in range(n):\n",
+ " x, y, z = p\n",
+ " ax.plot([x], [y], [z], 'o', ms=4, color=cm.jet_r(i / n))\n",
+ " p = A @ p\n",
+ " \n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/euclidean_convergence_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8804299c",
+ "metadata": {},
+ "source": [
+ "## Linear Algebra\n",
+ "\n",
+ "### The span of vectors $u$, $v$, $w$ in $\\mathbb{R}$\n",
+ "\n",
+ "We begin by importing the `FancyArrowPatch` class and extending it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "07b47dce",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from matplotlib.patches import FancyArrowPatch\n",
+ "\n",
+ "class Arrow3D(FancyArrowPatch):\n",
+ " def __init__(self, xs, ys, zs, *args, **kwargs):\n",
+ " FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)\n",
+ " self._verts3d = xs, ys, zs\n",
+ "\n",
+ " def do_3d_projection(self, renderer=None):\n",
+ " xs3d, ys3d, zs3d = self._verts3d\n",
+ " xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)\n",
+ " self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))\n",
+ "\n",
+ " return np.min(zs)\n",
+ "\n",
+ " def draw(self, renderer):\n",
+ " xs3d, ys3d, zs3d = self._verts3d\n",
+ " xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)\n",
+ " self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))\n",
+ " FancyArrowPatch.draw(self, renderer)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2a087a52",
+ "metadata": {},
+ "source": [
+ "Next we generate our vectors $u$, $v$, $w$, ensuring linear dependence."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ced11ab1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "α, β = 0.2, 0.1\n",
+ "def f(x, y):\n",
+ " return α * x + β * y\n",
+ "\n",
+ "# Vector locations, by coordinate\n",
+ "x_coords = np.array((3, 3, -3.5))\n",
+ "y_coords = np.array((4, -4, 3.0))\n",
+ "z_coords = f(x_coords, y_coords)\n",
+ "\n",
+ "vecs = [np.array((x, y, z)) for x, y, z in zip(x_coords, y_coords, z_coords)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "165912ee",
+ "metadata": {},
+ "source": [
+ "Next we define the spanning plane."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "97870984",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x_min, x_max = -5, 5\n",
+ "y_min, y_max = -5, 5\n",
+ "\n",
+ "grid_size = 20\n",
+ "xr2 = np.linspace(x_min, x_max, grid_size)\n",
+ "yr2 = np.linspace(y_min, y_max, grid_size)\n",
+ "x2, y2 = np.meshgrid(xr2, yr2)\n",
+ "z2 = f(x2, y2)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "be62269c",
+ "metadata": {},
+ "source": [
+ "Finally we generate the plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "49b4eff0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = plt.figure(figsize=(12, 7))\n",
+ "ax = plt.axes(projection ='3d')\n",
+ "ax.view_init(elev=10., azim=-80)\n",
+ "\n",
+ "ax.set(xlim=(x_min, x_max), \n",
+ " ylim=(x_min, x_max), \n",
+ " zlim=(x_min, x_max),\n",
+ " xticks=(0,), yticks=(0,), zticks=(0,))\n",
+ "\n",
+ "# Draw the axes\n",
+ "gs = 3\n",
+ "z = np.linspace(x_min, x_max, gs)\n",
+ "x = np.zeros(gs)\n",
+ "y = np.zeros(gs)\n",
+ "ax.plot(x, y, z, 'k-', lw=2, alpha=0.5)\n",
+ "ax.plot(z, x, y, 'k-', lw=2, alpha=0.5)\n",
+ "ax.plot(y, z, x, 'k-', lw=2, alpha=0.5)\n",
+ "\n",
+ "# Draw the vectors\n",
+ "for v in vecs:\n",
+ " a = Arrow3D([0, v[0]], \n",
+ " [0, v[1]], \n",
+ " [0, v[2]], \n",
+ " mutation_scale=20, \n",
+ " lw=1, \n",
+ " arrowstyle=\"-|>\", \n",
+ " color=\"b\")\n",
+ " ax.add_artist(a)\n",
+ "\n",
+ "\n",
+ "for v, label in zip(vecs, ('u', 'v', 'w')):\n",
+ " v = v * 1.1\n",
+ " ax.text(v[0], v[1], v[2], \n",
+ " f'${label}$', \n",
+ " fontsize=14)\n",
+ "\n",
+ "# Draw the plane\n",
+ "grid_size = 20\n",
+ "xr2 = np.linspace(x_min, x_max, grid_size)\n",
+ "yr2 = np.linspace(y_min, y_max, grid_size)\n",
+ "x2, y2 = np.meshgrid(xr2, yr2)\n",
+ "z2 = f(x2, y2)\n",
+ "\n",
+ "ax.plot_surface(x2, y2, z2, rstride=1, cstride=1, cmap=cm.jet,\n",
+ " linewidth=0, antialiased=True, alpha=0.2)\n",
+ "\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/span1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b87b7088",
+ "metadata": {},
+ "source": [
+ "## Linear Maps Are Matrices\n",
+ "\n",
+ "### Equivalence of the onto and one-to-one properties (for linear maps)\n",
+ "\n",
+ "This plot is produced similarly to Figures 6.1 and 6.2."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a243b6e1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n",
+ "\n",
+ "x = np.linspace(-2, 2, 10)\n",
+ "\n",
+ "titles = 'non-bijection', 'bijection'\n",
+ "labels = '$f(x)=0 x$', '$f(x)=0.5 x$'\n",
+ "funcs = lambda x: 0*x, lambda x: 0.5 * x\n",
+ "\n",
+ "for ax, f, lb, ti in zip(axes, funcs, labels, titles):\n",
+ "\n",
+ " # Set the axes through the origin\n",
+ " for spine in ['left', 'bottom']:\n",
+ " ax.spines[spine].set_position('zero')\n",
+ " for spine in ['right', 'top']:\n",
+ " ax.spines[spine].set_color('none')\n",
+ " ax.set_yticks((-1, 1))\n",
+ " ax.set_ylim((-1, 1))\n",
+ " ax.set_xlim((-1, 1))\n",
+ " ax.set_xticks((-1, 1))\n",
+ " y = f(x)\n",
+ " ax.plot(x, y, '-', linewidth=4, label=lb, alpha=0.6)\n",
+ " ax.text(-0.8, 0.5, ti, fontsize=14)\n",
+ " ax.legend(loc='lower right', fontsize=12)\n",
+ " \n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/func_types_3.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e3aa06c3",
+ "metadata": {},
+ "source": [
+ "## Convexity and Polyhedra\n",
+ "\n",
+ "### A polyhedron $P$ represented as intersecting halfspaces\n",
+ "\n",
+ "Inequalities are of the form\n",
+ "\n",
+ "$$ a x + b y \\leq c $$\n",
+ "\n",
+ "To plot the halfspace we plot the line\n",
+ "\n",
+ "$$ y = c/b - a/b x $$\n",
+ "\n",
+ "and then fill in the halfspace using `fill_between` on points $x, y, \\hat y$,\n",
+ "where $\\hat y$ is either `y_min` or `y_max`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ae4e8b4d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots()\n",
+ "plt.axis('off')\n",
+ "\n",
+ "x = np.linspace(-10, 14, 200)\n",
+ "y_min, y_max = -2, 3\n",
+ "\n",
+ "a1, b1, c1 = 1.0, 8.0, -5.0\n",
+ "y = c1 / b1 - (a1 / b1) * x\n",
+ "ax.plot(x, y, label='$a_1 x_1 + b_1 x_2 = c_1$')\n",
+ "ax.fill_between(x, y, y_max, alpha=0.2)\n",
+ "\n",
+ "\n",
+ "a2, b2, c2 = 0.5, 0.75, 1.5\n",
+ "y = c2 / b2 - (a2 / b2) * x\n",
+ "ax.plot(x, y, label='$a_2 x_1 + b_2 x_2 = c_2$')\n",
+ "ax.fill_between(x, y, y_min, alpha=0.2)\n",
+ "\n",
+ "\n",
+ "a3, b3, c3 = -1.0, 1.0, 1.5\n",
+ "y = c3 / b3 - (a3 / b3) * x\n",
+ "ax.plot(x, y, label='$a_3 x_1 + b_3 x_2 = c_3$')\n",
+ "ax.fill_between(x, y, y_min, alpha=0.2)\n",
+ "\n",
+ "ax.plot((0.23,), (1.82,), 'ko')\n",
+ "ax.plot((-1.95,), (-0.4,), 'ko')\n",
+ "ax.plot((4.8,), (-1.2,), 'ko')\n",
+ "\n",
+ "\n",
+ "ax.annotate('$P$', xy=(0, 0), fontsize=12)\n",
+ "\n",
+ "ax.set_ylim(y_min, y_max)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/polyhedron1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "45b13eed",
+ "metadata": {},
+ "source": [
+ "## Saddle Points and Duality"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "53bdce43",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = plt.figure(figsize=(8.5, 6))\n",
+ "\n",
+ "## Top left Plot\n",
+ "\n",
+ "ax = fig.add_subplot(221, projection='3d')\n",
+ "\n",
+ "plot_args = {'rstride': 1, 'cstride': 1, 'cmap':\"viridis\",\n",
+ " 'linewidth': 0.4, 'antialiased': True, \"alpha\":0.75,\n",
+ " 'vmin': -1, 'vmax': 1}\n",
+ "\n",
+ "x, y = np.mgrid[-1:1:31j, -1:1:31j]\n",
+ "z = x**2 - y**2\n",
+ "\n",
+ "ax.plot_surface(x, y, z, **plot_args)\n",
+ "\n",
+ "ax.view_init(azim=245, elev=20)\n",
+ "\n",
+ "ax.set_xlim(-1, 1)\n",
+ "ax.set_ylim(-1, 1)\n",
+ "ax.set_zlim(-1, 1)\n",
+ "\n",
+ "ax.set_xticks([0])\n",
+ "ax.set_xticklabels([r\"$x^*$\"], fontsize=16)\n",
+ "ax.set_yticks([0])\n",
+ "ax.set_yticklabels([r\"$\\theta^*$\"], fontsize=16)\n",
+ "\n",
+ "\n",
+ "ax.set_zticks([])\n",
+ "ax.set_zticklabels([])\n",
+ "\n",
+ "ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) \n",
+ "ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0)) \n",
+ "ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n",
+ "\n",
+ "ax.set_zlabel(\"$L(x,\\\\theta)$\", fontsize=14)\n",
+ "\n",
+ "## Top Right Plot\n",
+ "\n",
+ "ax = fig.add_subplot(222)\n",
+ "\n",
+ "\n",
+ "plot_args = {'cmap':\"viridis\", 'antialiased': True, \"alpha\":0.6,\n",
+ " 'vmin': -1, 'vmax': 1}\n",
+ "\n",
+ "x, y = np.mgrid[-1:1:31j, -1:1:31j]\n",
+ "z = x**2 - y**2\n",
+ "\n",
+ "ax.contourf(x, y, z, **plot_args)\n",
+ "\n",
+ "ax.plot([0], [0], 'ko')\n",
+ "\n",
+ "ax.set_xlim(-1, 1)\n",
+ "ax.set_ylim(-1, 1)\n",
+ "\n",
+ "plt.xticks([ 0],\n",
+ " [r\"$x^*$\"], fontsize=16)\n",
+ "\n",
+ "plt.yticks([0],\n",
+ " [r\"$\\theta^*$\"], fontsize=16)\n",
+ "\n",
+ "ax.hlines(0, -1, 1, color='k', ls='-', lw=1)\n",
+ "ax.vlines(0, -1, 1, color='k', ls='-', lw=1)\n",
+ "\n",
+ "coords=(-35, 30)\n",
+ "ax.annotate(r'$L(x, \\theta^*)$', \n",
+ " xy=(-0.5, 0), \n",
+ " xycoords=\"data\",\n",
+ " xytext=coords,\n",
+ " textcoords=\"offset points\",\n",
+ " fontsize=12,\n",
+ " arrowprops={\"arrowstyle\" : \"->\"})\n",
+ "\n",
+ "coords=(35, 30)\n",
+ "ax.annotate(r'$L(x^*, \\theta)$', \n",
+ " xy=(0, 0.25), \n",
+ " xycoords=\"data\",\n",
+ " xytext=coords,\n",
+ " textcoords=\"offset points\",\n",
+ " fontsize=12,\n",
+ " arrowprops={\"arrowstyle\" : \"->\"})\n",
+ "\n",
+ "## Bottom Left Plot\n",
+ "\n",
+ "ax = fig.add_subplot(223)\n",
+ "\n",
+ "x = np.linspace(-1, 1, 100)\n",
+ "ax.plot(x, -x**2, label='$\\\\theta \\mapsto L(x^*, \\\\theta)$')\n",
+ "ax.set_ylim((-1, 1))\n",
+ "ax.legend(fontsize=14)\n",
+ "ax.set_xticks([])\n",
+ "ax.set_yticks([])\n",
+ "\n",
+ "## Bottom Right Plot\n",
+ "\n",
+ "ax = fig.add_subplot(224)\n",
+ "\n",
+ "x = np.linspace(-1, 1, 100)\n",
+ "ax.plot(x, x**2, label='$x \\mapsto L(x, \\\\theta^*)$')\n",
+ "ax.set_ylim((-1, 1))\n",
+ "ax.legend(fontsize=14, loc='lower right')\n",
+ "ax.set_xticks([])\n",
+ "ax.set_yticks([])\n",
+ "\n",
+ "if export_figures:\n",
+ "\tplt.savefig(\"figures/saddle_1.pdf\")\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "cell_metadata_filter": "-all"
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/ipynb/ch_fpms.ipynb b/ipynb/ch_fpms.ipynb
new file mode 100644
index 0000000..8327586
--- /dev/null
+++ b/ipynb/ch_fpms.ipynb
@@ -0,0 +1,273 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "ab032fde",
+ "metadata": {},
+ "source": [
+ "# Chapter 5 - Nonlinear Interactions (Python Code)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f0b02209",
+ "metadata": {
+ "tags": [
+ "hide-output"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "! pip install --upgrade quantecon_book_networks"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7d82d322",
+ "metadata": {},
+ "source": [
+ "We begin with some imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "be38b591",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import quantecon as qe\n",
+ "import quantecon_book_networks\n",
+ "import quantecon_book_networks.input_output as qbn_io\n",
+ "import quantecon_book_networks.plotting as qbn_plt\n",
+ "import quantecon_book_networks.data as qbn_data\n",
+ "export_figures = False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e481b4fb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import networkx as nx\n",
+ "import matplotlib.pyplot as plt\n",
+ "from matplotlib import cm\n",
+ "quantecon_book_networks.config(\"matplotlib\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7f027495",
+ "metadata": {},
+ "source": [
+ "## Financial Networks\n",
+ "\n",
+ "### Equity Cross-Holdings\n",
+ "\n",
+ "Here we define a class for modelling a financial network where firms are linked by share cross-holdings,\n",
+ "and there are failure costs as described by [Elliott et al. (2014)](https://www.aeaweb.org/articles?id=10.1257/aer.104.10.3115)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "58e02f09",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class FinNet:\n",
+ " \n",
+ " def __init__(self, n=100, c=0.72, d=1, θ=0.5, β=1.0, seed=1234):\n",
+ " \n",
+ " self.n, self.c, self.d, self.θ, self.β = n, c, d, θ, β\n",
+ " np.random.seed(seed)\n",
+ " \n",
+ " self.e = np.ones(n)\n",
+ " self.C, self.C_hat = self.generate_primitives()\n",
+ " self.A = self.C_hat @ np.linalg.inv(np.identity(n) - self.C)\n",
+ " self.v_bar = self.A @ self.e\n",
+ " self.t = np.full(n, θ)\n",
+ " \n",
+ " def generate_primitives(self):\n",
+ " \n",
+ " n, c, d = self.n, self.c, self.d\n",
+ " B = np.zeros((n, n))\n",
+ " C = np.zeros_like(B)\n",
+ "\n",
+ " for i in range(n):\n",
+ " for j in range(n):\n",
+ " if i != j and np.random.rand() < d/(n-1):\n",
+ " B[i,j] = 1\n",
+ " \n",
+ " for i in range(n):\n",
+ " for j in range(n):\n",
+ " k = np.sum(B[:,j])\n",
+ " if k > 0:\n",
+ " C[i,j] = c * B[i,j] / k\n",
+ " \n",
+ " C_hat = np.identity(n) * (1 - c)\n",
+ " \n",
+ " return C, C_hat\n",
+ " \n",
+ " def T(self, v):\n",
+ " Tv = self.A @ (self.e - self.β * np.where(v < self.t, 1, 0))\n",
+ " return Tv\n",
+ " \n",
+ " def compute_equilibrium(self):\n",
+ " i = 0\n",
+ " v = self.v_bar\n",
+ " error = 1\n",
+ " while error > 1e-10:\n",
+ " print(f\"number of failing firms is \", np.sum(v < self.θ))\n",
+ " new_v = self.T(v)\n",
+ " error = np.max(np.abs(new_v - v))\n",
+ " v = new_v\n",
+ " i = i+1\n",
+ " \n",
+ " print(f\"Terminated after {i} iterations\")\n",
+ " return v\n",
+ " \n",
+ " def map_values_to_colors(self, v, j):\n",
+ " cols = cm.plasma(qbn_io.to_zero_one(v))\n",
+ " if j != 0:\n",
+ " for i in range(len(v)):\n",
+ " if v[i] < self.t[i]:\n",
+ " cols[i] = 0.0\n",
+ " return cols"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "dbf9b681",
+ "metadata": {},
+ "source": [
+ "Now we create a financial network."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "663e7992",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fn = FinNet(n=100, c=0.72, d=1, θ=0.3, β=1.0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c89db6a0",
+ "metadata": {},
+ "source": [
+ "And compute its equilibrium."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "81efad24",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fn.compute_equilibrium()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "25a95eb9",
+ "metadata": {},
+ "source": [
+ "### Waves of bankruptcies in a financial network\n",
+ "\n",
+ "Now we visualise the network after different numbers of iterations. \n",
+ "\n",
+ "For convenience we will first define a function to plot the graphs of the financial network."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ff6c8d54",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def plot_fin_graph(G, ax, node_color_list):\n",
+ " \n",
+ " n = G.number_of_nodes()\n",
+ "\n",
+ " node_pos_dict = nx.spring_layout(G, k=1.1)\n",
+ " edge_colors = []\n",
+ "\n",
+ " for i in range(n):\n",
+ " for j in range(n):\n",
+ " edge_colors.append(node_color_list[i])\n",
+ "\n",
+ " \n",
+ " nx.draw_networkx_nodes(G, \n",
+ " node_pos_dict, \n",
+ " node_color=node_color_list, \n",
+ " edgecolors='grey', \n",
+ " node_size=100,\n",
+ " linewidths=2, \n",
+ " alpha=0.8, \n",
+ " ax=ax)\n",
+ "\n",
+ " nx.draw_networkx_edges(G, \n",
+ " node_pos_dict, \n",
+ " edge_color=edge_colors, \n",
+ " alpha=0.4, \n",
+ " ax=ax)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bd11cd19",
+ "metadata": {},
+ "source": [
+ "Now we will iterate by applying the operator $T$ to the vector of firm values $v$ and produce the plots."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c4e15ff6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "G = nx.from_numpy_array(np.array(fn.C), create_using=nx.DiGraph)\n",
+ "v = fn.v_bar\n",
+ "\n",
+ "k = 15\n",
+ "d = 3\n",
+ "fig, axes = plt.subplots(int(k/d), 1, figsize=(10, 12))\n",
+ "\n",
+ "for i in range(k):\n",
+ " if i % d == 0:\n",
+ " ax = axes[int(i/d)]\n",
+ " ax.set_title(f\"iteration {i}\")\n",
+ "\n",
+ " plot_fin_graph(G, ax, fn.map_values_to_colors(v, i))\n",
+ " v = fn.T(v)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/fin_network_sims_1.pdf\")\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "cell_metadata_filter": "-all"
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/ipynb/ch_intro.ipynb b/ipynb/ch_intro.ipynb
new file mode 100644
index 0000000..a796306
--- /dev/null
+++ b/ipynb/ch_intro.ipynb
@@ -0,0 +1,1399 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "5175177c",
+ "metadata": {},
+ "source": [
+ "# Chapter 1 - Introduction (Python Code)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0e0179da",
+ "metadata": {
+ "tags": [
+ "hide-output"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "! pip install --upgrade quantecon_book_networks kaleido"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "36c17bdf",
+ "metadata": {},
+ "source": [
+ "We begin by importing the `quantecon` package as well as some functions and data that have been packaged for release with this text."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cf584c62",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import quantecon as qe\n",
+ "import quantecon_book_networks\n",
+ "import quantecon_book_networks.input_output as qbn_io\n",
+ "import quantecon_book_networks.data as qbn_data\n",
+ "import quantecon_book_networks.plotting as qbn_plot\n",
+ "ch1_data = qbn_data.introduction()\n",
+ "default_figsize = (6, 4)\n",
+ "export_figures = False"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "858f6c0d",
+ "metadata": {},
+ "source": [
+ "Next we import some common python libraries."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "622fff13",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import networkx as nx\n",
+ "import statsmodels.api as sm\n",
+ "import matplotlib.pyplot as plt\n",
+ "import matplotlib.cm as cm\n",
+ "import matplotlib.ticker as ticker\n",
+ "from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n",
+ "import matplotlib.patches as mpatches\n",
+ "import plotly.graph_objects as go\n",
+ "quantecon_book_networks.config(\"matplotlib\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0ee79c32",
+ "metadata": {},
+ "source": [
+ "## Motivation\n",
+ "\n",
+ "### International trade in crude oil 2021\n",
+ "\n",
+ "We begin by loading a `NetworkX` directed graph object that represents international trade in crude oil."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7e495966",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "DG = ch1_data[\"crude_oil\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "284d4d87",
+ "metadata": {},
+ "source": [
+ "Next we transform the data to prepare it for display as a Sankey diagram."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0c944e03",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "nodeid = {}\n",
+ "for ix,nd in enumerate(DG.nodes()):\n",
+ " nodeid[nd] = ix\n",
+ "\n",
+ "# Links\n",
+ "source = []\n",
+ "target = []\n",
+ "value = []\n",
+ "for src,tgt in DG.edges():\n",
+ " source.append(nodeid[src])\n",
+ " target.append(nodeid[tgt])\n",
+ " value.append(DG[src][tgt]['weight'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2cd4537f",
+ "metadata": {},
+ "source": [
+ "Finally we produce our plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f6a9a856",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig = go.Figure(data=[go.Sankey(\n",
+ " node = dict(\n",
+ " pad = 15,\n",
+ " thickness = 20,\n",
+ " line = dict(color = \"black\", width = 0.5),\n",
+ " label = list(nodeid.keys()),\n",
+ " color = \"blue\"\n",
+ " ),\n",
+ " link = dict(\n",
+ " source = source,\n",
+ " target = target,\n",
+ " value = value\n",
+ " ))])\n",
+ "\n",
+ "\n",
+ "fig.update_layout(title_text=\"Crude Oil\", font_size=10, width=600, height=800)\n",
+ "if export_figures:\n",
+ " fig.write_image(\"figures/crude_oil_2021.pdf\")\n",
+ "fig.show(renderer='svg')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "663c1374",
+ "metadata": {},
+ "source": [
+ "### International trade in commercial aircraft during 2019\n",
+ "\n",
+ "For this plot we will use a cleaned dataset from \n",
+ "[Harvard, CID Dataverse](https://dataverse.harvard.edu/dataverse/atlas)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "31cf9ca7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "DG = ch1_data['aircraft_network']\n",
+ "pos = ch1_data['aircraft_network_pos']"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "990418cb",
+ "metadata": {},
+ "source": [
+ "We begin by calculating some features of our graph using the `NetworkX` and\n",
+ "the `quantecon_book_networks` packages."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4da4ce46",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "centrality = nx.eigenvector_centrality(DG)\n",
+ "node_total_exports = qbn_io.node_total_exports(DG)\n",
+ "edge_weights = qbn_io.edge_weights(DG)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ead51019",
+ "metadata": {},
+ "source": [
+ "Now we convert our graph features to plot features."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f9be3605",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "node_pos_dict = pos\n",
+ "\n",
+ "node_sizes = qbn_io.normalise_weights(node_total_exports,10000)\n",
+ "edge_widths = qbn_io.normalise_weights(edge_weights,10)\n",
+ "\n",
+ "node_colors = qbn_io.colorise_weights(list(centrality.values()),color_palette=cm.viridis)\n",
+ "node_to_color = dict(zip(DG.nodes,node_colors))\n",
+ "edge_colors = []\n",
+ "for src,_ in DG.edges:\n",
+ " edge_colors.append(node_to_color[src])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b44e9249",
+ "metadata": {},
+ "source": [
+ "Finally we produce the plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5dfccd9c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(10, 10))\n",
+ "ax.axis('off')\n",
+ "\n",
+ "nx.draw_networkx_nodes(DG, \n",
+ " node_pos_dict, \n",
+ " node_color=node_colors, \n",
+ " node_size=node_sizes, \n",
+ " linewidths=2, \n",
+ " alpha=0.6, \n",
+ " ax=ax)\n",
+ "\n",
+ "nx.draw_networkx_labels(DG, \n",
+ " node_pos_dict, \n",
+ " ax=ax)\n",
+ "\n",
+ "nx.draw_networkx_edges(DG, \n",
+ " node_pos_dict, \n",
+ " edge_color=edge_colors, \n",
+ " width=edge_widths, \n",
+ " arrows=True, \n",
+ " arrowsize=20, \n",
+ " ax=ax,\n",
+ " node_size=node_sizes, \n",
+ " connectionstyle='arc3,rad=0.15')\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/commercial_aircraft_2019_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "70e419f0",
+ "metadata": {},
+ "source": [
+ "## Spectral Theory\n",
+ "\n",
+ "### Spectral Radii\n",
+ "\n",
+ "Here we provide code for computing the spectral radius of a matrix."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d0ac0f17",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def spec_rad(M):\n",
+ " \"\"\"\n",
+ " Compute the spectral radius of M.\n",
+ " \"\"\"\n",
+ " return np.max(np.abs(np.linalg.eigvals(M)))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0bdca499",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "M = np.array([[1,2],[2,1]])\n",
+ "spec_rad(M)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f4fa90a5",
+ "metadata": {},
+ "source": [
+ "This function is available in the `quantecon_book_networks` package, along with \n",
+ "several other functions used repeatedly in the text. Source code for\n",
+ "these functions can be seen [here](pkg_funcs)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2a1e4cba",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "qbn_io.spec_rad(M)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9ac85796",
+ "metadata": {},
+ "source": [
+ "## Probability\n",
+ "\n",
+ "### The unit simplex in $\\mathbb{R}^3$\n",
+ "\n",
+ "Here we define a function for plotting the unit simplex."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a661d596",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def unit_simplex(angle):\n",
+ " \n",
+ " fig = plt.figure(figsize=(10, 8))\n",
+ " ax = fig.add_subplot(111, projection='3d')\n",
+ "\n",
+ " vtx = [[0, 0, 1],\n",
+ " [0, 1, 0], \n",
+ " [1, 0, 0]]\n",
+ " \n",
+ " tri = Poly3DCollection([vtx], color='darkblue', alpha=0.3)\n",
+ " tri.set_facecolor([0.5, 0.5, 1])\n",
+ " ax.add_collection3d(tri)\n",
+ "\n",
+ " ax.set(xlim=(0, 1), ylim=(0, 1), zlim=(0, 1), \n",
+ " xticks=(1,), yticks=(1,), zticks=(1,))\n",
+ "\n",
+ " ax.set_xticklabels(['$(1, 0, 0)$'], fontsize=16)\n",
+ " ax.set_yticklabels([f'$(0, 1, 0)$'], fontsize=16)\n",
+ " ax.set_zticklabels([f'$(0, 0, 1)$'], fontsize=16)\n",
+ "\n",
+ " ax.xaxis.majorTicks[0].set_pad(15)\n",
+ " ax.yaxis.majorTicks[0].set_pad(15)\n",
+ " ax.zaxis.majorTicks[0].set_pad(35)\n",
+ "\n",
+ " ax.view_init(30, angle)\n",
+ "\n",
+ " # Move axis to origin\n",
+ " ax.xaxis._axinfo['juggled'] = (0, 0, 0)\n",
+ " ax.yaxis._axinfo['juggled'] = (1, 1, 1)\n",
+ " ax.zaxis._axinfo['juggled'] = (2, 2, 0)\n",
+ " \n",
+ " ax.grid(False) \n",
+ " \n",
+ " return ax"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "33c4089d",
+ "metadata": {},
+ "source": [
+ "We can now produce the plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "337d3792",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "unit_simplex(50)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/simplex_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8086d97f",
+ "metadata": {},
+ "source": [
+ "### Independent draws from Student’s t and Normal distributions\n",
+ "\n",
+ "Here we illustrate the occurrence of \"extreme\" events in heavy tailed distributions. \n",
+ "We start by generating 1,000 samples from a normal distribution and a Student's t distribution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7be0e011",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from scipy.stats import t\n",
+ "n = 1000\n",
+ "np.random.seed(123)\n",
+ "\n",
+ "s = 2\n",
+ "n_data = np.random.randn(n) * s\n",
+ "\n",
+ "t_dist = t(df=1.5)\n",
+ "t_data = t_dist.rvs(n)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "37b4b6cb",
+ "metadata": {},
+ "source": [
+ "When we plot our samples, we see the Student's t distribution frequently\n",
+ "generates samples many standard deviations from the mean."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "90cd4891",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, axes = plt.subplots(1, 2, figsize=(8, 3.4))\n",
+ "\n",
+ "for ax in axes:\n",
+ " ax.set_ylim((-50, 50))\n",
+ " ax.plot((0, n), (0, 0), 'k-', lw=0.3)\n",
+ "\n",
+ "ax = axes[0]\n",
+ "ax.plot(list(range(n)), t_data, linestyle='', marker='o', alpha=0.5, ms=4)\n",
+ "ax.vlines(list(range(n)), 0, t_data, 'k', lw=0.2)\n",
+ "ax.get_xaxis().set_major_formatter(\n",
+ " ticker.FuncFormatter(lambda x, p: format(int(x), ',')))\n",
+ "ax.set_title(f\"Student t draws\", fontsize=11)\n",
+ "\n",
+ "ax = axes[1]\n",
+ "ax.plot(list(range(n)), n_data, linestyle='', marker='o', alpha=0.5, ms=4)\n",
+ "ax.vlines(list(range(n)), 0, n_data, lw=0.2)\n",
+ "ax.get_xaxis().set_major_formatter(\n",
+ " ticker.FuncFormatter(lambda x, p: format(int(x), ',')))\n",
+ "ax.set_title(f\"$N(0, \\sigma^2)$ with $\\sigma = {s}$\", fontsize=11)\n",
+ "\n",
+ "plt.tight_layout()\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/heavy_tailed_draws.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ca8d8028",
+ "metadata": {},
+ "source": [
+ "### CCDF plots for the Pareto and Exponential distributions\n",
+ "\n",
+ "When the Pareto tail property holds, the CCDF is eventually log linear. Here\n",
+ "we illustrate this using a Pareto distribution. For comparison, an exponential\n",
+ "distribution is also shown. First we define our domain and the Pareto and\n",
+ "Exponential distributions."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8a79084d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x = np.linspace(1, 10, 500)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "89d0baa5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "α = 1.5\n",
+ "def Gp(x):\n",
+ " return x**(-α)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fb412bf5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "λ = 1.0\n",
+ "def Ge(x):\n",
+ " return np.exp(-λ * x)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "90785fac",
+ "metadata": {},
+ "source": [
+ "We then plot our distribution on a log-log scale."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a6b2f844",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=default_figsize)\n",
+ "\n",
+ "ax.plot(np.log(x), np.log(Gp(x)), label=\"Pareto\")\n",
+ "ax.plot(np.log(x), np.log(Ge(x)), label=\"Exponential\")\n",
+ "\n",
+ "ax.legend(fontsize=12, frameon=False, loc=\"lower left\")\n",
+ "ax.set_xlabel(\"$\\ln x$\", fontsize=12)\n",
+ "ax.set_ylabel(\"$\\ln G(x)$\", fontsize=12)\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/ccdf_comparison_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "63906e68",
+ "metadata": {},
+ "source": [
+ "### Empirical CCDF plots for largest firms (Forbes)\n",
+ "\n",
+ "Here we show that the distribution of firm sizes has a Pareto tail. We start\n",
+ "by loading the `forbes_global_2000` dataset."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8b9e33a0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "dfff = ch1_data['forbes_global_2000']"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9266e970",
+ "metadata": {},
+ "source": [
+ "We calculate values of the empirical CCDF."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2a342d5a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data = np.asarray(dfff['Market Value'])[0:500]\n",
+ "y_vals = np.empty_like(data, dtype='float64')\n",
+ "n = len(data)\n",
+ "for i, d in enumerate(data):\n",
+ " # record fraction of sample above d\n",
+ " y_vals[i] = np.sum(data >= d) / n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "690fa754",
+ "metadata": {},
+ "source": [
+ "Now we fit a linear trend line (on the log-log scale)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "956c019d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "x, y = np.log(data), np.log(y_vals)\n",
+ "results = sm.OLS(y, sm.add_constant(x)).fit()\n",
+ "b, a = results.params"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c90d836b",
+ "metadata": {},
+ "source": [
+ "Finally we produce our plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a13d4b05",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(7.3, 4))\n",
+ "\n",
+ "ax.scatter(x, y, alpha=0.3, label=\"firm size (market value)\")\n",
+ "ax.plot(x, x * a + b, 'k-', alpha=0.6, label=f\"slope = ${a: 1.2f}$\")\n",
+ "\n",
+ "ax.set_xlabel('log value', fontsize=12)\n",
+ "ax.set_ylabel(\"log prob.\", fontsize=12)\n",
+ "ax.legend(loc='lower left', fontsize=12)\n",
+ " \n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/empirical_powerlaw_plots_firms_forbes.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d80c5a0a",
+ "metadata": {},
+ "source": [
+ "## Graph Theory\n",
+ "\n",
+ "### Zeta and Pareto distributions\n",
+ "\n",
+ "We begin by defining the Zeta and Pareto distributions."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "19032324",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "γ = 2.0\n",
+ "α = γ - 1"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "abccb3b0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def z(k, c=2.0):\n",
+ " return c * k**(-γ)\n",
+ "\n",
+ "k_grid = np.arange(1, 10+1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fb68c8bc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def p(x, c=2.0):\n",
+ " return c * x**(-γ)\n",
+ "\n",
+ "x_grid = np.linspace(1, 10, 200)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f605c4dc",
+ "metadata": {},
+ "source": [
+ "Then we can produce our plot"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d3661175",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=default_figsize)\n",
+    "ax.plot(k_grid, z(k_grid), '-o', label='zeta distribution with $\\\\gamma=2$')\n",
+ "ax.plot(x_grid, p(x_grid), label='density of Pareto with tail index $\\\\alpha$')\n",
+ "ax.legend(fontsize=12)\n",
+ "ax.set_yticks((0, 1, 2))\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/zeta_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "35f01cce",
+ "metadata": {},
+ "source": [
+ "### NetworkX digraph plot\n",
+ "\n",
+ "We start by creating a graph object and populating it with edges."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cd8fae62",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "G_p = nx.DiGraph()\n",
+ "\n",
+ "edge_list = [\n",
+ " ('p', 'p'),\n",
+ " ('m', 'p'), ('m', 'm'), ('m', 'r'),\n",
+ " ('r', 'p'), ('r', 'm'), ('r', 'r')\n",
+ "]\n",
+ "\n",
+ "for e in edge_list:\n",
+ " u, v = e\n",
+ " G_p.add_edge(u, v)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "77e32349",
+ "metadata": {},
+ "source": [
+ "Now we can plot our graph."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "892bc151",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots()\n",
+ "nx.spring_layout(G_p, seed=4)\n",
+ "nx.draw_spring(G_p, ax=ax, node_size=500, with_labels=True, \n",
+ " font_weight='bold', arrows=True, alpha=0.8,\n",
+ " connectionstyle='arc3,rad=0.25', arrowsize=20)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/networkx_basics_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f089c248",
+ "metadata": {},
+ "source": [
+ "The `DiGraph` object has methods that calculate in-degree and out-degree of vertices."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c3e1adc6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "G_p.in_degree('p')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a502c57a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "G_p.out_degree('p')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "84636840",
+ "metadata": {},
+ "source": [
+ "Additionally, the `NetworkX` package supplies functions for testing\n",
+ "communication and strong connectedness, as well as to compute strongly\n",
+ "connected components."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9b41f9be",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "G = nx.DiGraph()\n",
+ "G.add_edge(1, 1)\n",
+ "G.add_edge(2, 1)\n",
+ "G.add_edge(2, 3)\n",
+ "G.add_edge(3, 2)\n",
+ "list(nx.strongly_connected_components(G))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "79d4b29c",
+ "metadata": {},
+ "source": [
+ "Like `NetworkX`, the Python library `quantecon` \n",
+ "provides access to some graph-theoretic algorithms. \n",
+ "\n",
+ "In the case of QuantEcon's `DiGraph` object, an instance is created via the adjacency matrix."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cbd43d1a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "A = ((1, 0, 0),\n",
+ " (1, 1, 1),\n",
+ " (1, 1, 1))\n",
+ "A = np.array(A) # Convert to NumPy array\n",
+ "G = qe.DiGraph(A)\n",
+ "\n",
+ "G.strongly_connected_components"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0c670e09",
+ "metadata": {},
+ "source": [
+ "### International private credit flows by country\n",
+ "\n",
+ "We begin by loading an adjacency matrix of international private credit flows\n",
+ "(in the form of a NumPy array and a list of country labels)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cd6f046d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "Z = ch1_data[\"adjacency_matrix\"][\"Z\"]\n",
+    "Z_visual = ch1_data[\"adjacency_matrix\"][\"Z_visual\"]\n",
+ "countries = ch1_data[\"adjacency_matrix\"][\"countries\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ade862b8",
+ "metadata": {},
+ "source": [
+ "To calculate our graph's properties, we use hub-based eigenvector\n",
+ "centrality as our centrality measure for this plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4a89769e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "centrality = qbn_io.eigenvector_centrality(Z_visual, authority=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7be043ae",
+ "metadata": {},
+ "source": [
+ "Now we convert our graph features to plot features."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ce2696fd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "node_colors = cm.plasma(qbn_io.to_zero_one_beta(centrality))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8ba53d05",
+ "metadata": {},
+ "source": [
+ "Finally we produce the plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1f802d18",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "X = qbn_io.to_zero_one_beta(Z.sum(axis=1))\n",
+ "\n",
+ "fig, ax = plt.subplots(figsize=(8, 10))\n",
+ "plt.axis(\"off\")\n",
+ "\n",
+ "qbn_plot.plot_graph(Z_visual, X, ax, countries,\n",
+ " layout_type='spring',\n",
+ " layout_seed=1234,\n",
+ " node_size_multiple=3000,\n",
+ " edge_size_multiple=0.000006,\n",
+ " tol=0.0,\n",
+ " node_color_list=node_colors) \n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/financial_network_analysis_visualization.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "88e9c96e",
+ "metadata": {},
+ "source": [
+ "### Centrality measures for the credit network\n",
+ "\n",
+ "This figure looks at six different centrality measures.\n",
+ "\n",
+ "We begin by defining a function for calculating eigenvector centrality.\n",
+ "\n",
+ "Hub-based centrality is calculated by default, although authority-based centrality\n",
+ "can be calculated by setting `authority=True`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "577372e6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def eigenvector_centrality(A, k=40, authority=False):\n",
+ " \"\"\"\n",
+ " Computes the dominant eigenvector of A. Assumes A is \n",
+ " primitive and uses the power method. \n",
+ " \n",
+ " \"\"\"\n",
+ " A_temp = A.T if authority else A\n",
+ " n = len(A_temp)\n",
+ " r = spec_rad(A_temp)\n",
+ " e = r**(-k) * (np.linalg.matrix_power(A_temp, k) @ np.ones(n))\n",
+ " return e / np.sum(e)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c16682dc",
+ "metadata": {},
+ "source": [
+ "Here a similar function is defined for calculating Katz centrality."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9d7fbee4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def katz_centrality(A, b=1, authority=False):\n",
+ " \"\"\"\n",
+ " Computes the Katz centrality of A, defined as the x solving\n",
+ "\n",
+ " x = 1 + b A x (1 = vector of ones)\n",
+ "\n",
+ " Assumes that A is square.\n",
+ "\n",
+ " If authority=True, then A is replaced by its transpose.\n",
+ " \"\"\"\n",
+ " n = len(A)\n",
+ " I = np.identity(n)\n",
+ " C = I - b * A.T if authority else I - b * A\n",
+ " return np.linalg.solve(C, np.ones(n))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c0812961",
+ "metadata": {},
+ "source": [
+ "Now we generate an unweighted version of our matrix to help calculate in-degree and out-degree."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2d665a8f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "D = qbn_io.build_unweighted_matrix(Z)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "947d8b01",
+ "metadata": {},
+ "source": [
+ "We now use the above to calculate the six centrality measures."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "413f7029",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "outdegree = D.sum(axis=1)\n",
+ "ecentral_hub = eigenvector_centrality(Z, authority=False)\n",
+ "kcentral_hub = katz_centrality(Z, b=1/1_700_000)\n",
+ "\n",
+ "indegree = D.sum(axis=0)\n",
+ "ecentral_authority = eigenvector_centrality(Z, authority=True)\n",
+ "kcentral_authority = katz_centrality(Z, b=1/1_700_000, authority=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a14bf02e",
+ "metadata": {},
+ "source": [
+ "Here we provide a helper function that returns a DataFrame for each measure.\n",
+ "The DataFrame is ordered by that measure and contains color information."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2bcb86db",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def centrality_plot_data(countries, centrality_measures):\n",
+ " df = pd.DataFrame({'code': countries,\n",
+ " 'centrality':centrality_measures, \n",
+ " 'color': qbn_io.colorise_weights(centrality_measures).tolist()\n",
+ " })\n",
+ " return df.sort_values('centrality')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c5777a24",
+ "metadata": {},
+ "source": [
+ "Finally, we plot the various centrality measures."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "47130789",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "centrality_measures = [outdegree, indegree, \n",
+ " ecentral_hub, ecentral_authority, \n",
+ " kcentral_hub, kcentral_authority]\n",
+ "\n",
+ "ylabels = ['out degree', 'in degree',\n",
+ " 'eigenvector hub','eigenvector authority', \n",
+ " 'Katz hub', 'Katz authority']\n",
+ "\n",
+ "ylims = [(0, 20), (0, 20), \n",
+ " None, None, \n",
+ " None, None]\n",
+ "\n",
+ "\n",
+ "fig, axes = plt.subplots(3, 2, figsize=(10, 12))\n",
+ "\n",
+ "axes = axes.flatten()\n",
+ "\n",
+ "for i, ax in enumerate(axes):\n",
+ " df = centrality_plot_data(countries, centrality_measures[i])\n",
+ " \n",
+ " ax.bar('code', 'centrality', data=df, color=df[\"color\"], alpha=0.6)\n",
+ " \n",
+ " patch = mpatches.Patch(color=None, label=ylabels[i], visible=False)\n",
+ " ax.legend(handles=[patch], fontsize=12, loc=\"upper left\", handlelength=0, frameon=False)\n",
+ " \n",
+ " if ylims[i] is not None:\n",
+ " ax.set_ylim(ylims[i])\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/financial_network_analysis_centrality.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "11204e33",
+ "metadata": {},
+ "source": [
+ "### Computing in and out degree distributions\n",
+ "\n",
+ "The in-degree distribution evaluated at $k$ is the fraction of nodes in a\n",
+ "network that have in-degree $k$. The in-degree distribution of a `NetworkX`\n",
+ "DiGraph can be calculated using the code below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "61512e1d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def in_degree_dist(G):\n",
+ " n = G.number_of_nodes()\n",
+ " iG = np.array([G.in_degree(v) for v in G.nodes()])\n",
+ " d = [np.mean(iG == k) for k in range(n+1)]\n",
+ " return d"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8121ffff",
+ "metadata": {},
+ "source": [
+ "The out-degree distribution is defined analogously."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fc3e776b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def out_degree_dist(G):\n",
+ " n = G.number_of_nodes()\n",
+ " oG = np.array([G.out_degree(v) for v in G.nodes()])\n",
+ " d = [np.mean(oG == k) for k in range(n+1)]\n",
+ " return d"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "840131af",
+ "metadata": {},
+ "source": [
+ "### Degree distribution for international aircraft trade\n",
+ "\n",
+ "Here we illustrate that the commercial aircraft international trade network is\n",
+ "approximately scale-free by plotting the degree distribution alongside\n",
+    "$f(x)=cx^{-\\gamma}$ with $c=0.2$ and $\\gamma=1.1$. \n",
+ "\n",
+ "In this calculation of the degree distribution, performed by the NetworkX\n",
+ "function `degree_histogram`, directions are ignored and the network is treated\n",
+ "as an undirected graph."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ecc6928e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def plot_degree_dist(G, ax, loglog=True, label=None):\n",
+ " \"Plot the degree distribution of a graph G on axis ax.\"\n",
+ " dd = [x for x in nx.degree_histogram(G) if x > 0]\n",
+ " dd = np.array(dd) / np.sum(dd) # normalize\n",
+ " if loglog:\n",
+ " ax.loglog(dd, '-o', lw=0.5, label=label)\n",
+ " else:\n",
+ " ax.plot(dd, '-o', lw=0.5, label=label)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f5ec0060",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=default_figsize)\n",
+ "\n",
+ "plot_degree_dist(DG, ax, loglog=False, label='degree distribution')\n",
+ "\n",
+ "xg = np.linspace(0.5, 25, 250)\n",
+ "ax.plot(xg, 0.2 * xg**(-1.1), label='power law')\n",
+ "ax.set_xlim(0.9, 22)\n",
+ "ax.set_ylim(0, 0.25)\n",
+ "ax.legend()\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/commercial_aircraft_2019_2.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6c8cb9ba",
+ "metadata": {},
+ "source": [
+ "### Random graphs\n",
+ "\n",
+ "The code to produce the Erdos-Renyi random graph, used below, applies the\n",
+ "combinations function from the `itertools` library. The function\n",
+ "`combinations(A, k)` returns a list of all subsets of $A$ of size $k$. For\n",
+ "example:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "875a6ae1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import itertools\n",
+ "letters = 'a', 'b', 'c'\n",
+ "list(itertools.combinations(letters, 2))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "fe83b981",
+ "metadata": {},
+ "source": [
+ "Below we generate random graphs using the Erdos-Renyi and Barabasi-Albert\n",
+ "algorithms. Here, for convenience, we will define a function to plot these\n",
+ "graphs."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bc9858c8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def plot_random_graph(RG,ax):\n",
+ " node_pos_dict = nx.spring_layout(RG, k=1.1)\n",
+ "\n",
+ " centrality = nx.degree_centrality(RG)\n",
+ " node_color_list = qbn_io.colorise_weights(list(centrality.values()))\n",
+ "\n",
+ " edge_color_list = []\n",
+    "    for i in range(RG.number_of_nodes()):\n",
+    "        for j in range(RG.number_of_nodes()):\n",
+ " edge_color_list.append(node_color_list[i])\n",
+ "\n",
+ " nx.draw_networkx_nodes(RG, \n",
+ " node_pos_dict, \n",
+ " node_color=node_color_list, \n",
+ " edgecolors='grey', \n",
+ " node_size=100,\n",
+ " linewidths=2, \n",
+ " alpha=0.8, \n",
+ " ax=ax)\n",
+ "\n",
+ " nx.draw_networkx_edges(RG, \n",
+ " node_pos_dict, \n",
+    "                           edge_color=edge_color_list, \n",
+ " alpha=0.4, \n",
+ " ax=ax)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "968c2f98",
+ "metadata": {},
+ "source": [
+ "### An instance of an Erdos–Renyi random graph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "027cd2dc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "n = 100\n",
+ "p = 0.05\n",
+ "G_er = qbn_io.erdos_renyi_graph(n, p, seed=1234)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "86c89612",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, axes = plt.subplots(1, 2, figsize=(10, 3.2))\n",
+ "\n",
+ "axes[0].set_title(\"Graph visualization\")\n",
+ "plot_random_graph(G_er,axes[0])\n",
+ "\n",
+ "axes[1].set_title(\"Degree distribution\")\n",
+ "plot_degree_dist(G_er, axes[1], loglog=False)\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/rand_graph_experiments_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1c2f0411",
+ "metadata": {},
+ "source": [
+ "### An instance of a preferential attachment random graph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1fcae0d0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "n = 100\n",
+ "m = 5\n",
+ "G_ba = nx.generators.random_graphs.barabasi_albert_graph(n, m, seed=123)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5b513178",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, axes = plt.subplots(1, 2, figsize=(10, 3.2))\n",
+ "\n",
+ "axes[0].set_title(\"Graph visualization\")\n",
+ "plot_random_graph(G_ba, axes[0])\n",
+ "\n",
+ "axes[1].set_title(\"Degree distribution\")\n",
+ "plot_degree_dist(G_ba, axes[1], loglog=False)\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/rand_graph_experiments_2.pdf\")\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "cell_metadata_filter": "-all"
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/ipynb/ch_mcs.ipynb b/ipynb/ch_mcs.ipynb
new file mode 100644
index 0000000..f8690e2
--- /dev/null
+++ b/ipynb/ch_mcs.ipynb
@@ -0,0 +1,799 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "9eba78b3",
+ "metadata": {},
+ "source": [
+ "# Chapter 4 - Markov Chains and Networks (Python Code)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0c5585b0",
+ "metadata": {
+ "tags": [
+ "hide-output"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "! pip install --upgrade quantecon_book_networks"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "03d19647",
+ "metadata": {},
+ "source": [
+ "We begin with some imports."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "48b566bb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import quantecon as qe\n",
+ "import quantecon_book_networks\n",
+ "import quantecon_book_networks.input_output as qbn_io\n",
+ "import quantecon_book_networks.plotting as qbn_plt\n",
+ "import quantecon_book_networks.data as qbn_data\n",
+ "ch4_data = qbn_data.markov_chains_and_networks()\n",
+ "export_figures = False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d76606ee",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import networkx as nx\n",
+ "import matplotlib.pyplot as plt\n",
+ "from matplotlib import cm\n",
+ "from mpl_toolkits.mplot3d import Axes3D\n",
+ "from mpl_toolkits.mplot3d.art3d import Poly3DCollection\n",
+ "quantecon_book_networks.config(\"matplotlib\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "da06eb17",
+ "metadata": {},
+ "source": [
+ "## Example transition matrices\n",
+ "\n",
+ "In this chapter two transition matrices are used.\n",
+ "\n",
+ "First, a Markov model is estimated in the international growth dynamics study\n",
+ "of [Quah (1993)](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.142.5504&rep=rep1&type=pdf).\n",
+ "\n",
+ "The state is real GDP per capita in a given country relative to the world\n",
+ "average. \n",
+ "\n",
+ "Quah discretizes the possible values to 0–1/4, 1/4–1/2, 1/2–1, 1–2\n",
+ "and 2–inf, calling these states 1 to 5 respectively. The transitions are over\n",
+ "a one year period."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b1d19891",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "P_Q = [\n",
+ " [0.97, 0.03, 0, 0, 0 ],\n",
+ " [0.05, 0.92, 0.03, 0, 0 ],\n",
+ " [0, 0.04, 0.92, 0.04, 0 ],\n",
+ " [0, 0, 0.04, 0.94, 0.02],\n",
+ " [0, 0, 0, 0.01, 0.99]\n",
+ "]\n",
+ "P_Q = np.array(P_Q)\n",
+ "codes_Q = ('1', '2', '3', '4', '5')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c9d3f18b",
+ "metadata": {},
+ "source": [
+ "Second, [Benhabib et al. (2015)](https://www.economicdynamics.org/meetpapers/2015/paper_364.pdf) estimate the following transition matrix for intergenerational social mobility.\n",
+ "\n",
+ "The states are percentiles of the wealth distribution. \n",
+ "\n",
+ "In particular, states 1, 2,..., 8, correspond to the percentiles 0-20%, 20-40%, 40-60%, 60-80%, 80-90%, 90-95%, 95-99%, 99-100%."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8a2ff2d3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "P_B = [\n",
+ " [0.222, 0.222, 0.215, 0.187, 0.081, 0.038, 0.029, 0.006],\n",
+ " [0.221, 0.22, 0.215, 0.188, 0.082, 0.039, 0.029, 0.006],\n",
+ " [0.207, 0.209, 0.21, 0.194, 0.09, 0.046, 0.036, 0.008],\n",
+ " [0.198, 0.201, 0.207, 0.198, 0.095, 0.052, 0.04, 0.009],\n",
+ " [0.175, 0.178, 0.197, 0.207, 0.11, 0.067, 0.054, 0.012],\n",
+ " [0.182, 0.184, 0.2, 0.205, 0.106, 0.062, 0.05, 0.011],\n",
+ " [0.123, 0.125, 0.166, 0.216, 0.141, 0.114, 0.094, 0.021],\n",
+ " [0.084, 0.084, 0.142, 0.228, 0.17, 0.143, 0.121, 0.028]\n",
+ " ]\n",
+ "\n",
+ "P_B = np.array(P_B)\n",
+ "codes_B = ('1', '2', '3', '4', '5', '6', '7', '8')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "53bd61df",
+ "metadata": {},
+ "source": [
+ "## Markov Chains as Digraphs\n",
+ "\n",
+ "### Contour plot of a transition matrix \n",
+ "\n",
+ "Here we define a function for producing contour plots of matrices."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a95d7285",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def plot_matrices(matrix,\n",
+ " codes,\n",
+ " ax,\n",
+ " font_size=12,\n",
+ " alpha=0.6, \n",
+ " colormap=cm.viridis, \n",
+ " color45d=None, \n",
+ " xlabel='sector $j$', \n",
+ " ylabel='sector $i$'):\n",
+ " \n",
+ " ticks = range(len(matrix))\n",
+ "\n",
+ " levels = np.sqrt(np.linspace(0, 0.75, 100))\n",
+ " \n",
+ " \n",
+    "    if color45d is not None:\n",
+ " co = ax.contourf(ticks, \n",
+ " ticks,\n",
+ " matrix,\n",
+ " alpha=alpha, cmap=colormap)\n",
+ " ax.plot(ticks, ticks, color=color45d)\n",
+ " else:\n",
+ " co = ax.contourf(ticks, \n",
+ " ticks,\n",
+ " matrix,\n",
+ " levels,\n",
+ " alpha=alpha, cmap=colormap)\n",
+ "\n",
+ " ax.set_xlabel(xlabel, fontsize=font_size)\n",
+ " ax.set_ylabel(ylabel, fontsize=font_size)\n",
+ " ax.set_yticks(ticks)\n",
+    "    ax.set_yticklabels(codes)\n",
+ " ax.set_xticks(ticks)\n",
+    "    ax.set_xticklabels(codes)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "15a6af09",
+ "metadata": {},
+ "source": [
+ "Now we use our function to produce a plot of the transition matrix for\n",
+ "intergenerational social mobility, $P_B$."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8dc01df7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(6,6))\n",
+ "plot_matrices(P_B.transpose(), codes_B, ax, alpha=0.75, \n",
+ " colormap=cm.viridis, color45d='black',\n",
+ " xlabel='state at time $t$', ylabel='state at time $t+1$')\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/markov_matrix_visualization.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b2476ede",
+ "metadata": {},
+ "source": [
+ "### Wealth percentile over time\n",
+ "\n",
+ "Here we compare the mixing of the transition matrix for intergenerational\n",
+ "social mobility $P_B$ and the transition matrix for international growth\n",
+ "dynamics $P_Q$. \n",
+ "\n",
+ "We begin by creating `quantecon` `MarkovChain` objects with each of our transition\n",
+ "matrices."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f13e25bb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mc_B = qe.MarkovChain(P_B, state_values=range(1, 9))\n",
+ "mc_Q = qe.MarkovChain(P_Q, state_values=range(1, 6))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "df645ec9",
+ "metadata": {},
+ "source": [
+ "Next we define a function to plot simulations of Markov chains. \n",
+ "\n",
+ "Two simulations will be run for each `MarkovChain`, one starting at the\n",
+ "minimum initial value and one at the maximum."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "174ec3fc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def sim_fig(ax, mc, T=100, seed=14, title=None):\n",
+ " X1 = mc.simulate(T, init=1, random_state=seed)\n",
+ " X2 = mc.simulate(T, init=max(mc.state_values), random_state=seed+1)\n",
+ " ax.plot(X1)\n",
+ " ax.plot(X2)\n",
+ " ax.set_xlabel(\"time\")\n",
+ " ax.set_ylabel(\"state\")\n",
+ " ax.set_title(title, fontsize=12)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "05dc505f",
+ "metadata": {},
+ "source": [
+ "Finally, we produce the figure."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6c98817b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, axes = plt.subplots(2, 1, figsize=(6, 4))\n",
+ "ax = axes[0]\n",
+ "sim_fig(axes[0], mc_B, title=\"$P_B$\")\n",
+ "sim_fig(axes[1], mc_Q, title=\"$P_Q$\")\n",
+ "axes[1].set_yticks((1, 2, 3, 4, 5))\n",
+ "\n",
+ "plt.tight_layout()\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/benhabib_mobility_mixing.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2bf229d0",
+ "metadata": {},
+ "source": [
+ "### Predicted vs realized cross-country income distributions for 2019\n",
+ "\n",
+ "Here we load a `pandas` `DataFrame` of GDP per capita data for countries compared to the global average."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "44dc111f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gdppc_df = ch4_data['gdppc_df']\n",
+ "gdppc_df.head()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d3a50768",
+ "metadata": {},
+ "source": [
+ "Now we assign countries bins, as per Quah (1993)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "09775bab",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "q = [0, 0.25, 0.5, 1.0, 2.0, np.inf]\n",
+ "l = [0, 1, 2, 3, 4]\n",
+ "\n",
+ "x = pd.cut(gdppc_df.gdppc_r, bins=q, labels=l)\n",
+ "gdppc_df['interval'] = x\n",
+ "\n",
+ "gdppc_df = gdppc_df.reset_index()\n",
+ "gdppc_df['interval'] = gdppc_df['interval'].astype(float)\n",
+ "gdppc_df['year'] = gdppc_df['year'].astype(float)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8c91bb21",
+ "metadata": {},
+ "source": [
+ "Here we define a function for calculating the cross-country income\n",
+ "distributions for a given date range."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e7e86382",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def gdp_dist_estimate(df, l, yr=(1960, 2019)):\n",
+ " Y = np.zeros(len(l))\n",
+ " for i in l:\n",
+ " Y[i] = df[\n",
+ " (df['interval'] == i) & \n",
+ " (df['year'] <= yr[1]) & \n",
+ " (df['year'] >= yr[0])\n",
+ " ].count()[0]\n",
+ " \n",
+ " return Y / Y.sum()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ad6d54d7",
+ "metadata": {},
+ "source": [
+ "We calculate the true distribution for 1985."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "130b145a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ψ_1985 = gdp_dist_estimate(gdppc_df,l,yr=(1985, 1985))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b3b24523",
+ "metadata": {},
+ "source": [
+ "Now we use the transition matrix to update the 1985 distribution $t = 2019 - 1985 = 34$\n",
+ "times to get our predicted 2019 distribution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "de8e360b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ψ_2019_predicted = ψ_1985 @ np.linalg.matrix_power(P_Q, 2019-1985)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3e8be48f",
+ "metadata": {},
+ "source": [
+ "Now, calculate the true 2019 distribution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "48653d34",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ψ_2019 = gdp_dist_estimate(gdppc_df,l,yr=(2019, 2019))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "886af82b",
+ "metadata": {},
+ "source": [
+ "Finally we produce the plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0d6c8426",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "states = np.arange(0, 5)\n",
+ "ticks = range(5)\n",
+ "codes_S = ('1', '2', '3', '4', '5')\n",
+ "\n",
+ "fig, ax = plt.subplots(figsize=(6, 4))\n",
+ "width = 0.4\n",
+ "ax.plot(states, ψ_2019_predicted, '-o', alpha=0.7, label='predicted')\n",
+ "ax.plot(states, ψ_2019, '-o', alpha=0.7, label='realized')\n",
+ "ax.set_xlabel(\"state\")\n",
+ "ax.set_ylabel(\"probability\")\n",
+ "ax.set_yticks((0.15, 0.2, 0.25, 0.3))\n",
+ "ax.set_xticks(ticks)\n",
+ "ax.set_xticklabels(codes_S)\n",
+ "\n",
+ "ax.legend(loc='upper center', fontsize=12)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/quah_gdppc_prediction.pdf\")\n",
+ "plt.show()\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ce838e58",
+ "metadata": {},
+ "source": [
+ "### Distribution dynamics\n",
+ "\n",
+ "Here we define a function for plotting the convergence of marginal\n",
+ "distributions $\\psi$ under a transition matrix $P$ on the unit simplex."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6eaa5db8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def convergence_plot(ψ, P, n=14, angle=50):\n",
+ "\n",
+ " ax = qbn_plt.unit_simplex(angle)\n",
+ "\n",
+ " # Convergence plot\n",
+ " \n",
+ " P = np.array(P)\n",
+ "\n",
+ " ψ = ψ # Initial condition\n",
+ "\n",
+ " x_vals, y_vals, z_vals = [], [], []\n",
+ " for t in range(n):\n",
+ " x_vals.append(ψ[0])\n",
+ " y_vals.append(ψ[1])\n",
+ " z_vals.append(ψ[2])\n",
+ " ψ = ψ @ P\n",
+ "\n",
+ " ax.scatter(x_vals, y_vals, z_vals, c='darkred', s=80, alpha=0.7, depthshade=False)\n",
+ "\n",
+ " mc = qe.MarkovChain(P)\n",
+ " ψ_star = mc.stationary_distributions[0]\n",
+ " ax.scatter(ψ_star[0], ψ_star[1], ψ_star[2], c='k', s=80)\n",
+ "\n",
+ " return ψ\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5bc093e3",
+ "metadata": {},
+ "source": [
+ "Now we define P."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "64d602c8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "P = (\n",
+ " (0.9, 0.1, 0.0),\n",
+ " (0.4, 0.4, 0.2),\n",
+ " (0.1, 0.1, 0.8)\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a38c94cd",
+ "metadata": {},
+ "source": [
+ "#### A trajectory from $\\psi_0 = (0, 0, 1)$\n",
+ "\n",
+ "Here we see the sequence of marginals appears to converge."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "39d90e91",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ψ_0 = (0, 0, 1)\n",
+ "ψ = convergence_plot(ψ_0, P)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/simplex_2.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "da3337df",
+ "metadata": {},
+ "source": [
+ "#### A trajectory from $\\psi_0 = (0, 1/2, 1/2)$\n",
+ "\n",
+ "Here we see again that the sequence of marginals appears to converge, and the\n",
+ "limit appears not to depend on the initial distribution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c4ba2e7b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ψ_0 = (0, 1/2, 1/2)\n",
+ "ψ = convergence_plot(ψ_0, P, n=12)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/simplex_3.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7491cf30",
+ "metadata": {},
+ "source": [
+ "### Distribution projections from $P_B$\n",
+ "\n",
+ "Here we define a function for plotting $\\psi$ after $n$ iterations of the\n",
+ "transition matrix $P$. The distribution $\\psi_0$ is taken as the uniform \n",
+ "distribution over the\n",
+ "state space."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6dd25c98",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def transition(P, n, ax=None):\n",
+ " \n",
+ " P = np.array(P)\n",
+ " nstates = P.shape[1]\n",
+    "    s0 = np.ones(nstates) * 1/nstates\n",
+ " s = s0\n",
+ " \n",
+ " for i in range(n):\n",
+ " s = s @ P\n",
+ " \n",
+ " if ax is None:\n",
+ " fig, ax = plt.subplots()\n",
+ " \n",
+ " ax.plot(range(1, nstates+1), s, '-o', alpha=0.6)\n",
+ " ax.set(ylim=(0, 0.25), \n",
+ " xticks=((1, nstates)))\n",
+ " ax.set_title(f\"$t = {n}$\")\n",
+ " \n",
+ " return ax"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "659480d1",
+ "metadata": {},
+ "source": [
+ "We now generate the marginal distributions after 0, 1, 2, and 100 iterations for $P_B$."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a1bbb0cd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ns = (0, 1, 2, 100)\n",
+ "fig, axes = plt.subplots(1, len(ns), figsize=(6, 4))\n",
+ "\n",
+ "for n, ax in zip(ns, axes):\n",
+ " ax = transition(P_B, n, ax=ax)\n",
+ " ax.set_xlabel(\"Quantile\")\n",
+ "\n",
+ "plt.tight_layout()\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/benhabib_mobility_dists.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e7ee0c57",
+ "metadata": {},
+ "source": [
+ "## Asymptotics\n",
+ "\n",
+ "### Convergence of the empirical distribution to $\\psi^*$\n",
+ "\n",
+ "We begin by creating a `MarkovChain` object, taking $P_B$ as the transition matrix."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2f911e34",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mc = qe.MarkovChain(P_B)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7ca9220f",
+ "metadata": {},
+ "source": [
+ "Next we use the `quantecon` package to calculate the true stationary distribution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "50947355",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "stationary = mc.stationary_distributions[0]\n",
+ "n = len(mc.P)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0ea1a5bf",
+ "metadata": {},
+ "source": [
+ "Now we define a function to simulate the Markov chain."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d98416ff",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def simulate_distribution(mc, T=100):\n",
+ " # Simulate path \n",
+ " n = len(mc.P)\n",
+ " path = mc.simulate_indices(ts_length=T, random_state=1)\n",
+ " distribution = np.empty(n)\n",
+ " for i in range(n):\n",
+ " distribution[i] = np.mean(path==i)\n",
+ " return distribution\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7bd6bdaa",
+ "metadata": {},
+ "source": [
+ "We run simulations of length 10, 100, 1,000 and 10,000."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bef22ec7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "lengths = [10, 100, 1_000, 10_000]\n",
+ "dists = []\n",
+ "\n",
+ "for t in lengths:\n",
+ " dists.append(simulate_distribution(mc, t))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a185b18b",
+ "metadata": {},
+ "source": [
+ "Now we produce the plots. \n",
+ "\n",
+ "We see that the simulated distribution starts to approach the true stationary distribution."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6ccd5bdf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, axes = plt.subplots(2, 2, figsize=(9, 6), sharex='all')#, sharey='all')\n",
+ "\n",
+ "axes = axes.flatten()\n",
+ "\n",
+ "for dist, ax, t in zip(dists, axes, lengths):\n",
+ " \n",
+ " ax.plot(np.arange(n)+1 + .25, \n",
+ " stationary, \n",
+ " '-o',\n",
+ " #width = 0.25, \n",
+ " label='$\\\\psi^*$', \n",
+ " alpha=0.75)\n",
+ " \n",
+ " ax.plot(np.arange(n)+1, \n",
+ " dist, \n",
+ " '-o',\n",
+ " #width = 0.25, \n",
+ " label=f'$\\\\hat \\\\psi_k$ with $k={t}$', \n",
+ " alpha=0.75)\n",
+ "\n",
+ "\n",
+ " ax.set_xlabel(\"state\", fontsize=12)\n",
+ " ax.set_ylabel(\"prob.\", fontsize=12)\n",
+ " ax.set_xticks(np.arange(n)+1)\n",
+ " ax.legend(loc='upper right', fontsize=12, frameon=False)\n",
+ " ax.set_ylim(0, 0.5)\n",
+ " \n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/benhabib_ergodicity_1.pdf\")\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "cell_metadata_filter": "-all"
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/ipynb/ch_opt.ipynb b/ipynb/ch_opt.ipynb
new file mode 100644
index 0000000..f18ddc2
--- /dev/null
+++ b/ipynb/ch_opt.ipynb
@@ -0,0 +1,728 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "461d8d77",
+ "metadata": {},
+ "source": [
+ "# Chapter 3 - Optimal Flows (Python Code)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e85fdcd6",
+ "metadata": {
+ "tags": [
+ "hide-output"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "! pip install --upgrade quantecon_book_networks"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d3d5fcb1",
+ "metadata": {},
+ "source": [
+ "We begin with some imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2a00ff7f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import quantecon as qe\n",
+ "import quantecon_book_networks\n",
+ "import quantecon_book_networks.input_output as qbn_io\n",
+ "import quantecon_book_networks.plotting as qbn_plt\n",
+ "import quantecon_book_networks.data as qbn_data\n",
+ "ch3_data = qbn_data.optimal_flows()\n",
+ "export_figures = False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2ed711ad",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "from scipy.optimize import linprog\n",
+ "import networkx as nx\n",
+ "import ot\n",
+ "import matplotlib.pyplot as plt\n",
+ "from matplotlib import cm\n",
+ "from matplotlib.patches import Polygon\n",
+ "from matplotlib.artist import Artist \n",
+ "quantecon_book_networks.config(\"matplotlib\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7a5e9cd1",
+ "metadata": {},
+ "source": [
+ "## Linear Programming and Duality\n",
+ "\n",
+ "### Betweenness centrality (by color and node size) for the Florentine families\n",
+ "\n",
+ "We load the Florentine Families data from the NetworkX package."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4e14e416",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "G = nx.florentine_families_graph()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5199cc62",
+ "metadata": {},
+ "source": [
+ "Next we calculate betweenness centrality."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dab1b1d9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "bc_dict = nx.betweenness_centrality(G)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "700349de",
+ "metadata": {},
+ "source": [
+ "And we produce the plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "33557ff6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(9.4, 9.4))\n",
+ "\n",
+ "plt.axis(\"off\")\n",
+ "nx.draw_networkx(\n",
+ " G, \n",
+ " ax=ax,\n",
+ " pos=nx.spring_layout(G, seed=1234), \n",
+ " with_labels=True,\n",
+ " alpha=.8,\n",
+ " arrowsize=15,\n",
+ " connectionstyle=\"arc3,rad=0.1\",\n",
+ " node_size=[10_000*(size+0.1) for size in bc_dict.values()], \n",
+ " node_color=[cm.plasma(bc+0.4) for bc in bc_dict.values()],\n",
+ ")\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/betweenness_centrality_1.pdf\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0fa794a1",
+ "metadata": {},
+ "source": [
+ "### Revenue maximizing quantities and a Python implementation of linear programming\n",
+ "\n",
+ "First we specify our linear program."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "59dd01db",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "A = ((2, 5),\n",
+ " (4, 2))\n",
+ "b = (30, 20)\n",
+ "c = (-3, -4) # minus in order to minimize"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f64d9118",
+ "metadata": {},
+ "source": [
+ "And now we use SciPy's linear programming module to solve our linear program."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e0bf9ba9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from scipy.optimize import linprog\n",
+ "result = linprog(c, A_ub=A, b_ub=b)\n",
+ "print(result.x)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b1c7bfbb",
+ "metadata": {},
+ "source": [
+ "Here we produce a visualization of what is being done."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "41077bbd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(8, 4.5))\n",
+ "plt.rcParams['font.size'] = '14'\n",
+ "\n",
+ "# Draw constraint lines\n",
+ "\n",
+ "ax.plot(np.linspace(-1, 17.5, 100), 6-0.4*np.linspace(-1, 17.5, 100))\n",
+ "ax.plot(np.linspace(-1, 5.5, 100), 10-2*np.linspace(-1, 5.5, 100))\n",
+ "ax.text(10, 2.5, \"$2q_1 + 5q_2 \\leq 30$\")\n",
+ "ax.text(1.5, 8, \"$4q_1 + 2q_2 \\leq 20$\")\n",
+ "ax.text(-2, 2, \"$q_2 \\geq 0$\")\n",
+ "ax.text(2.5, -0.7, \"$q_1 \\geq 0$\")\n",
+ "\n",
+ "# Draw the feasible region\n",
+ "feasible_set = Polygon(np.array([[0, 0], \n",
+ " [0, 6], \n",
+ " [2.5, 5], \n",
+ " [5, 0]]))\n",
+ "ax.add_artist(feasible_set)\n",
+ "Artist.set_alpha(feasible_set, 0.2) \n",
+ "\n",
+ "# Draw the objective function\n",
+ "ax.plot(np.linspace(-1, 5.5, 100), 3.875-0.75*np.linspace(-1, 5.5, 100), 'g-')\n",
+ "ax.plot(np.linspace(-1, 5.5, 100), 5.375-0.75*np.linspace(-1, 5.5, 100), 'g-')\n",
+ "ax.plot(np.linspace(-1, 5.5, 100), 6.875-0.75*np.linspace(-1, 5.5, 100), 'g-')\n",
+ "ax.text(5.8, 1, \"revenue $ = 3q_1 + 4q_2$\")\n",
+ "\n",
+ "# Draw the optimal solution\n",
+ "ax.plot(2.5, 5, \"o\", color=\"black\")\n",
+ "ax.text(2.7, 5.2, \"optimal solution\")\n",
+ "\n",
+ "for spine in ['right', 'top']:\n",
+ " ax.spines[spine].set_color('none')\n",
+ " \n",
+ "ax.set_xticks(())\n",
+ "ax.set_yticks(())\n",
+ "\n",
+ "for spine in ['left', 'bottom']:\n",
+ " ax.spines[spine].set_position('zero')\n",
+ " \n",
+ "ax.set_ylim(-1, 8)\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/linear_programming_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b1c4f25b",
+ "metadata": {},
+ "source": [
+ "## Optimal Transport\n",
+ " \n",
+ "\n",
+ "### Transforming one distribution into another\n",
+ "\n",
+ "Below we provide code to produce a visualization of transforming one\n",
+ "distribution into another in one dimension."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "92b7150b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "σ = 0.1\n",
+ "\n",
+ "def ϕ(z):\n",
+ " return (1 / np.sqrt(2 * σ**2 * np.pi)) * np.exp(-z**2 / (2 * σ**2))\n",
+ "\n",
+ "def v(x, a=0.4, b=0.6, s=1.0, t=1.4):\n",
+ " return a * ϕ(x - s) + b * ϕ(x - t)\n",
+ "\n",
+ "fig, ax = plt.subplots(figsize=(10, 4))\n",
+ "\n",
+ "x = np.linspace(0.2, 4, 1000)\n",
+ "ax.plot(x, v(x), label=\"$\\\\phi$\")\n",
+ "ax.plot(x, v(x, s=3.0, t=3.3, a=0.6), label=\"$\\\\psi$\")\n",
+ "\n",
+ "ax.legend(loc='upper left', fontsize=12, frameon=False)\n",
+ "\n",
+ "ax.arrow(1.8, 1.6, 0.8, 0.0, width=0.01, head_width=0.08)\n",
+ "ax.annotate('transform', xy=(1.9, 1.9), fontsize=12)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/ot_figs_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7464f721",
+ "metadata": {},
+ "source": [
+ "### Function to solve a transport problem via linear programming\n",
+ "\n",
+ "Here we define a function to solve optimal transport problems using linear programming."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d94e547a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def ot_solver(phi, psi, c, method='highs-ipm'):\n",
+ " \"\"\"\n",
+ " Solve the OT problem associated with distributions phi, psi\n",
+ " and cost matrix c.\n",
+ " Parameters\n",
+ " ----------\n",
+ " phi : 1-D array\n",
+ " Distribution over the source locations.\n",
+ " psi : 1-D array\n",
+ " Distribution over the target locations.\n",
+ " c : 2-D array\n",
+ " Cost matrix.\n",
+ " \"\"\"\n",
+ " n, m = len(phi), len(psi)\n",
+ "\n",
+ " # vectorize c\n",
+ " c_vec = c.reshape((m * n, 1), order='F')\n",
+ "\n",
+ " # Construct A and b\n",
+ " A1 = np.kron(np.ones((1, m)), np.identity(n))\n",
+ " A2 = np.kron(np.identity(m), np.ones((1, n)))\n",
+ " A = np.vstack((A1, A2))\n",
+ " b = np.hstack((phi, psi))\n",
+ "\n",
+ " # Call solver\n",
+ " res = linprog(c_vec, A_eq=A, b_eq=b, method=method)\n",
+ "\n",
+ " # Invert the vec operation to get the solution as a matrix\n",
+ " pi = res.x.reshape((n, m), order='F')\n",
+ " return pi"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "dfdeb358",
+ "metadata": {},
+ "source": [
+ "Now we can set up a simple optimal transport problem."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "54e87d72",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "phi = np.array((0.5, 0.5))\n",
+ "psi = np.array((1, 0))\n",
+ "c = np.ones((2, 2))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b81830ff",
+ "metadata": {},
+ "source": [
+ "Next we solve using the above function."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3e044c8c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ot_solver(phi, psi, c)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f44e6ab3",
+ "metadata": {},
+ "source": [
+ "We see we get the same result as when using the Python optimal transport\n",
+ "package."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "82d43df5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ot.emd(phi, psi, c) "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "642f85ba",
+ "metadata": {},
+ "source": [
+ "### An optimal transport problem solved by linear programming\n",
+ "\n",
+ "Here we demonstrate a more detailed optimal transport problem. We begin by defining a node class."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9bcd7819",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class Node:\n",
+ "\n",
+ " def __init__(self, x, y, mass, group, name):\n",
+ "\n",
+ " self.x, self.y = x, y\n",
+ " self.mass, self.group = mass, group\n",
+ " self.name = name"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ad91c7e4",
+ "metadata": {},
+ "source": [
+ "Now we define a function for randomly generating nodes."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7d1bfd36",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from scipy.stats import betabinom\n",
+ "\n",
+ "def build_nodes_of_one_type(group='phi', n=100, seed=123):\n",
+ "\n",
+ " nodes = []\n",
+ " np.random.seed(seed)\n",
+ "\n",
+ " for i in range(n):\n",
+ " \n",
+ " if group == 'phi':\n",
+ " m = 1/n\n",
+ " x = np.random.uniform(-2, 2)\n",
+ " y = np.random.uniform(-2, 2)\n",
+ " else:\n",
+ " m = betabinom.pmf(i, n-1, 2, 2)\n",
+ " x = 0.6 * np.random.uniform(-1.5, 1.5)\n",
+ " y = 0.6 * np.random.uniform(-1.5, 1.5)\n",
+ " \n",
+ " name = group + str(i)\n",
+ " nodes.append(Node(x, y, m, group, name))\n",
+ "\n",
+ " return nodes"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "45b701ab",
+ "metadata": {},
+ "source": [
+ "We now generate our source and target nodes."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "efa68979",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "n_phi = 32\n",
+ "n_psi = 32\n",
+ "\n",
+ "phi_list = build_nodes_of_one_type(group='phi', n=n_phi)\n",
+ "psi_list = build_nodes_of_one_type(group='psi', n=n_psi)\n",
+ "\n",
+ "phi_probs = [phi.mass for phi in phi_list]\n",
+ "psi_probs = [psi.mass for psi in psi_list]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "61f09811",
+ "metadata": {},
+ "source": [
+ "Now we define our transport costs."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "38af4ff7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "c = np.empty((n_phi, n_psi))\n",
+ "for i in range(n_phi):\n",
+ " for j in range(n_psi):\n",
+ " x0, y0 = phi_list[i].x, phi_list[i].y\n",
+ " x1, y1 = psi_list[j].x, psi_list[j].y\n",
+ " c[i, j] = np.sqrt((x0-x1)**2 + (y0-y1)**2)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "cbe78a44",
+ "metadata": {},
+ "source": [
+ "We solve our optimal transport problem using the Python optimal transport package."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c8cbef04",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pi = ot.emd(phi_probs, psi_probs, c)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3fd92511",
+ "metadata": {},
+ "source": [
+ "Finally we produce a graph of our sources, targets, and optimal transport plan."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fa85e02e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "g = nx.DiGraph()\n",
+ "g.add_nodes_from([phi.name for phi in phi_list])\n",
+ "g.add_nodes_from([psi.name for psi in psi_list])\n",
+ "\n",
+ "for i in range(n_phi):\n",
+ " for j in range(n_psi):\n",
+ " if pi[i, j] > 0:\n",
+ " g.add_edge(phi_list[i].name, psi_list[j].name, weight=pi[i, j])\n",
+ "\n",
+ "node_pos_dict={}\n",
+ "for phi in phi_list:\n",
+ " node_pos_dict[phi.name] = (phi.x, phi.y)\n",
+ "for psi in psi_list:\n",
+ " node_pos_dict[psi.name] = (psi.x, psi.y)\n",
+ "\n",
+ "node_color_list = []\n",
+ "node_size_list = []\n",
+ "scale = 8_000\n",
+ "for phi in phi_list:\n",
+ " node_color_list.append('blue')\n",
+ " node_size_list.append(phi.mass * scale)\n",
+ "for psi in psi_list:\n",
+ " node_color_list.append('red')\n",
+ " node_size_list.append(psi.mass * scale)\n",
+ "\n",
+ "fig, ax = plt.subplots(figsize=(7, 10))\n",
+ "plt.axis('off')\n",
+ "\n",
+ "nx.draw_networkx_nodes(g, \n",
+ " node_pos_dict, \n",
+ " node_color=node_color_list,\n",
+ " node_size=node_size_list,\n",
+ " edgecolors='grey',\n",
+ " linewidths=1,\n",
+ " alpha=0.5,\n",
+ " ax=ax)\n",
+ "\n",
+ "nx.draw_networkx_edges(g, \n",
+ " node_pos_dict, \n",
+ " arrows=True,\n",
+ " connectionstyle='arc3,rad=0.1',\n",
+ " alpha=0.6)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/ot_large_scale_1.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4bd33217",
+ "metadata": {},
+ "source": [
+ "### Solving linear assignment as an optimal transport problem\n",
+ "\n",
+ "Here we set up a linear assignment problem (matching $n$ workers to $n$ jobs)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4f26dc4c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "n = 4\n",
+ "phi = np.ones(n)\n",
+ "psi = np.ones(n)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "756aa046",
+ "metadata": {},
+ "source": [
+ "We generate our cost matrix (the cost of training the $i$th worker for the $j$th job)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "437274b1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "c = np.random.uniform(size=(n, n))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b6a66222",
+ "metadata": {},
+ "source": [
+ "Finally, we solve our linear assignment problem as a special case of optimal transport."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "69565581",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ot.emd(phi, psi, c)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ef9fc68a",
+ "metadata": {},
+ "source": [
+ "### Python Spatial Analysis library\n",
+ "\n",
+ "Readers interested in computational optimal transport should also consider\n",
+ "PySAL, the [Python Spatial Analysis library](https://pysal.org/). See, for\n",
+ "example, https://pysal.org/spaghetti/notebooks/transportation-problem.html.\n",
+ "\n",
+ "### The General Flow Problem\n",
+ "\n",
+ "Here we solve a simple network flow problem as a linear program. We begin by\n",
+ "defining the node-edge incidence matrix."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1e7fc46b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "A = (\n",
+ "( 1, 1, 0, 0),\n",
+ "(-1, 0, 1, 0),\n",
+ "( 0, 0, -1, 1),\n",
+ "( 0, -1, 0, -1)\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "23fac21d",
+ "metadata": {},
+ "source": [
+ "Now we define exogenous supply and transport costs."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f8c5644d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "b = (10, 0, 0, -10)\n",
+ "c = (1, 4, 1, 1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3fe351ee",
+ "metadata": {},
+ "source": [
+ "Finally we solve as a linear program."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a257b5e1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "result = linprog(c, A_eq=A, b_eq=b, method='highs-ipm')\n",
+ "print(result.x)"
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "cell_metadata_filter": "-all"
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/ipynb/ch_opt_julia.ipynb b/ipynb/ch_opt_julia.ipynb
new file mode 100644
index 0000000..b88726d
--- /dev/null
+++ b/ipynb/ch_opt_julia.ipynb
@@ -0,0 +1,209 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "736e5076",
+ "metadata": {},
+ "source": [
+ "# Chapter 3 - Optimal Flows (Julia Code)\n",
+ "\n",
+ "## Bellman’s Method\n",
+ "\n",
+ "Here we demonstrate solving a shortest path problem using Bellman's method.\n",
+ "\n",
+ "Our first step is to set up the cost function, which we store as an array\n",
+ "called `c`. Note that we set `c[i, j] = Inf` when no edge exists from `i` to\n",
+ "`j`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c36f47c4",
+ "metadata": {
+ "tags": [
+ "remove-output"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "c = fill(Inf, (7, 7))\n",
+ "c[1, 2], c[1, 3], c[1, 4] = 1, 5, 3\n",
+ "c[2, 4], c[2, 5] = 9, 6\n",
+ "c[3, 6] = 2\n",
+ "c[4, 6] = 4\n",
+ "c[5, 7] = 4\n",
+ "c[6, 7] = 1\n",
+ "c[7, 7] = 0"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2b8fa3ad",
+ "metadata": {},
+ "source": [
+ "Next we define the Bellman operator."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c2bc51e6",
+ "metadata": {
+ "tags": [
+ "remove-output"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "function T(q)\n",
+ " Tq = similar(q)\n",
+ " n = length(q)\n",
+ " for x in 1:n\n",
+ " Tq[x] = minimum(c[x, :] + q[:])\n",
+ " end\n",
+ " return Tq\n",
+ "end"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9857bf8a",
+ "metadata": {},
+ "source": [
+ "Now we arbitrarily set $q \\equiv 0$, generate the sequence of iterates $Tq$,\n",
+ "$T^2q$, $T^3q$ and plot them. By $T^3q$ the sequence has already converged to $q^∗$."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "757cadc8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "using PyPlot\n",
+ "export_figures = false\n",
+ "fig, ax = plt.subplots(figsize=(6, 4))\n",
+ "\n",
+ "n = 7\n",
+ "q = zeros(n)\n",
+ "ax.plot(1:n, q)\n",
+ "ax.set_xlabel(\"cost-to-go\")\n",
+ "ax.set_ylabel(\"nodes\")\n",
+ "\n",
+ "for i in 1:3\n",
+ " new_q = T(q)\n",
+ " ax.plot(1:n, new_q, \"-o\", alpha=0.7, label=\"iterate $i\")\n",
+ " q = new_q\n",
+ "end\n",
+ "\n",
+ "ax.legend()\n",
+ "if export_figures == true\n",
+ " plt.savefig(\"figures/shortest_path_iter_1.pdf\")\n",
+ "end"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d31abe43",
+ "metadata": {},
+ "source": [
+ "## Linear programming\n",
+ "\n",
+ "When solving linear programs, one option is to use a domain specific modeling\n",
+ "language to set out the objective and constraints in the optimization problem.\n",
+ "Here we demonstrate the Julia package `JuMP`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "67361f35",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "using JuMP\n",
+ "using GLPK"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9bd4aef4",
+ "metadata": {},
+ "source": [
+ "We create our model object and select our solver."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "856ef0ee",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "m = Model()\n",
+ "set_optimizer(m, GLPK.Optimizer)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "fe817ce7",
+ "metadata": {},
+ "source": [
+ "Now we add variables, constraints and an objective to our model."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "07228cb0",
+ "metadata": {
+ "tags": [
+ "remove-output"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "@variable(m, q1 >= 0)\n",
+ "@variable(m, q2 >= 0)\n",
+ "@constraint(m, 2q1 + 5q2 <= 30)\n",
+ "@constraint(m, 4q1 + 2q2 <= 20)\n",
+ "@objective(m, Max, 3q1 + 4q2)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "842781b0",
+ "metadata": {},
+ "source": [
+ "Finally we solve our linear program."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "83c0d928",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "optimize!(m)\n",
+ "\n",
+ "println(value.(q1)) \n",
+ "println(value.(q2))"
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "cell_metadata_filter": "-all"
+ },
+ "kernelspec": {
+ "display_name": "Julia",
+ "language": "julia",
+ "name": "julia-1.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/ipynb/ch_production.ipynb b/ipynb/ch_production.ipynb
new file mode 100644
index 0000000..8f51490
--- /dev/null
+++ b/ipynb/ch_production.ipynb
@@ -0,0 +1,737 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "7e970772",
+ "metadata": {},
+ "source": [
+ "# Chapter 2 - Production (Python Code)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fd1d49bf",
+ "metadata": {
+ "tags": [
+ "hide-output"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "! pip install --upgrade quantecon_book_networks"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "5bb0e40f",
+ "metadata": {},
+ "source": [
+ "We begin with some imports."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9b06f1ff",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import quantecon as qe\n",
+ "import quantecon_book_networks\n",
+ "import quantecon_book_networks.input_output as qbn_io\n",
+ "import quantecon_book_networks.plotting as qbn_plt\n",
+ "import quantecon_book_networks.data as qbn_data\n",
+ "ch2_data = qbn_data.production()\n",
+ "default_figsize = (6, 4)\n",
+ "export_figures = False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "67d26617",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import networkx as nx\n",
+ "import matplotlib.pyplot as plt\n",
+ "import matplotlib.cm as cm\n",
+ "import matplotlib.colors as plc\n",
+ "from matplotlib import cm\n",
+ "quantecon_book_networks.config(\"matplotlib\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "aa98eb15",
+ "metadata": {},
+ "source": [
+ "## Multisector Models\n",
+ "\n",
+ "We start by loading a graph of linkages between 15 US sectors in 2021. \n",
+ "\n",
+ "Our graph comes as a list of sector codes, an adjacency matrix of sales between\n",
+ "the sectors, and a list of the total sales of each sector. \n",
+ "\n",
+ "In particular, `Z[i,j]` is the sales from industry `i` to industry `j`, and `X[i]` is the total sales\n",
+ "of each sector `i`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bac5b180",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "codes = ch2_data[\"us_sectors_15\"][\"codes\"]\n",
+ "Z = ch2_data[\"us_sectors_15\"][\"adjacency_matrix\"]\n",
+ "X = ch2_data[\"us_sectors_15\"][\"total_industry_sales\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c3a07fd9",
+ "metadata": {},
+ "source": [
+ "Now we define a function to build coefficient matrices. \n",
+ "\n",
+ "Two coefficient matrices are returned. The backward linkage case, where sales\n",
+ "between sector `i` and `j` are given as a fraction of total sales of sector\n",
+ "`j`. The forward linkage case, where sales between sector `i` and `j` are\n",
+ "given as a fraction of total sales of sector `i`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fa4c9d05",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def build_coefficient_matrices(Z, X):\n",
+ " \"\"\"\n",
+ " Build coefficient matrices A and F from Z and X via \n",
+ " \n",
+ " A[i, j] = Z[i, j] / X[j] \n",
+ " F[i, j] = Z[i, j] / X[i]\n",
+ " \n",
+ " \"\"\"\n",
+ " A, F = np.empty_like(Z), np.empty_like(Z)\n",
+ " n = A.shape[0]\n",
+ " for i in range(n):\n",
+ " for j in range(n):\n",
+ " A[i, j] = Z[i, j] / X[j]\n",
+ " F[i, j] = Z[i, j] / X[i]\n",
+ "\n",
+ " return A, F\n",
+ "\n",
+ "A, F = build_coefficient_matrices(Z, X)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "66ecec7b",
+ "metadata": {},
+ "source": [
+ "### Backward linkages for 15 US sectors in 2021\n",
+ "\n",
+ "Here we calculate the hub-based eigenvector centrality of our backward linkage coefficient matrix."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f1e25af6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "centrality = qbn_io.eigenvector_centrality(A)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "99a5def5",
+ "metadata": {},
+ "source": [
+ "Now we use the `quantecon_book_networks` package to produce our plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5c9a1c8e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(8, 10))\n",
+ "plt.axis(\"off\")\n",
+ "color_list = qbn_io.colorise_weights(centrality, beta=False)\n",
+ "# Remove self-loops\n",
+ "A1 = A.copy()\n",
+ "for i in range(A1.shape[0]):\n",
+ " A1[i][i] = 0\n",
+ "qbn_plt.plot_graph(A1, X, ax, codes, \n",
+ " layout_type='spring',\n",
+ " layout_seed=5432167,\n",
+ " tol=0.0,\n",
+ " node_color_list=color_list) \n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/input_output_analysis_15.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "30667357",
+ "metadata": {},
+ "source": [
+ "### Eigenvector centrality across US industrial sectors\n",
+ "\n",
+ "Now we plot a bar chart of hub-based eigenvector centrality by sector."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "60e3a77c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=default_figsize)\n",
+ "ax.bar(codes, centrality, color=color_list, alpha=0.6)\n",
+ "ax.set_ylabel(\"eigenvector centrality\", fontsize=12)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/input_output_analysis_15_ec.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f0556d61",
+ "metadata": {},
+ "source": [
+ "### Output multipliers across 15 US industrial sectors\n",
+ "\n",
+ "Output multipliers are equal to the authority-based Katz centrality measure of\n",
+ "the backward linkage coefficient matrix. \n",
+ "\n",
+ "Here we calculate authority-based\n",
+ "Katz centrality using the `quantecon_book_networks` package."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f1358ad2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "omult = qbn_io.katz_centrality(A, authority=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d45c83f2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=default_figsize)\n",
+ "omult_color_list = qbn_io.colorise_weights(omult,beta=False)\n",
+ "ax.bar(codes, omult, color=omult_color_list, alpha=0.6)\n",
+ "ax.set_ylabel(\"Output multipliers\", fontsize=12)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/input_output_analysis_15_omult.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d07a87b3",
+ "metadata": {},
+ "source": [
+ "### Forward linkages and upstreamness over US industrial sectors\n",
+ "\n",
+ "Upstreamness is the hub-based Katz centrality of the forward linkage\n",
+ "coefficient matrix. \n",
+ "\n",
+ "Here we calculate hub-based Katz centrality."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2df63367",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "upstreamness = qbn_io.katz_centrality(F)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "cff33a99",
+ "metadata": {},
+ "source": [
+ "Now we plot the network."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8ac45508",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(8, 10))\n",
+ "plt.axis(\"off\")\n",
+ "upstreamness_color_list = qbn_io.colorise_weights(upstreamness,beta=False)\n",
+ "# Remove self-loops\n",
+ "for i in range(F.shape[0]):\n",
+ " F[i][i] = 0\n",
+ "qbn_plt.plot_graph(F, X, ax, codes, \n",
+ " layout_type='spring', # alternative layouts: spring, circular, random, spiral\n",
+ " layout_seed=5432167,\n",
+ " tol=0.0,\n",
+ " node_color_list=upstreamness_color_list) \n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/input_output_analysis_15_fwd.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e9cbc94b",
+ "metadata": {},
+ "source": [
+ "### Relative upstreamness of US industrial sectors\n",
+ "\n",
+ "Here we produce a barplot of upstreamness."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "abc67147",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=default_figsize)\n",
+ "ax.bar(codes, upstreamness, color=upstreamness_color_list, alpha=0.6)\n",
+ "ax.set_ylabel(\"upstreamness\", fontsize=12)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/input_output_analysis_15_up.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f9223fd5",
+ "metadata": {},
+ "source": [
+    "### Hub-based Katz centrality across 15 US industrial sectors\n",
+ "\n",
+ "Next we plot the hub-based Katz centrality of the backward linkage coefficient matrix."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "19bce52b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "kcentral = qbn_io.katz_centrality(A)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d7563381",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=default_figsize)\n",
+ "kcentral_color_list = qbn_io.colorise_weights(kcentral,beta=False)\n",
+ "ax.bar(codes, kcentral, color=kcentral_color_list, alpha=0.6)\n",
+ "ax.set_ylabel(\"Katz hub centrality\", fontsize=12)\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/input_output_analysis_15_katz.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bf61ce72",
+ "metadata": {},
+ "source": [
+ "### The Leontief inverse 𝐿 (hot colors are larger values)\n",
+ "\n",
+    "We construct the Leontief inverse matrix from the 15 sector adjacency matrix."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "87c25e09",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "I = np.identity(len(A))\n",
+ "L = np.linalg.inv(I - A)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a37cada4",
+ "metadata": {},
+ "source": [
+ "Now we produce the plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d590c0e1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(6.5, 5.5))\n",
+ "\n",
+ "ticks = range(len(L))\n",
+ "\n",
+ "levels = np.sqrt(np.linspace(0, 0.75, 100))\n",
+ "\n",
+ "co = ax.contourf(ticks, \n",
+ " ticks,\n",
+ " L,\n",
+ " levels,\n",
+ " alpha=0.85, cmap=cm.plasma)\n",
+ "\n",
+ "ax.set_xlabel('sector $j$', fontsize=12)\n",
+ "ax.set_ylabel('sector $i$', fontsize=12)\n",
+ "ax.set_yticks(ticks)\n",
+ "ax.set_yticklabels(codes)\n",
+ "ax.set_xticks(ticks)\n",
+ "ax.set_xticklabels(codes)\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/input_output_analysis_15_leo.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1a9a7d40",
+ "metadata": {},
+ "source": [
+ "### Propagation of demand shocks via backward linkages\n",
+ "\n",
+ "We begin by generating a demand shock vector $d$."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0861b2f5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "N = len(A)\n",
+ "np.random.seed(1234)\n",
+ "d = np.random.rand(N) \n",
+ "d[6] = 1 # positive shock to agriculture"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7fa2840f",
+ "metadata": {},
+ "source": [
+ "Now we simulate the demand shock propagating through the economy."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9089f879",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sim_length = 11\n",
+ "x = d\n",
+ "x_vecs = []\n",
+ "for i in range(sim_length):\n",
+ " if i % 2 ==0:\n",
+ " x_vecs.append(x)\n",
+ " x = A @ x"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4a51906c",
+ "metadata": {},
+ "source": [
+ "Finally, we plot the shock propagating through the economy."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9d777b13",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, axes = plt.subplots(3, 2, figsize=(8, 10))\n",
+ "axes = axes.flatten()\n",
+ "\n",
+ "for ax, x_vec, i in zip(axes, x_vecs, range(sim_length)):\n",
+ " if i % 2 != 0:\n",
+ " pass\n",
+ " ax.set_title(f\"round {i*2}\")\n",
+ " x_vec_cols = qbn_io.colorise_weights(x_vec,beta=False)\n",
+ " # remove self-loops\n",
+ " for i in range(len(A)):\n",
+ " A[i][i] = 0\n",
+ " qbn_plt.plot_graph(A, X, ax, codes,\n",
+ " layout_type='spring',\n",
+ " layout_seed=342156,\n",
+ " node_color_list=x_vec_cols,\n",
+ " node_size_multiple=0.00028,\n",
+ " edge_size_multiple=0.8)\n",
+ "\n",
+ "plt.tight_layout()\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/input_output_analysis_15_shocks.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6ccd313f",
+ "metadata": {},
+ "source": [
+ "### Network for 71 US sectors in 2021\n",
+ "\n",
+ "We start by loading a graph of linkages between 71 US sectors in 2021."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "24d32882",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "codes_71 = ch2_data['us_sectors_71']['codes']\n",
+ "A_71 = ch2_data['us_sectors_71']['adjacency_matrix']\n",
+ "X_71 = ch2_data['us_sectors_71']['total_industry_sales']"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e4efe0f9",
+ "metadata": {},
+ "source": [
+ "Next we calculate our graph's properties. \n",
+ "\n",
+ "We use hub-based eigenvector centrality as our centrality measure for this plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c468f74b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "centrality_71 = qbn_io.eigenvector_centrality(A_71)\n",
+ "color_list_71 = qbn_io.colorise_weights(centrality_71,beta=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "37c499e0",
+ "metadata": {},
+ "source": [
+ "Finally we produce the plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3d920aca",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(10, 12))\n",
+ "plt.axis(\"off\")\n",
+ "# Remove self-loops\n",
+ "for i in range(A_71.shape[0]):\n",
+ " A_71[i][i] = 0\n",
+ "qbn_plt.plot_graph(A_71, X_71, ax, codes_71,\n",
+ " node_size_multiple=0.0005,\n",
+ " edge_size_multiple=4.0,\n",
+ " layout_type='spring',\n",
+ " layout_seed=5432167,\n",
+ " tol=0.01,\n",
+ " node_color_list=color_list_71)\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/input_output_analysis_71.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "419a96fa",
+ "metadata": {},
+ "source": [
+ "### Network for 114 Australian industry sectors in 2020\n",
+ "\n",
+ "Next we load a graph of linkages between 114 Australian sectors in 2020."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "52309e87",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "codes_114 = ch2_data['au_sectors_114']['codes']\n",
+ "A_114 = ch2_data['au_sectors_114']['adjacency_matrix']\n",
+ "X_114 = ch2_data['au_sectors_114']['total_industry_sales']"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b5e46835",
+ "metadata": {},
+ "source": [
+ "Next we calculate our graph's properties. \n",
+ "\n",
+ "We use hub-based eigenvector centrality as our centrality measure for this plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9555ffa8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "centrality_114 = qbn_io.eigenvector_centrality(A_114)\n",
+ "color_list_114 = qbn_io.colorise_weights(centrality_114,beta=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1b6801cd",
+ "metadata": {},
+ "source": [
+ "Finally we produce the plot."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1af80023",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, ax = plt.subplots(figsize=(11, 13.2))\n",
+ "plt.axis(\"off\")\n",
+ "# Remove self-loops\n",
+ "for i in range(A_114.shape[0]):\n",
+ " A_114[i][i] = 0\n",
+ "qbn_plt.plot_graph(A_114, X_114, ax, codes_114,\n",
+ " node_size_multiple=0.008,\n",
+ " edge_size_multiple=5.0,\n",
+ " layout_type='spring',\n",
+ " layout_seed=5432167,\n",
+ " tol=0.03,\n",
+ " node_color_list=color_list_114)\n",
+ "\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/input_output_analysis_aus_114.pdf\")\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a3095a50",
+ "metadata": {},
+ "source": [
+ "### GDP growth rates and std. deviations (in parentheses) for 8 countries\n",
+ "\n",
+ "Here we load a `pandas` DataFrame of GDP growth rates."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e7aa80e5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gdp_df = ch2_data['gdp_df']\n",
+ "gdp_df.head()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c82df9d4",
+ "metadata": {},
+ "source": [
+ "Now we plot the growth rates and calculate their standard deviations."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "394a86ea",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fig, axes = plt.subplots(5, 2, figsize=(8, 9))\n",
+ "axes = axes.flatten()\n",
+ "\n",
+ "countries = gdp_df.columns\n",
+ "t = np.asarray(gdp_df.index.astype(float))\n",
+ "series = [np.asarray(gdp_df[country].astype(float)) for country in countries]\n",
+ "\n",
+ "\n",
+ "for ax, country, gdp_data in zip(axes, countries, series):\n",
+ " \n",
+ " ax.plot(t, gdp_data)\n",
+ " ax.set_title(f'{country} (${gdp_data.std():1.2f}$\\%)' )\n",
+ " ax.set_ylabel('\\%')\n",
+ " ax.set_ylim((-12, 14))\n",
+ "\n",
+ "plt.tight_layout()\n",
+ "if export_figures:\n",
+ " plt.savefig(\"figures/gdp_growth.pdf\")\n",
+ "plt.show()"
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "cell_metadata_filter": "-all"
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/code_book/figures/benhabib_mobility_dists.pdf b/ipynb/figures/benhabib_mobility_dists.pdf
similarity index 100%
rename from code_book/figures/benhabib_mobility_dists.pdf
rename to ipynb/figures/benhabib_mobility_dists.pdf
diff --git a/ipynb/pkg_funcs.ipynb b/ipynb/pkg_funcs.ipynb
new file mode 100644
index 0000000..886138c
--- /dev/null
+++ b/ipynb/pkg_funcs.ipynb
@@ -0,0 +1,582 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "83f7d689",
+ "metadata": {},
+ "source": [
+ "# quantecon_book_networks\n",
+ "\n",
+ "## input_output\n",
+ "\n",
+ "### node_total_exports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6e1c0e01",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def node_total_exports(G):\n",
+ " node_exports = []\n",
+ " for node1 in G.nodes():\n",
+ " total_export = 0\n",
+ " for node2 in G[node1]:\n",
+ " total_export += G[node1][node2]['weight']\n",
+ " node_exports.append(total_export)\n",
+ " return node_exports"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d91edba5",
+ "metadata": {},
+ "source": [
+ "### node_total_imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d7942abf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def node_total_imports(G):\n",
+ " node_imports = []\n",
+ " for node1 in G.nodes():\n",
+ " total_import = 0\n",
+ " for node2 in G[node1]:\n",
+ " total_import += G[node2][node1]['weight']\n",
+ " node_imports.append(total_import)\n",
+ " return node_imports"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3dd8089f",
+ "metadata": {},
+ "source": [
+ "### edge_weights"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fdf563f5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def edge_weights(G):\n",
+ " edge_weights = [G[u][v]['weight'] for u,v in G.edges()]\n",
+ " return edge_weights"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "055dcff7",
+ "metadata": {},
+ "source": [
+ "### normalise_weights"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "54211ff8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def normalise_weights(weights,scalar=1):\n",
+ " max_value = np.max(weights)\n",
+ " return [scalar * (weight / max_value) for weight in weights]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bfaedc3f",
+ "metadata": {},
+ "source": [
+ "### to_zero_one"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ed795d06",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def to_zero_one(x):\n",
+ " \"Map vector x to the zero one interval.\"\n",
+ " x = np.array(x)\n",
+ " x_min, x_max = x.min(), x.max()\n",
+ " return (x - x_min)/(x_max - x_min)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6ce4a5bd",
+ "metadata": {},
+ "source": [
+ "### to_zero_one_beta"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f2d51900",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def to_zero_one_beta(x, \n",
+ " qrange=[0.25, 0.75], \n",
+ " beta_para=[0.5, 0.5]):\n",
+ " \n",
+ " \"\"\"\n",
+ " Nonlinearly map vector x to the zero one interval with beta distribution.\n",
+ " https://en.wikipedia.org/wiki/Beta_distribution\n",
+ " \"\"\"\n",
+ " x = np.array(x)\n",
+ " x_min, x_max = x.min(), x.max()\n",
+ " if beta_para != None:\n",
+ " a, b = beta_para\n",
+ " return beta.cdf((x - x_min) /(x_max - x_min), a, b)\n",
+ " else:\n",
+ " q1, q2 = qrange\n",
+ " return (x - x_min) * (q2 - q1) /(x_max - x_min) + q1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "677a3b25",
+ "metadata": {},
+ "source": [
+ "### colorise_weights"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "650554ba",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import matplotlib.cm as cm\n",
+ "def colorise_weights(weights,beta=True,color_palette=cm.plasma):\n",
+ " if beta:\n",
+ " cp = color_palette(to_zero_one_beta(weights))\n",
+ " else:\n",
+ " cp = color_palette(to_zero_one(weights))\n",
+ " return cp "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "13e770e7",
+ "metadata": {},
+ "source": [
+ "### spec_rad"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8d31e595",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def spec_rad(M):\n",
+ " \"\"\"\n",
+ " Compute the spectral radius of M.\n",
+ " \"\"\"\n",
+ " return np.max(np.abs(np.linalg.eigvals(M)))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8b997a2b",
+ "metadata": {},
+ "source": [
+ "### adjacency_matrix_to_graph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1b8660e3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def adjacency_matrix_to_graph(A, \n",
+ " codes,\n",
+ " tol=0.0): # clip entries below tol\n",
+ " \"\"\"\n",
+ " Build a networkx graph object given an adjacency matrix\n",
+ " \"\"\"\n",
+ " G = nx.DiGraph()\n",
+ " N = len(A)\n",
+ "\n",
+ " # Add nodes\n",
+ " for i, code in enumerate(codes):\n",
+ " G.add_node(code, name=code)\n",
+ "\n",
+ " # Add the edges\n",
+ " for i in range(N):\n",
+ " for j in range(N):\n",
+ " a = A[i, j]\n",
+ " if a > tol:\n",
+ " G.add_edge(codes[i], codes[j], weight=a)\n",
+ "\n",
+ " return G"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e673657f",
+ "metadata": {},
+ "source": [
+ "### eigenvector_centrality"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "90190208",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def eigenvector_centrality(A, k=40, authority=False):\n",
+ " \"\"\"\n",
+ " Computes the dominant eigenvector of A. Assumes A is \n",
+ " primitive and uses the power method. \n",
+ " \"\"\"\n",
+ " A_temp = A.T if authority else A\n",
+ " n = len(A_temp)\n",
+ " r = spec_rad(A_temp)\n",
+ " e = r**(-k) * (np.linalg.matrix_power(A_temp, k) @ np.ones(n))\n",
+ " return e / np.sum(e)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "78f99a4d",
+ "metadata": {},
+ "source": [
+ "### katz_centrality"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "842342f6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def katz_centrality(A, b=1, authority=False):\n",
+ " \"\"\"\n",
+ " Computes the Katz centrality of A, defined as the x solving\n",
+ "\n",
+ " x = 1 + b A x (1 = vector of ones)\n",
+ "\n",
+ " Assumes that A is square.\n",
+ "\n",
+ " If authority=True, then A is replaced by its transpose.\n",
+ " \"\"\"\n",
+ " n = len(A)\n",
+ " I = np.identity(n)\n",
+ " C = I - b * A.T if authority else I - b * A\n",
+ " return np.linalg.solve(C, np.ones(n))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "76431864",
+ "metadata": {},
+ "source": [
+ "### build_unweighted_matrix"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0e638e6a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def build_unweighted_matrix(Z, tol=1e-5):\n",
+ " \"\"\"\n",
+    "    return an unweighted adjacency matrix\n",
+ " \"\"\"\n",
+ " return 1*(Z>tol)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "db7270d9",
+ "metadata": {},
+ "source": [
+ "### erdos_renyi_graph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8a74e9db",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def erdos_renyi_graph(n=100, p=0.5, seed=1234):\n",
+ " \"Returns an Erdős-Rényi random graph.\"\n",
+ " \n",
+ " np.random.seed(seed)\n",
+ " edges = itertools.combinations(range(n), 2)\n",
+ " G = nx.Graph()\n",
+ " \n",
+ " for e in edges:\n",
+ " if np.random.rand() < p:\n",
+ " G.add_edge(*e)\n",
+ " return G"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "02443320",
+ "metadata": {},
+ "source": [
+ "### build_coefficient_matrices"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9316d715",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def build_coefficient_matrices(Z, X):\n",
+ " \"\"\"\n",
+ " Build coefficient matrices A and F from Z and X via \n",
+ " \n",
+ " A[i, j] = Z[i, j] / X[j] \n",
+ " F[i, j] = Z[i, j] / X[i]\n",
+ " \n",
+ " \"\"\"\n",
+ " A, F = np.empty_like(Z), np.empty_like(Z)\n",
+ " n = A.shape[0]\n",
+ " for i in range(n):\n",
+ " for j in range(n):\n",
+ " A[i, j] = Z[i, j] / X[j]\n",
+ " F[i, j] = Z[i, j] / X[i]\n",
+ "\n",
+ " return A, F"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "54010bf3",
+ "metadata": {},
+ "source": [
+ "## plotting\n",
+ "\n",
+ "### plot_graph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4439b087",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def plot_graph(A, \n",
+ " X,\n",
+ " ax,\n",
+ " codes,\n",
+ " node_color_list=None,\n",
+ " node_size_multiple=0.0005, \n",
+ " edge_size_multiple=14,\n",
+ " layout_type='circular',\n",
+ " layout_seed=1234,\n",
+ " tol=0.03): # clip entries below tol\n",
+ "\n",
+ " G = nx.DiGraph()\n",
+ " N = len(A)\n",
+ "\n",
+ " # Add nodes, with weights by sales of the sector\n",
+ " for i, w in enumerate(X):\n",
+ " G.add_node(codes[i], weight=w, name=codes[i])\n",
+ "\n",
+ " node_sizes = X * node_size_multiple\n",
+ "\n",
+ " # Position the nodes\n",
+ " if layout_type == 'circular':\n",
+ " node_pos_dict = nx.circular_layout(G)\n",
+ " elif layout_type == 'spring':\n",
+ " node_pos_dict = nx.spring_layout(G, seed=layout_seed)\n",
+ " elif layout_type == 'random':\n",
+ " node_pos_dict = nx.random_layout(G, seed=layout_seed)\n",
+ " elif layout_type == 'spiral':\n",
+ " node_pos_dict = nx.spiral_layout(G)\n",
+ "\n",
+ " # Add the edges, along with their colors and widths\n",
+ " edge_colors = []\n",
+ " edge_widths = []\n",
+ " for i in range(N):\n",
+ " for j in range(N):\n",
+ " a = A[i, j]\n",
+ " if a > tol:\n",
+ " G.add_edge(codes[i], codes[j])\n",
+ " edge_colors.append(node_color_list[i])\n",
+ " width = a * edge_size_multiple\n",
+ " edge_widths.append(width)\n",
+ " \n",
+ " # Plot the networks\n",
+ " nx.draw_networkx_nodes(G, \n",
+ " node_pos_dict, \n",
+ " node_color=node_color_list, \n",
+ " node_size=node_sizes, \n",
+ " edgecolors='grey', \n",
+ " linewidths=2, \n",
+ " alpha=0.6, \n",
+ " ax=ax)\n",
+ "\n",
+ " nx.draw_networkx_labels(G, \n",
+ " node_pos_dict, \n",
+ " font_size=10, \n",
+ " ax=ax)\n",
+ "\n",
+ " nx.draw_networkx_edges(G, \n",
+ " node_pos_dict, \n",
+ " edge_color=edge_colors, \n",
+ " width=edge_widths, \n",
+ " arrows=True, \n",
+ " arrowsize=20, \n",
+ " alpha=0.6, \n",
+ " ax=ax, \n",
+ " arrowstyle='->', \n",
+ " node_size=node_sizes, \n",
+ " connectionstyle='arc3,rad=0.15')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f879578e",
+ "metadata": {},
+ "source": [
+ "### plot_matrices"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2e619a96",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def plot_matrices(matrix,\n",
+ " codes,\n",
+ " ax,\n",
+ " font_size=12,\n",
+ " alpha=0.6, \n",
+ " colormap=cm.viridis, \n",
+ " color45d=None, \n",
+ " xlabel='sector $j$', \n",
+ " ylabel='sector $i$'):\n",
+ " \n",
+ " ticks = range(len(matrix))\n",
+ "\n",
+ " levels = np.sqrt(np.linspace(0, 0.75, 100))\n",
+ " \n",
+ " \n",
+ " if color45d != None:\n",
+ " co = ax.contourf(ticks, \n",
+ " ticks,\n",
+ " matrix,\n",
+ "# levels,\n",
+ " alpha=alpha, cmap=colormap)\n",
+ " ax.plot(ticks, ticks, color=color45d)\n",
+ " else:\n",
+ " co = ax.contourf(ticks, \n",
+ " ticks,\n",
+ " matrix,\n",
+ " levels,\n",
+ " alpha=alpha, cmap=colormap)\n",
+ "\n",
+ " #plt.colorbar(co)\n",
+ "\n",
+ " ax.set_xlabel(xlabel, fontsize=font_size)\n",
+ " ax.set_ylabel(ylabel, fontsize=font_size)\n",
+ " ax.set_yticks(ticks)\n",
+ " ax.set_yticklabels(codes)\n",
+ " ax.set_xticks(ticks)\n",
+ " ax.set_xticklabels(codes)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "88631141",
+ "metadata": {},
+ "source": [
+ "### unit_simplex"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2f4de6b0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def unit_simplex(angle):\n",
+ " \n",
+ " fig = plt.figure(figsize=(10, 8))\n",
+ " ax = fig.add_subplot(111, projection='3d')\n",
+ "\n",
+ " vtx = [[0, 0, 1],\n",
+ " [0, 1, 0], \n",
+ " [1, 0, 0]]\n",
+ " \n",
+ " tri = Poly3DCollection([vtx], color='darkblue', alpha=0.3)\n",
+ " tri.set_facecolor([0.5, 0.5, 1])\n",
+ " ax.add_collection3d(tri)\n",
+ "\n",
+ " ax.set(xlim=(0, 1), ylim=(0, 1), zlim=(0, 1), \n",
+ " xticks=(1,), yticks=(1,), zticks=(1,))\n",
+ "\n",
+ " ax.set_xticklabels(['$(1, 0, 0)$'], fontsize=16)\n",
+ " ax.set_yticklabels([f'$(0, 1, 0)$'], fontsize=16)\n",
+ " ax.set_zticklabels([f'$(0, 0, 1)$'], fontsize=16)\n",
+ "\n",
+ " ax.xaxis.majorTicks[0].set_pad(15)\n",
+ " ax.yaxis.majorTicks[0].set_pad(15)\n",
+ " ax.zaxis.majorTicks[0].set_pad(35)\n",
+ "\n",
+ " ax.view_init(30, angle)\n",
+ "\n",
+ " # Move axis to origin\n",
+ " ax.xaxis._axinfo['juggled'] = (0, 0, 0)\n",
+ " ax.yaxis._axinfo['juggled'] = (1, 1, 1)\n",
+ " ax.zaxis._axinfo['juggled'] = (2, 2, 0)\n",
+ " \n",
+ " ax.grid(False)\n",
+ " \n",
+ " return ax"
+ ]
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "cell_metadata_filter": "-all"
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/frontpage/LICENSE.txt b/website/LICENSE.txt
similarity index 100%
rename from frontpage/LICENSE.txt
rename to website/LICENSE.txt
diff --git a/frontpage/README.txt b/website/README.txt
similarity index 100%
rename from frontpage/README.txt
rename to website/README.txt
diff --git a/frontpage/assets/css/fontawesome-all.min.css b/website/assets/css/fontawesome-all.min.css
similarity index 100%
rename from frontpage/assets/css/fontawesome-all.min.css
rename to website/assets/css/fontawesome-all.min.css
diff --git a/frontpage/assets/css/main.css b/website/assets/css/main.css
similarity index 99%
rename from frontpage/assets/css/main.css
rename to website/assets/css/main.css
index 1ddf66f..c8e02a0 100644
--- a/frontpage/assets/css/main.css
+++ b/website/assets/css/main.css
@@ -3879,4 +3879,6 @@ input, select, textarea {
padding: 4rem;
}
- }
\ No newline at end of file
+ }
+
+
diff --git a/frontpage/assets/js/breakpoints.min.js b/website/assets/js/breakpoints.min.js
similarity index 100%
rename from frontpage/assets/js/breakpoints.min.js
rename to website/assets/js/breakpoints.min.js
diff --git a/frontpage/assets/js/browser.min.js b/website/assets/js/browser.min.js
similarity index 100%
rename from frontpage/assets/js/browser.min.js
rename to website/assets/js/browser.min.js
diff --git a/frontpage/assets/js/jquery.min.js b/website/assets/js/jquery.min.js
similarity index 100%
rename from frontpage/assets/js/jquery.min.js
rename to website/assets/js/jquery.min.js
diff --git a/frontpage/assets/js/jquery.scrolly.min.js b/website/assets/js/jquery.scrolly.min.js
similarity index 100%
rename from frontpage/assets/js/jquery.scrolly.min.js
rename to website/assets/js/jquery.scrolly.min.js
diff --git a/frontpage/assets/js/main.js b/website/assets/js/main.js
similarity index 100%
rename from frontpage/assets/js/main.js
rename to website/assets/js/main.js
diff --git a/frontpage/assets/js/util.js b/website/assets/js/util.js
similarity index 100%
rename from frontpage/assets/js/util.js
rename to website/assets/js/util.js
diff --git a/frontpage/assets/sass/base/_page.scss b/website/assets/sass/base/_page.scss
similarity index 100%
rename from frontpage/assets/sass/base/_page.scss
rename to website/assets/sass/base/_page.scss
diff --git a/frontpage/assets/sass/base/_reset.scss b/website/assets/sass/base/_reset.scss
similarity index 100%
rename from frontpage/assets/sass/base/_reset.scss
rename to website/assets/sass/base/_reset.scss
diff --git a/frontpage/assets/sass/base/_typography.scss b/website/assets/sass/base/_typography.scss
similarity index 100%
rename from frontpage/assets/sass/base/_typography.scss
rename to website/assets/sass/base/_typography.scss
diff --git a/frontpage/assets/sass/components/_actions.scss b/website/assets/sass/components/_actions.scss
similarity index 100%
rename from frontpage/assets/sass/components/_actions.scss
rename to website/assets/sass/components/_actions.scss
diff --git a/frontpage/assets/sass/components/_arrow.scss b/website/assets/sass/components/_arrow.scss
similarity index 100%
rename from frontpage/assets/sass/components/_arrow.scss
rename to website/assets/sass/components/_arrow.scss
diff --git a/frontpage/assets/sass/components/_box.scss b/website/assets/sass/components/_box.scss
similarity index 100%
rename from frontpage/assets/sass/components/_box.scss
rename to website/assets/sass/components/_box.scss
diff --git a/frontpage/assets/sass/components/_button.scss b/website/assets/sass/components/_button.scss
similarity index 100%
rename from frontpage/assets/sass/components/_button.scss
rename to website/assets/sass/components/_button.scss
diff --git a/frontpage/assets/sass/components/_feature-icons.scss b/website/assets/sass/components/_feature-icons.scss
similarity index 100%
rename from frontpage/assets/sass/components/_feature-icons.scss
rename to website/assets/sass/components/_feature-icons.scss
diff --git a/frontpage/assets/sass/components/_form.scss b/website/assets/sass/components/_form.scss
similarity index 100%
rename from frontpage/assets/sass/components/_form.scss
rename to website/assets/sass/components/_form.scss
diff --git a/frontpage/assets/sass/components/_gallery.scss b/website/assets/sass/components/_gallery.scss
similarity index 100%
rename from frontpage/assets/sass/components/_gallery.scss
rename to website/assets/sass/components/_gallery.scss
diff --git a/frontpage/assets/sass/components/_icon.scss b/website/assets/sass/components/_icon.scss
similarity index 100%
rename from frontpage/assets/sass/components/_icon.scss
rename to website/assets/sass/components/_icon.scss
diff --git a/frontpage/assets/sass/components/_icons.scss b/website/assets/sass/components/_icons.scss
similarity index 100%
rename from frontpage/assets/sass/components/_icons.scss
rename to website/assets/sass/components/_icons.scss
diff --git a/frontpage/assets/sass/components/_image.scss b/website/assets/sass/components/_image.scss
similarity index 100%
rename from frontpage/assets/sass/components/_image.scss
rename to website/assets/sass/components/_image.scss
diff --git a/frontpage/assets/sass/components/_list.scss b/website/assets/sass/components/_list.scss
similarity index 100%
rename from frontpage/assets/sass/components/_list.scss
rename to website/assets/sass/components/_list.scss
diff --git a/frontpage/assets/sass/components/_row.scss b/website/assets/sass/components/_row.scss
similarity index 100%
rename from frontpage/assets/sass/components/_row.scss
rename to website/assets/sass/components/_row.scss
diff --git a/frontpage/assets/sass/components/_table.scss b/website/assets/sass/components/_table.scss
similarity index 100%
rename from frontpage/assets/sass/components/_table.scss
rename to website/assets/sass/components/_table.scss
diff --git a/frontpage/assets/sass/layout/_wrapper.scss b/website/assets/sass/layout/_wrapper.scss
similarity index 100%
rename from frontpage/assets/sass/layout/_wrapper.scss
rename to website/assets/sass/layout/_wrapper.scss
diff --git a/frontpage/assets/sass/libs/_breakpoints.scss b/website/assets/sass/libs/_breakpoints.scss
similarity index 100%
rename from frontpage/assets/sass/libs/_breakpoints.scss
rename to website/assets/sass/libs/_breakpoints.scss
diff --git a/frontpage/assets/sass/libs/_functions.scss b/website/assets/sass/libs/_functions.scss
similarity index 100%
rename from frontpage/assets/sass/libs/_functions.scss
rename to website/assets/sass/libs/_functions.scss
diff --git a/frontpage/assets/sass/libs/_html-grid.scss b/website/assets/sass/libs/_html-grid.scss
similarity index 100%
rename from frontpage/assets/sass/libs/_html-grid.scss
rename to website/assets/sass/libs/_html-grid.scss
diff --git a/frontpage/assets/sass/libs/_mixins.scss b/website/assets/sass/libs/_mixins.scss
similarity index 100%
rename from frontpage/assets/sass/libs/_mixins.scss
rename to website/assets/sass/libs/_mixins.scss
diff --git a/frontpage/assets/sass/libs/_vars.scss b/website/assets/sass/libs/_vars.scss
similarity index 100%
rename from frontpage/assets/sass/libs/_vars.scss
rename to website/assets/sass/libs/_vars.scss
diff --git a/frontpage/assets/sass/libs/_vendor.scss b/website/assets/sass/libs/_vendor.scss
similarity index 100%
rename from frontpage/assets/sass/libs/_vendor.scss
rename to website/assets/sass/libs/_vendor.scss
diff --git a/frontpage/assets/sass/main.scss b/website/assets/sass/main.scss
similarity index 100%
rename from frontpage/assets/sass/main.scss
rename to website/assets/sass/main.scss
diff --git a/frontpage/assets/webfonts/fa-brands-400.eot b/website/assets/webfonts/fa-brands-400.eot
similarity index 100%
rename from frontpage/assets/webfonts/fa-brands-400.eot
rename to website/assets/webfonts/fa-brands-400.eot
diff --git a/frontpage/assets/webfonts/fa-brands-400.svg b/website/assets/webfonts/fa-brands-400.svg
similarity index 100%
rename from frontpage/assets/webfonts/fa-brands-400.svg
rename to website/assets/webfonts/fa-brands-400.svg
diff --git a/frontpage/assets/webfonts/fa-brands-400.ttf b/website/assets/webfonts/fa-brands-400.ttf
similarity index 100%
rename from frontpage/assets/webfonts/fa-brands-400.ttf
rename to website/assets/webfonts/fa-brands-400.ttf
diff --git a/frontpage/assets/webfonts/fa-brands-400.woff b/website/assets/webfonts/fa-brands-400.woff
similarity index 100%
rename from frontpage/assets/webfonts/fa-brands-400.woff
rename to website/assets/webfonts/fa-brands-400.woff
diff --git a/frontpage/assets/webfonts/fa-brands-400.woff2 b/website/assets/webfonts/fa-brands-400.woff2
similarity index 100%
rename from frontpage/assets/webfonts/fa-brands-400.woff2
rename to website/assets/webfonts/fa-brands-400.woff2
diff --git a/frontpage/assets/webfonts/fa-regular-400.eot b/website/assets/webfonts/fa-regular-400.eot
similarity index 100%
rename from frontpage/assets/webfonts/fa-regular-400.eot
rename to website/assets/webfonts/fa-regular-400.eot
diff --git a/frontpage/assets/webfonts/fa-regular-400.svg b/website/assets/webfonts/fa-regular-400.svg
similarity index 100%
rename from frontpage/assets/webfonts/fa-regular-400.svg
rename to website/assets/webfonts/fa-regular-400.svg
diff --git a/frontpage/assets/webfonts/fa-regular-400.ttf b/website/assets/webfonts/fa-regular-400.ttf
similarity index 100%
rename from frontpage/assets/webfonts/fa-regular-400.ttf
rename to website/assets/webfonts/fa-regular-400.ttf
diff --git a/frontpage/assets/webfonts/fa-regular-400.woff b/website/assets/webfonts/fa-regular-400.woff
similarity index 100%
rename from frontpage/assets/webfonts/fa-regular-400.woff
rename to website/assets/webfonts/fa-regular-400.woff
diff --git a/frontpage/assets/webfonts/fa-regular-400.woff2 b/website/assets/webfonts/fa-regular-400.woff2
similarity index 100%
rename from frontpage/assets/webfonts/fa-regular-400.woff2
rename to website/assets/webfonts/fa-regular-400.woff2
diff --git a/frontpage/assets/webfonts/fa-solid-900.eot b/website/assets/webfonts/fa-solid-900.eot
similarity index 100%
rename from frontpage/assets/webfonts/fa-solid-900.eot
rename to website/assets/webfonts/fa-solid-900.eot
diff --git a/frontpage/assets/webfonts/fa-solid-900.svg b/website/assets/webfonts/fa-solid-900.svg
similarity index 100%
rename from frontpage/assets/webfonts/fa-solid-900.svg
rename to website/assets/webfonts/fa-solid-900.svg
diff --git a/frontpage/assets/webfonts/fa-solid-900.ttf b/website/assets/webfonts/fa-solid-900.ttf
similarity index 100%
rename from frontpage/assets/webfonts/fa-solid-900.ttf
rename to website/assets/webfonts/fa-solid-900.ttf
diff --git a/frontpage/assets/webfonts/fa-solid-900.woff b/website/assets/webfonts/fa-solid-900.woff
similarity index 100%
rename from frontpage/assets/webfonts/fa-solid-900.woff
rename to website/assets/webfonts/fa-solid-900.woff
diff --git a/frontpage/assets/webfonts/fa-solid-900.woff2 b/website/assets/webfonts/fa-solid-900.woff2
similarity index 100%
rename from frontpage/assets/webfonts/fa-solid-900.woff2
rename to website/assets/webfonts/fa-solid-900.woff2
diff --git a/frontpage/images/gallery/fulls/01.jpg b/website/images/gallery/fulls/01.jpg
similarity index 100%
rename from frontpage/images/gallery/fulls/01.jpg
rename to website/images/gallery/fulls/01.jpg
diff --git a/frontpage/images/gallery/fulls/02.jpg b/website/images/gallery/fulls/02.jpg
similarity index 100%
rename from frontpage/images/gallery/fulls/02.jpg
rename to website/images/gallery/fulls/02.jpg
diff --git a/frontpage/images/gallery/fulls/03.jpg b/website/images/gallery/fulls/03.jpg
similarity index 100%
rename from frontpage/images/gallery/fulls/03.jpg
rename to website/images/gallery/fulls/03.jpg
diff --git a/frontpage/images/gallery/fulls/04.jpg b/website/images/gallery/fulls/04.jpg
similarity index 100%
rename from frontpage/images/gallery/fulls/04.jpg
rename to website/images/gallery/fulls/04.jpg
diff --git a/frontpage/images/gallery/fulls/05.jpg b/website/images/gallery/fulls/05.jpg
similarity index 100%
rename from frontpage/images/gallery/fulls/05.jpg
rename to website/images/gallery/fulls/05.jpg
diff --git a/frontpage/images/gallery/fulls/06.jpg b/website/images/gallery/fulls/06.jpg
similarity index 100%
rename from frontpage/images/gallery/fulls/06.jpg
rename to website/images/gallery/fulls/06.jpg
diff --git a/frontpage/images/gallery/fulls/07.jpg b/website/images/gallery/fulls/07.jpg
similarity index 100%
rename from frontpage/images/gallery/fulls/07.jpg
rename to website/images/gallery/fulls/07.jpg
diff --git a/frontpage/images/gallery/fulls/08.jpg b/website/images/gallery/fulls/08.jpg
similarity index 100%
rename from frontpage/images/gallery/fulls/08.jpg
rename to website/images/gallery/fulls/08.jpg
diff --git a/frontpage/images/gallery/fulls/09.jpg b/website/images/gallery/fulls/09.jpg
similarity index 100%
rename from frontpage/images/gallery/fulls/09.jpg
rename to website/images/gallery/fulls/09.jpg
diff --git a/frontpage/images/gallery/fulls/10.jpg b/website/images/gallery/fulls/10.jpg
similarity index 100%
rename from frontpage/images/gallery/fulls/10.jpg
rename to website/images/gallery/fulls/10.jpg
diff --git a/frontpage/images/gallery/thumbs/01.jpg b/website/images/gallery/thumbs/01.jpg
similarity index 100%
rename from frontpage/images/gallery/thumbs/01.jpg
rename to website/images/gallery/thumbs/01.jpg
diff --git a/frontpage/images/gallery/thumbs/02.jpg b/website/images/gallery/thumbs/02.jpg
similarity index 100%
rename from frontpage/images/gallery/thumbs/02.jpg
rename to website/images/gallery/thumbs/02.jpg
diff --git a/frontpage/images/gallery/thumbs/03.jpg b/website/images/gallery/thumbs/03.jpg
similarity index 100%
rename from frontpage/images/gallery/thumbs/03.jpg
rename to website/images/gallery/thumbs/03.jpg
diff --git a/frontpage/images/gallery/thumbs/04.jpg b/website/images/gallery/thumbs/04.jpg
similarity index 100%
rename from frontpage/images/gallery/thumbs/04.jpg
rename to website/images/gallery/thumbs/04.jpg
diff --git a/frontpage/images/gallery/thumbs/05.jpg b/website/images/gallery/thumbs/05.jpg
similarity index 100%
rename from frontpage/images/gallery/thumbs/05.jpg
rename to website/images/gallery/thumbs/05.jpg
diff --git a/frontpage/images/gallery/thumbs/06.jpg b/website/images/gallery/thumbs/06.jpg
similarity index 100%
rename from frontpage/images/gallery/thumbs/06.jpg
rename to website/images/gallery/thumbs/06.jpg
diff --git a/frontpage/images/gallery/thumbs/07.jpg b/website/images/gallery/thumbs/07.jpg
similarity index 100%
rename from frontpage/images/gallery/thumbs/07.jpg
rename to website/images/gallery/thumbs/07.jpg
diff --git a/frontpage/images/gallery/thumbs/08.jpg b/website/images/gallery/thumbs/08.jpg
similarity index 100%
rename from frontpage/images/gallery/thumbs/08.jpg
rename to website/images/gallery/thumbs/08.jpg
diff --git a/frontpage/images/gallery/thumbs/09.jpg b/website/images/gallery/thumbs/09.jpg
similarity index 100%
rename from frontpage/images/gallery/thumbs/09.jpg
rename to website/images/gallery/thumbs/09.jpg
diff --git a/frontpage/images/gallery/thumbs/10.jpg b/website/images/gallery/thumbs/10.jpg
similarity index 100%
rename from frontpage/images/gallery/thumbs/10.jpg
rename to website/images/gallery/thumbs/10.jpg
diff --git a/frontpage/images/github-mark.png b/website/images/github-mark.png
similarity index 100%
rename from frontpage/images/github-mark.png
rename to website/images/github-mark.png
diff --git a/frontpage/images/logo-square.svg b/website/images/logo-square.svg
similarity index 100%
rename from frontpage/images/logo-square.svg
rename to website/images/logo-square.svg
diff --git a/frontpage/images/mail.png b/website/images/mail.png
similarity index 100%
rename from frontpage/images/mail.png
rename to website/images/mail.png
diff --git a/frontpage/images/network3.jpg b/website/images/network3.jpg
similarity index 100%
rename from frontpage/images/network3.jpg
rename to website/images/network3.jpg
diff --git a/frontpage/images/pic01.jpg b/website/images/pic01.jpg
similarity index 100%
rename from frontpage/images/pic01.jpg
rename to website/images/pic01.jpg
diff --git a/frontpage/images/pic02.jpg b/website/images/pic02.jpg
similarity index 100%
rename from frontpage/images/pic02.jpg
rename to website/images/pic02.jpg
diff --git a/frontpage/images/qe-logo-small.png b/website/images/qe-logo-small.png
similarity index 100%
rename from frontpage/images/qe-logo-small.png
rename to website/images/qe-logo-small.png
diff --git a/frontpage/index.html b/website/index.html
similarity index 88%
rename from frontpage/index.html
rename to website/index.html
index 7c61998..1c82b78 100644
--- a/frontpage/index.html
+++ b/website/index.html
@@ -24,8 +24,8 @@
Economic Networks
Theory and Computation
Thomas J. Sargent and John Stachurski
- Download the textbook here
-
你可以点击 这里 下载电子版教材
+
Download the textbook here
+
你可以点击 这里 下载电子版教材
- Next
@@ -92,7 +92,7 @@ Links
-
- Code Book
+ Notebooks
-
GitHub
diff --git a/website/notebooks.html b/website/notebooks.html
new file mode 100644
index 0000000..c70638d
--- /dev/null
+++ b/website/notebooks.html
@@ -0,0 +1,85 @@
+
+
+
+
+
+ Notebooks
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file