From 70306e6608f69e7bdbb96262a6829ff085167c58 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 8 Jun 2021 09:47:01 -0500 Subject: [PATCH 01/35] Created copy of het_block. --- src/sequence_jacobian/blocks/hetdc_block.py | 922 ++++++++++++++++++++ 1 file changed, 922 insertions(+) create mode 100644 src/sequence_jacobian/blocks/hetdc_block.py diff --git a/src/sequence_jacobian/blocks/hetdc_block.py b/src/sequence_jacobian/blocks/hetdc_block.py new file mode 100644 index 0000000..259d4d8 --- /dev/null +++ b/src/sequence_jacobian/blocks/hetdc_block.py @@ -0,0 +1,922 @@ +import warnings +import copy +import numpy as np + +from .support.impulse import ImpulseDict +from ..primitives import Block +from .. import utilities as utils +from ..steady_state.classes import SteadyStateDict +from ..jacobian.classes import JacobianDict +from ..devtools.deprecate import rename_output_list_to_outputs +from ..utilities.misc import verify_saved_jacobian + + +def het(exogenous, policy, backward, backward_init=None): + def decorator(back_step_fun): + return HetBlock(back_step_fun, exogenous, policy, backward, backward_init=backward_init) + return decorator + + +class HetBlock(Block): + """Part 1: Initializer for HetBlock, intended to be called via @het() decorator on backward step function. + + IMPORTANT: All `policy` and non-aggregate output variables of this HetBlock need to be *lower-case*, since + the methods that compute steady state, transitional dynamics, and Jacobians for HetBlocks automatically handle + aggregation of non-aggregate outputs across the distribution and return aggregates as upper-case equivalents + of the `policy` and non-aggregate output variables specified in the backward step function. + """ + + def __init__(self, back_step_fun, exogenous, policy, backward, backward_init=None): + """Construct HetBlock from backward iteration function. + + Parameters + ---------- + back_step_fun : function + backward iteration function + exogenous : str + name of Markov transition matrix for exogenous variable + (now only single allowed for simplicity; use Kronecker product for more) + policy : str or sequence of str + names of policy variables of endogenous, continuous state variables + e.g. assets 'a', must be returned by function + backward : str or sequence of str + variables that together comprise the 'v' that we use for iterating backward + must appear both as outputs and as arguments + + It is assumed that every output of the function (except possibly backward), including policy, + will be on a grid of dimension 1 + len(policy), where the first dimension is the exogenous + variable and then the remaining dimensions are each of the continuous policy variables, in the + same order they are listed in 'policy'. + + The Markov transition matrix between the current and future period and backward iteration + variables should appear in the backward iteration function with '_p' subscripts ("prime") to + indicate that they come from the next period. + + Currently, we only support up to two policy variables. + """ + self.name = back_step_fun.__name__ + + # self.back_step_fun is one iteration of the backward step function pertaining to a given HetBlock. + # i.e. the function pertaining to equation (14) in the paper: v_t = curlyV(v_{t+1}, X_t) + self.back_step_fun = back_step_fun + + # self.back_step_outputs and self.back_step_inputs are all of the output and input arguments of + # self.back_step_fun, the variables used in the backward iteration, + # which generally include value and/or policy functions. 
+ self.back_step_output_list = utils.misc.output_list(back_step_fun) + self.back_step_outputs = set(self.back_step_output_list) + self.back_step_inputs = set(utils.misc.input_list(back_step_fun)) + + # See the docstring of HetBlock for details on the attributes directly below + self.exogenous = exogenous + self.policy, self.back_iter_vars = (utils.misc.make_tuple(x) for x in (policy, backward)) + + # self.inputs_to_be_primed indicates all variables that enter into self.back_step_fun whose name has "_p" + # (read as prime). Because it's the case that the initial dict of input arguments for self.back_step_fun + # contains the names of these variables that omit the "_p", we need to swap the key from the unprimed to + # the primed key name, such that self.back_step_fun will properly call those variables. + # e.g. the key "Va" will become "Va_p", associated to the same value. + self.inputs_to_be_primed = {self.exogenous} | set(self.back_iter_vars) + + # self.non_back_iter_outputs are all of the outputs from self.back_step_fun excluding the backward + # iteration variables themselves. + self.non_back_iter_outputs = self.back_step_outputs - set(self.back_iter_vars) + + # self.outputs and self.inputs are the *aggregate* outputs and inputs of this HetBlock, which are used + # in utils.graph.block_sort to topologically sort blocks along the DAG + # according to their aggregate outputs and inputs. + self.outputs = {o.capitalize() for o in self.non_back_iter_outputs} + self.inputs = self.back_step_inputs - {k + '_p' for k in self.back_iter_vars} + self.inputs.remove(exogenous + '_p') + self.inputs.add(exogenous) + + # A HetBlock can have heterogeneous inputs and heterogeneous outputs, henceforth `hetinput` and `hetoutput`. + # See docstring for methods `add_hetinput` and `add_hetoutput` for more details. 
+        self.hetinput = None
+        self.hetinput_inputs = set()
+        self.hetinput_outputs = set()
+        self.hetinput_outputs_order = tuple()
+
+        # start without a hetoutput
+        self.hetoutput = None
+        self.hetoutput_inputs = set()
+        self.hetoutput_outputs = set()
+        self.hetoutput_outputs_order = tuple()
+
+        # The set of variables that will be wrapped in a separate namespace for this HetBlock
+        # as opposed to being available at the top level
+        self.internal = utils.misc.smart_set(self.back_step_outputs) | utils.misc.smart_set(self.exogenous) | {"D"}
+
+        if len(self.policy) > 2:
+            raise ValueError(f"More than two endogenous policies in {back_step_fun.__name__}, not yet supported")
+
+        # Checking that the various inputs/outputs attributes are correctly set
+        if self.exogenous + '_p' not in self.back_step_inputs:
+            raise ValueError(f"Markov matrix '{self.exogenous}_p' not included as argument in {back_step_fun.__name__}")
+
+        for pol in self.policy:
+            if pol not in self.back_step_outputs:
+                raise ValueError(f"Policy '{pol}' not included as output in {back_step_fun.__name__}")
+            if pol[0].isupper():
+                raise ValueError(f"Policy '{pol}' is uppercase in {back_step_fun.__name__}, which is not allowed")
+
+        for back in self.back_iter_vars:
+            if back + '_p' not in self.back_step_inputs:
+                raise ValueError(f"Backward variable '{back}_p' not included as argument in {back_step_fun.__name__}")
+
+            if back not in self.back_step_outputs:
+                raise ValueError(f"Backward variable '{back}' not included as output in {back_step_fun.__name__}")
+
+        for out in self.non_back_iter_outputs:
+            if out[0].isupper():
+                raise ValueError(f"Output '{out}' is uppercase in {back_step_fun.__name__}, which is not allowed")
+
+        # Add the backward iteration initializer function (the initial guesses for self.back_iter_vars)
+        if backward_init is None:
+            # TODO: Think about implementing some "automated way" of providing
+            #  an initial guess for the backward iteration.
+            self.backward_init = backward_init
+        else:
+            self.backward_init = backward_init
+
+        # note: should do more input checking to ensure certain choices not made: 'D' not input, etc.
+
+    def __repr__(self):
+        """Nice string representation of HetBlock for printing to console"""
+        if self.hetinput is not None:
+            if self.hetoutput is not None:
+                return f"<HetBlock '{self.back_step_fun.__name__}' with hetinput '{self.hetinput.__name__}'" \
+                       f" and with hetoutput '{self.hetoutput.name}'>"
+            else:
+                return f"<HetBlock '{self.back_step_fun.__name__}' with hetinput '{self.hetinput.__name__}'>"
+        else:
+            return f"<HetBlock '{self.back_step_fun.__name__}'>"
+
+    '''Part 2: high-level routines, with first three called analogously to SimpleBlock counterparts
+    - ss : do backward and forward iteration until convergence to get complete steady state
+    - td : do backward and forward iteration up to T to compute dynamics given some shocks
+    - jac : compute jacobians of outputs with respect to shocked inputs, using fake news algorithm
+    - ajac : compute asymptotic columns of jacobians output by jac, also using fake news algorithm
+
+    - add_hetinput : add a hetinput to the HetBlock that first processes inputs through function hetinput
+    - add_hetoutput: add a hetoutput to the HetBlock that is computed after the entire ss computation, or after
+        each backward iteration step in td
+    '''
+
+    # TODO: Deprecated methods, to be removed!
+    def ss(self, **kwargs):
+        warnings.warn("This method has been deprecated. Please invoke by calling .steady_state", DeprecationWarning)
+        return self.steady_state(kwargs)
+
+    def td(self, ss, **kwargs):
+        warnings.warn("This method has been deprecated.
Please invoke by calling .impulse_nonlinear", + DeprecationWarning) + return self.impulse_nonlinear(ss, **kwargs) + + def jac(self, ss, shock_list=None, T=None, **kwargs): + if shock_list is None: + shock_list = list(self.inputs) + warnings.warn("This method has been deprecated. Please invoke by calling .jacobian.\n" + "Also, note that the kwarg `shock_list` in .jacobian has been renamed to `shocked_vars`", + DeprecationWarning) + return self.jacobian(ss, shock_list, T, **kwargs) + + def steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, + forward_tol=1E-10, forward_maxit=100_000, hetoutput=False): + """Evaluate steady state HetBlock using keyword args for all inputs. Analog to SimpleBlock.ss. + + Parameters + ---------- + backward_tol : [optional] float + in backward iteration, max abs diff between policy in consecutive steps needed for convergence + backward_maxit : [optional] int + maximum number of backward iterations, if 'backward_tol' not reached by then, raise error + forward_tol : [optional] float + in forward iteration, max abs diff between dist in consecutive steps needed for convergence + forward_maxit : [optional] int + maximum number of forward iterations, if 'forward_tol' not reached by then, raise error + + kwargs : dict + The following inputs are required as keyword arguments, which show up in 'kwargs': + - The exogenous Markov matrix, e.g. Pi=... if self.exogenous=='Pi' + - A seed for each backward variable, e.g. Va=... and Vb=... if self.back_iter_vars==('Va','Vb') + - A grid for each policy variable, e.g. a_grid=... and b_grid=... if self.policy==('a','b') + - All other inputs to the backward iteration function self.back_step_fun, except _p added to + for self.exogenous and self.back_iter_vars, for which the method uses steady-state values. + If there is a self.hetinput, then we need the inputs to that, not to self.back_step_fun. + + Other inputs in 'kwargs' are optional: + - A seed for the distribution: D=... + - If no seed for the distribution is provided, a seed for the invariant distribution + of the Markov process, e.g. Pi_seed=... 
if self.exogenous=='Pi' + + Returns + ---------- + ss : dict, contains + - ss inputs of self.back_step_fun and (if present) self.hetinput + - ss outputs of self.back_step_fun + - ss distribution 'D' + - ss aggregates (in uppercase) for all outputs of self.back_step_fun except self.back_iter_vars + """ + + ss = copy.deepcopy(calibration) + + # extract information from calibration + Pi = calibration[self.exogenous] + grid = {k: calibration[k+'_grid'] for k in self.policy} + D_seed = calibration.get('D', None) + pi_seed = calibration.get(self.exogenous + '_seed', None) + + # run backward iteration + sspol = self.policy_ss(calibration, tol=backward_tol, maxit=backward_maxit) + ss.update(sspol) + + # run forward iteration + D = self.dist_ss(Pi, sspol, grid, forward_tol, forward_maxit, D_seed, pi_seed) + ss.update({"D": D}) + + # aggregate all outputs other than backward variables on grid, capitalize + aggregates = {o.capitalize(): np.vdot(D, sspol[o]) for o in self.non_back_iter_outputs} + ss.update(aggregates) + + if hetoutput and self.hetoutput is not None: + hetoutputs = self.hetoutput.evaluate(ss) + aggregate_hetoutputs = self.hetoutput.aggregate(hetoutputs, D, ss, mode="ss") + else: + hetoutputs = {} + aggregate_hetoutputs = {} + ss.update({**hetoutputs, **aggregate_hetoutputs}) + + return SteadyStateDict(ss, internal=self) + + def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=False, grid_paths=None): + """Evaluate transitional dynamics for HetBlock given dynamic paths for inputs in kwargs, + assuming that we start and end in steady state ss, and that all inputs not specified in + kwargs are constant at their ss values. Analog to SimpleBlock.td. + + CANNOT provide time-varying paths of grid or Markov transition matrix for now. + + Parameters + ---------- + ss : SteadyStateDict + all steady-state info, intended to be from .ss() + exogenous : dict of {str : array(T, ...)} + all time-varying inputs here (in deviations), with first dimension being time + this must have same length T for all entries (all outputs will be calculated up to T) + monotonic : [optional] bool + flag indicating date-t policies are monotonic in same date-(t-1) policies, allows us + to use faster interpolation routines, otherwise use slower robust to nonmonotonicity + returnindividual : [optional] bool + return distribution and full outputs on grid + grid_paths: [optional] dict of {str: array(T, Number of grid points)} + time-varying grids for policies + + Returns + ---------- + td : dict + if returnindividual = False, time paths for aggregates (uppercase) for all outputs + of self.back_step_fun except self.back_iter_vars + if returnindividual = True, additionally time paths for distribution and for all outputs + of self.back_Step_fun on the full grid + """ + # infer T from exogenous, check that all shocks have same length + shock_lengths = [x.shape[0] for x in exogenous.values()] + if shock_lengths[1:] != shock_lengths[:-1]: + raise ValueError('Not all shocks in kwargs (exogenous) are same length!') + T = shock_lengths[0] + + # copy from ss info + Pi_T = ss[self.exogenous].T.copy() + D = ss.internal[self.name]['D'] + + # construct grids for policy variables either from the steady state grid if the grid is meant to be + # non-time-varying or from the provided `grid_path` if the grid is meant to be time-varying. 
+ grid = {} + use_ss_grid = {} + for k in self.policy: + if grid_paths is not None and k in grid_paths: + grid[k] = grid_paths[k] + use_ss_grid[k] = False + else: + grid[k] = ss[k+"_grid"] + use_ss_grid[k] = True + + # allocate empty arrays to store result, assume all like D + individual_paths = {k: np.empty((T,) + D.shape) for k in self.non_back_iter_outputs} + hetoutput_paths = {k: np.empty((T,) + D.shape) for k in self.hetoutput_outputs} + + # backward iteration + backdict = dict(ss.items()) + backdict.update(copy.deepcopy(ss.internal[self.name])) + for t in reversed(range(T)): + # be careful: if you include vars from self.back_iter_vars in exogenous, agents will use them! + backdict.update({k: ss[k] + v[t, ...] for k, v in exogenous.items()}) + individual = {k: v for k, v in zip(self.back_step_output_list, + self.back_step_fun(**self.make_inputs(backdict)))} + backdict.update({k: individual[k] for k in self.back_iter_vars}) + + if self.hetoutput is not None: + hetoutput = self.hetoutput.evaluate(backdict) + for k in self.hetoutput_outputs: + hetoutput_paths[k][t, ...] = hetoutput[k] + + for k in self.non_back_iter_outputs: + individual_paths[k][t, ...] = individual[k] + + D_path = np.empty((T,) + D.shape) + D_path[0, ...] = D + for t in range(T-1): + # have to interpolate policy separately for each t to get sparse transition matrices + sspol_i = {} + sspol_pi = {} + for pol in self.policy: + if use_ss_grid[pol]: + grid_var = grid[pol] + else: + grid_var = grid[pol][t, ...] + if monotonic: + # TODO: change for two-asset case so assumption is monotonicity in own asset, not anything else + sspol_i[pol], sspol_pi[pol] = utils.interpolate.interpolate_coord(grid_var, + individual_paths[pol][t, ...]) + else: + sspol_i[pol], sspol_pi[pol] =\ + utils.interpolate.interpolate_coord_robust(grid_var, individual_paths[pol][t, ...]) + + # step forward + D_path[t+1, ...] = self.forward_step(D_path[t, ...], Pi_T, sspol_i, sspol_pi) + + # obtain aggregates of all outputs, made uppercase + aggregates = {o.capitalize(): utils.optimized_routines.fast_aggregate(D_path, individual_paths[o]) + for o in self.non_back_iter_outputs} + if self.hetoutput: + aggregate_hetoutputs = self.hetoutput.aggregate(hetoutput_paths, D_path, backdict, mode="td") + else: + aggregate_hetoutputs = {} + + # return either this, or also include distributional information + if returnindividual: + return ImpulseDict({**aggregates, **aggregate_hetoutputs, **individual_paths, **hetoutput_paths, + 'D': D_path}) - ss + else: + return ImpulseDict({**aggregates, **aggregate_hetoutputs}) - ss + + def impulse_linear(self, ss, exogenous, T=None, Js=None, **kwargs): + # infer T from exogenous, check that all shocks have same length + shock_lengths = [x.shape[0] for x in exogenous.values()] + if shock_lengths[1:] != shock_lengths[:-1]: + raise ValueError('Not all shocks in kwargs (exogenous) are same length!') + T = shock_lengths[0] + + return ImpulseDict(self.jacobian(ss, list(exogenous.keys()), T=T, Js=Js, **kwargs).apply(exogenous)) + + def jacobian(self, ss, exogenous=None, T=300, outputs=None, output_list=None, Js=None, h=1E-4): + """Assemble nested dict of Jacobians of agg outputs vs. inputs, using fake news algorithm. 
+ + Parameters + ---------- + ss : dict, + all steady-state info, intended to be from .ss() + T : [optional] int + number of time periods for T*T Jacobian + exogenous : list of str + names of input variables to differentiate wrt (main cost scales with # of inputs) + outputs : list of str + names of output variables to get derivatives of, if not provided assume all outputs of + self.back_step_fun except self.back_iter_vars + h : [optional] float + h for numerical differentiation of backward iteration + Js : [optional] dict of {str: JacobianDict}} + supply saved Jacobians + + Returns + ------- + J : dict of {str: dict of {str: array(T,T)}} + J[o][i] for output o and input i gives T*T Jacobian of o with respect to i + """ + # The default set of outputs are all outputs of the backward iteration function + # except for the backward iteration variables themselves + if exogenous is None: + exogenous = list(self.inputs) + if outputs is None or output_list is None: + outputs = self.non_back_iter_outputs + else: + outputs = rename_output_list_to_outputs(outputs=outputs, output_list=output_list) + + relevant_shocks = [i for i in self.back_step_inputs | self.hetinput_inputs if i in exogenous] + + # if we supply Jacobians, use them if possible, warn if they cannot be used + if Js is not None: + outputs_cap = [o.capitalize() for o in outputs] + if verify_saved_jacobian(self.name, Js, outputs_cap, relevant_shocks, T): + return Js[self.name] + + # step 0: preliminary processing of steady state + (ssin_dict, Pi, ssout_list, ss_for_hetinput, sspol_i, sspol_pi, sspol_space) = self.jac_prelim(ss) + + # step 1 of fake news algorithm + # compute curlyY and curlyD (backward iteration) for each input i + curlyYs, curlyDs = {}, {} + for i in relevant_shocks: + curlyYs[i], curlyDs[i] = self.backward_iteration_fakenews(i, outputs, ssin_dict, ssout_list, + ss.internal[self.name]['D'], Pi.T.copy(), + sspol_i, sspol_pi, sspol_space, T, h, + ss_for_hetinput) + + # step 2 of fake news algorithm + # compute prediction vectors curlyP (forward iteration) for each outcome o + curlyPs = {} + for o in outputs: + curlyPs[o] = self.forward_iteration_fakenews(ss.internal[self.name][o], Pi, sspol_i, sspol_pi, T-1) + + # steps 3-4 of fake news algorithm + # make fake news matrix and Jacobian for each outcome-input pair + F, J = {}, {} + for o in outputs: + for i in relevant_shocks: + if o.capitalize() not in F: + F[o.capitalize()] = {} + if o.capitalize() not in J: + J[o.capitalize()] = {} + F[o.capitalize()][i] = HetBlock.build_F(curlyYs[i][o], curlyDs[i], curlyPs[o]) + J[o.capitalize()][i] = HetBlock.J_from_F(F[o.capitalize()][i]) + + return JacobianDict(J, name=self.name) + + def add_hetinput(self, hetinput, overwrite=False, verbose=True): + """Add a hetinput to this HetBlock. Any call to self.back_step_fun will first process + inputs through the hetinput function. + + A `hetinput` is any non-scalar-valued input argument provided to the HetBlock's backward iteration function, + self.back_step_fun, which is of the same dimensions as the distribution of agents in the HetBlock over + the relevant idiosyncratic state variables, generally referred to as `D`. e.g. The one asset HANK model + example provided in the models directory of sequence_jacobian has a hetinput `T`, which is skill-specific + transfers. 
+ """ + if self.hetinput is not None and overwrite is False: + raise ValueError('Trying to attach hetinput when one already exists!') + else: + if verbose: + if self.hetinput is not None and overwrite is True: + print(f"Overwriting current hetinput, {self.hetinput.__name__} with new hetinput," + f" {hetinput.__name__}!") + else: + print(f"Added hetinput {hetinput.__name__} to the {self.back_step_fun.__name__} HetBlock") + + self.hetinput = hetinput + self.hetinput_inputs = set(utils.misc.input_list(hetinput)) + self.hetinput_outputs = set(utils.misc.output_list(hetinput)) + self.hetinput_outputs_order = utils.misc.output_list(hetinput) + + # modify inputs to include hetinput's additional inputs, remove outputs + self.inputs |= self.hetinput_inputs + self.inputs -= self.hetinput_outputs + + self.internal |= self.hetinput_outputs + + def add_hetoutput(self, hetoutput, overwrite=False, verbose=True): + """Add a hetoutput to this HetBlock. Any call to self.back_step_fun will first process + inputs through the hetoutput function. + + A `hetoutput` is any *non-scalar-value* output that the user might desire to be calculated from + the output arguments of the HetBlock's backward iteration function. Importantly, as of now the `hetoutput` + cannot be a function of time displaced values of the HetBlock's outputs but rather must be able to + be calculated from the outputs statically. e.g. The two asset HANK model example provided in the models + directory of sequence_jacobian has a hetoutput, `chi`, the adjustment costs for any initial level of assets + `a`, to any new level of assets `a'`. + """ + if self.hetoutput is not None and overwrite is False: + raise ValueError('Trying to attach hetoutput when one already exists!') + else: + if verbose: + if self.hetoutput is not None and overwrite is True: + print(f"Overwriting current hetoutput, {self.hetoutput.name} with new hetoutput," + f" {hetoutput.name}!") + else: + print(f"Added hetoutput {hetoutput.name} to the {self.back_step_fun.__name__} HetBlock") + + self.hetoutput = hetoutput + self.hetoutput_inputs = set(hetoutput.input_list) + self.hetoutput_outputs = set(hetoutput.output_list) + self.hetoutput_outputs_order = hetoutput.output_list + + # Modify the HetBlock's inputs to include additional inputs required for computing both the hetoutput + # and aggregating the hetoutput, but do not include: + # 1) objects computed within the HetBlock's backward iteration that enter into the hetoutput computation + # 2) objects computed within hetoutput that enter into hetoutput's aggregation (self.hetoutput.outputs) + # 3) D, the cross-sectional distribution of agents, which is used in the hetoutput aggregation + # but is computed after the backward iteration + self.inputs |= (self.hetoutput_inputs - self.hetinput_outputs - self.back_step_outputs - self.hetoutput_outputs - set("D")) + # Modify the HetBlock's outputs to include the aggregated hetoutputs + self.outputs |= set([o.capitalize() for o in self.hetoutput_outputs]) + + self.internal |= self.hetoutput_outputs + + '''Part 3: components of ss(): + - policy_ss : backward iteration to get steady-state policies and other outcomes + - dist_ss : forward iteration to get steady-state distribution and compute aggregates + ''' + + def policy_ss(self, ssin, tol=1E-8, maxit=5000): + """Find steady-state policies and backward variables through backward iteration until convergence. 
+ + Parameters + ---------- + ssin : dict + all steady-state inputs to back_step_fun, including seed values for backward variables + tol : [optional] float + max diff between consecutive iterations of policy variables needed for convergence + maxit : [optional] int + maximum number of iterations, if 'tol' not reached by then, raise error + + Returns + ---------- + sspol : dict + all steady-state outputs of backward iteration, combined with inputs to backward iteration + """ + + # find initial values for backward iteration and account for hetinputs + original_ssin = ssin + ssin = self.make_inputs(ssin) + + old = {} + for it in range(maxit): + try: + # run and store results of backward iteration, which come as tuple, in dict + sspol = {k: v for k, v in zip(self.back_step_output_list, self.back_step_fun(**ssin))} + except KeyError as e: + print(f'Missing input {e} to {self.back_step_fun.__name__}!') + raise + + # only check convergence every 10 iterations for efficiency + if it % 10 == 1 and all(utils.optimized_routines.within_tolerance(sspol[k], old[k], tol) + for k in self.policy): + break + + # update 'old' for comparison during next iteration, prepare 'ssin' as input for next iteration + old.update({k: sspol[k] for k in self.policy}) + ssin.update({k + '_p': sspol[k] for k in self.back_iter_vars}) + else: + raise ValueError(f'No convergence of policy functions after {maxit} backward iterations!') + + # want to record inputs in ssin, but remove _p, add in hetinput inputs if there + for k in self.inputs_to_be_primed: + ssin[k] = ssin[k + '_p'] + del ssin[k + '_p'] + if self.hetinput is not None: + for k in self.hetinput_inputs: + if k in original_ssin: + ssin[k] = original_ssin[k] + return {**ssin, **sspol} + + def dist_ss(self, Pi, sspol, grid, tol=1E-10, maxit=100_000, D_seed=None, pi_seed=None): + """Find steady-state distribution through forward iteration until convergence. 
+ + Parameters + ---------- + Pi : array + steady-state Markov matrix for exogenous variable + sspol : dict + steady-state policies on grid for all policy variables in self.policy + grid : dict + grids for all policy variables in self.policy + tol : [optional] float + absolute tolerance for max diff between consecutive iterations for distribution + maxit : [optional] int + maximum number of iterations, if 'tol' not reached by then, raise error + D_seed : [optional] array + initial seed for overall distribution + pi_seed : [optional] array + initial seed for stationary dist of Pi, if no D_seed + + Returns + ---------- + D : array + steady-state distribution + """ + + # first obtain initial distribution D + if D_seed is None: + # compute stationary distribution for exogenous variable + pi = utils.discretize.stationary(Pi, pi_seed) + + # now initialize full distribution with this, assuming uniform distribution on endogenous vars + endogenous_dims = [grid[k].shape[0] for k in self.policy] + D = np.tile(pi, endogenous_dims[::-1] + [1]).T / np.prod(endogenous_dims) + else: + D = D_seed + + # obtain interpolated policy rule for each dimension of endogenous policy + sspol_i = {} + sspol_pi = {} + for pol in self.policy: + # use robust binary search-based method that only requires grids, not policies, to be monotonic + sspol_i[pol], sspol_pi[pol] = utils.interpolate.interpolate_coord_robust(grid[pol], sspol[pol]) + + # iterate until convergence by tol, or maxit + Pi_T = Pi.T.copy() + for it in range(maxit): + Dnew = self.forward_step(D, Pi_T, sspol_i, sspol_pi) + + # only check convergence every 10 iterations for efficiency + if it % 10 == 0 and utils.optimized_routines.within_tolerance(D, Dnew, tol): + break + D = Dnew + else: + raise ValueError(f'No convergence after {maxit} forward iterations!') + + return D + + '''Part 4: components of jac(), corresponding to *4 steps of fake news algorithm* in paper + - Step 1: backward_step_fakenews and backward_iteration_fakenews to get curlyYs and curlyDs + - Step 2: forward_iteration_fakenews to get curlyPs + - Step 3: build_F to get fake news matrix from curlyYs, curlyDs, curlyPs + - Step 4: J_from_F to get Jacobian from fake news matrix + ''' + + def backward_step_fakenews(self, din_dict, output_list, ssin_dict, ssout_list, + Dss, Pi_T, sspol_i, sspol_pi, sspol_space, h=1E-4): + # shock perturbs outputs + shocked_outputs = {k: v for k, v in zip(self.back_step_output_list, + utils.differentiate.numerical_diff(self.back_step_fun, + ssin_dict, din_dict, h, + ssout_list))} + curlyV = {k: shocked_outputs[k] for k in self.back_iter_vars} + + # which affects the distribution tomorrow + pol_pi_shock = {k: -shocked_outputs[k] / sspol_space[k] for k in self.policy} + + # Include an additional term to account for the effect of a deleveraging shock affecting the grid + if "delev_exante" in din_dict: + dx = np.zeros_like(sspol_pi["a"]) + dx[sspol_i["a"] == 0] = 1. 
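+            # dx flags entries whose asset policy is bracketed by the lowest gridpoint (sspol_i == 0);
+            # the extra term below adjusts their interpolation-weight shock, since a 'delev_exante'
+            # shock affects the asset grid itself rather than the policy function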
+ add_term = sspol_pi["a"] * dx / sspol_space["a"] + pol_pi_shock["a"] += add_term + + curlyD = self.forward_step_shock(Dss, Pi_T, sspol_i, sspol_pi, pol_pi_shock) + + # and the aggregate outcomes today + curlyY = {k: np.vdot(Dss, shocked_outputs[k]) for k in output_list} + + return curlyV, curlyD, curlyY + + def backward_iteration_fakenews(self, input_shocked, output_list, ssin_dict, ssout_list, Dss, Pi_T, + sspol_i, sspol_pi, sspol_space, T, h=1E-4, ss_for_hetinput=None): + """Iterate policy steps backward T times for a single shock.""" + # TODO: Might need to add a check for ss_for_hetinput if self.hetinput is not None + # since unless self.hetinput_inputs is exactly equal to input_shocked, calling + # self.hetinput() inside the symmetric differentiation function will throw an error. + # It's probably better/more informative to throw that error out here. + if self.hetinput is not None and input_shocked in self.hetinput_inputs: + # if input_shocked is an input to hetinput, take numerical diff to get response + din_dict = dict(zip(self.hetinput_outputs_order, + utils.differentiate.numerical_diff_symmetric(self.hetinput, + ss_for_hetinput, {input_shocked: 1}, h))) + else: + # otherwise, we just have that one shock + din_dict = {input_shocked: 1} + + # contemporaneous response to unit scalar shock + curlyV, curlyD, curlyY = self.backward_step_fakenews(din_dict, output_list, ssin_dict, ssout_list, + Dss, Pi_T, sspol_i, sspol_pi, sspol_space, h=h) + + # infer dimensions from this and initialize empty arrays + curlyDs = np.empty((T,) + curlyD.shape) + curlyYs = {k: np.empty(T) for k in curlyY.keys()} + + # fill in current effect of shock + curlyDs[0, ...] = curlyD + for k in curlyY.keys(): + curlyYs[k][0] = curlyY[k] + + # fill in anticipation effects + for t in range(1, T): + curlyV, curlyDs[t, ...], curlyY = self.backward_step_fakenews({k+'_p': v for k, v in curlyV.items()}, + output_list, ssin_dict, ssout_list, + Dss, Pi_T, sspol_i, sspol_pi, sspol_space, h) + for k in curlyY.keys(): + curlyYs[k][t] = curlyY[k] + + return curlyYs, curlyDs + + def forward_iteration_fakenews(self, o_ss, Pi, pol_i_ss, pol_pi_ss, T): + """Iterate transpose forward T steps to get full set of curlyPs for a given outcome. + + Note we depart from definition in paper by applying the demeaning operator in addition to Lambda + at each step. This does not affect products with curlyD (which are the only way curlyPs enter + Jacobian) since perturbations to distribution always have mean zero. It has numerical benefits + since curlyPs now go to zero for high t (used in paper in proof of Proposition 1). + """ + curlyPs = np.empty((T,) + o_ss.shape) + curlyPs[0, ...] = utils.misc.demean(o_ss) + for t in range(1, T): + curlyPs[t, ...] = utils.misc.demean(self.forward_step_transpose(curlyPs[t - 1, ...], + Pi, pol_i_ss, pol_pi_ss)) + return curlyPs + + @staticmethod + def build_F(curlyYs, curlyDs, curlyPs): + T = curlyDs.shape[0] + Tpost = curlyPs.shape[0] - T + 2 + F = np.empty((Tpost + T - 1, T)) + F[0, :] = curlyYs + F[1:, :] = curlyPs.reshape((Tpost + T - 2, -1)) @ curlyDs.reshape((T, -1)).T + return F + + @staticmethod + def J_from_F(F): + J = F.copy() + for t in range(1, J.shape[1]): + J[1:, t] += J[:-1, t - 1] + return J + + '''Part 5: helpers for .jac and .ajac: preliminary processing''' + + def jac_prelim(self, ss): + """Helper that does preliminary processing of steady state for fake news algorithm. 
+ + Parameters + ---------- + ss : dict, all steady-state info, intended to be from .ss() + + Returns + ---------- + ssin_dict : dict, ss vals of exactly the inputs needed by self.back_step_fun for backward step + Pi : array (S*S), Markov matrix for exogenous state + ssout_list : tuple, what self.back_step_fun returns when given ssin_dict (not exactly the same + as steady-state numerically since SS convergence was to some tolerance threshold) + ss_for_hetinput : dict, ss vals of exactly the inputs needed by self.hetinput (if it exists) + sspol_i : dict, indices on lower bracketing gridpoint for all in self.policy + sspol_pi : dict, weights on lower bracketing gridpoint for all in self.policy + sspol_space : dict, space between lower and upper bracketing gridpoints for all in self.policy + """ + # preliminary a: obtain ss inputs and other info, run once to get baseline for numerical differentiation + ssin_dict = self.make_inputs(ss) + Pi = ss[self.exogenous] + grid = {k: ss[k+'_grid'] for k in self.policy} + ssout_list = self.back_step_fun(**ssin_dict) + + ss_for_hetinput = None + if self.hetinput is not None: + ss_for_hetinput = {k: ss[k] for k in self.hetinput_inputs if k in ss} + + # preliminary b: get sparse representations of policy rules, and distance between neighboring policy gridpoints + sspol_i = {} + sspol_pi = {} + sspol_space = {} + for pol in self.policy: + # use robust binary-search-based method that only requires grids to be monotonic + sspol_i[pol], sspol_pi[pol] = utils.interpolate.interpolate_coord_robust(grid[pol], ss.internal[self.name][pol]) + sspol_space[pol] = grid[pol][sspol_i[pol]+1] - grid[pol][sspol_i[pol]] + + toreturn = (ssin_dict, Pi, ssout_list, ss_for_hetinput, sspol_i, sspol_pi, sspol_space) + + return toreturn + + '''Part 6: helper to extract inputs and potentially process them through hetinput''' + + def make_inputs(self, back_step_inputs_dict): + """Extract from back_step_inputs_dict exactly the inputs needed for self.back_step_fun, + process stuff through self.hetinput first if it's there. + """ + input_dict = copy.deepcopy(back_step_inputs_dict) + + # TODO: This make_inputs function needs to be revisited since it creates inputs both for initial steady + # state computation as well as for Jacobian/impulse evaluation for HetBlocks, + # where in the former the hetinputs and value function have yet to be computed, + # whereas in the latter they have already been computed + # and hence do not need to be recomputed. There may be room to clean this function up a bit. + if isinstance(back_step_inputs_dict, SteadyStateDict): + input_dict = copy.deepcopy(back_step_inputs_dict.toplevel) + input_dict.update({k: v for k, v in back_step_inputs_dict.internal[self.name].items()}) + else: + # If this HetBlock has a hetinput, then we need to compute the outputs of the hetinput first and include + # them as inputs for self.back_step_fun + if self.hetinput is not None: + outputs_as_tuple = utils.misc.make_tuple(self.hetinput(**{k: input_dict[k] + for k in self.hetinput_inputs if k in input_dict})) + input_dict.update(dict(zip(self.hetinput_outputs_order, outputs_as_tuple))) + + # Check if there are entries in indict corresponding to self.inputs_to_be_primed. + # In particular, we are interested in knowing if an initial value + # for the backward iteration variable has been provided. + # If it has not been provided, then use self.backward_init to calculate the initial values. 
+ if not self.inputs_to_be_primed.issubset(set(input_dict.keys())): + initial_value_input_args = [input_dict[arg_name] for arg_name in utils.misc.input_list(self.backward_init)] + input_dict.update(zip(utils.misc.output_list(self.backward_init), + utils.misc.make_tuple(self.backward_init(*initial_value_input_args)))) + + for i_p in self.inputs_to_be_primed: + input_dict[i_p + "_p"] = input_dict[i_p] + del input_dict[i_p] + + try: + return {k: input_dict[k] for k in self.back_step_inputs if k in input_dict} + except KeyError as e: + print(f'Missing backward variable or Markov matrix {e} for {self.back_step_fun.__name__}!') + raise + + '''Part 7: routines to do forward steps of different kinds, all wrap functions in utils''' + + def forward_step(self, D, Pi_T, pol_i, pol_pi): + """Update distribution, calling on 1d and 2d-specific compiled routines. + + Parameters + ---------- + D : array, beginning-of-period distribution + Pi_T : array, transpose Markov matrix + pol_i : dict, indices on lower bracketing gridpoint for all in self.policy + pol_pi : dict, weights on lower bracketing gridpoint for all in self.policy + + Returns + ---------- + Dnew : array, beginning-of-next-period distribution + """ + if len(self.policy) == 1: + p, = self.policy + return utils.forward_step.forward_step_1d(D, Pi_T, pol_i[p], pol_pi[p]) + elif len(self.policy) == 2: + p1, p2 = self.policy + return utils.forward_step.forward_step_2d(D, Pi_T, pol_i[p1], pol_i[p2], pol_pi[p1], pol_pi[p2]) + else: + raise ValueError(f"{len(self.policy)} policy variables, only up to 2 implemented!") + + def forward_step_transpose(self, D, Pi, pol_i, pol_pi): + """Transpose of forward_step (note: this takes Pi rather than Pi_T as argument!)""" + if len(self.policy) == 1: + p, = self.policy + return utils.forward_step.forward_step_transpose_1d(D, Pi, pol_i[p], pol_pi[p]) + elif len(self.policy) == 2: + p1, p2 = self.policy + return utils.forward_step.forward_step_transpose_2d(D, Pi, pol_i[p1], pol_i[p2], pol_pi[p1], pol_pi[p2]) + else: + raise ValueError(f"{len(self.policy)} policy variables, only up to 2 implemented!") + + def forward_step_shock(self, Dss, Pi_T, pol_i_ss, pol_pi_ss, pol_pi_shock): + """Forward_step linearized with respect to pol_pi""" + if len(self.policy) == 1: + p, = self.policy + return utils.forward_step.forward_step_shock_1d(Dss, Pi_T, pol_i_ss[p], pol_pi_shock[p]) + elif len(self.policy) == 2: + p1, p2 = self.policy + return utils.forward_step.forward_step_shock_2d(Dss, Pi_T, pol_i_ss[p1], pol_i_ss[p2], + pol_pi_ss[p1], pol_pi_ss[p2], + pol_pi_shock[p1], pol_pi_shock[p2]) + else: + raise ValueError(f"{len(self.policy)} policy variables, only up to 2 implemented!") + + +def hetoutput(custom_aggregation=None): + def decorator(f): + return HetOutput(f, custom_aggregation=custom_aggregation) + return decorator + + +class HetOutput: + def __init__(self, f, custom_aggregation=None): + self.name = f.__name__ + self.f = f + self.eval_input_list = utils.misc.input_list(f) + + self.custom_aggregation = custom_aggregation + self.agg_input_list = [] if custom_aggregation is None else utils.misc.input_list(custom_aggregation) + + # We are distinguishing between the eval_input_list and agg_input_list because custom aggregation may require + # certain arguments that are not required for simply evaluating the hetoutput + self.input_list = list(set(self.eval_input_list).union(set(self.agg_input_list))) + self.output_list = utils.misc.output_list(f) + + def evaluate(self, arg_dict): + hetoutputs = dict(zip(self.output_list, 
utils.misc.make_tuple(self.f(*[arg_dict[i] for i + in self.eval_input_list])))) + return hetoutputs + + def aggregate(self, hetoutputs, D, custom_aggregation_args, mode="ss"): + if self.custom_aggregation is not None: + hetoutputs_w_std_aggregation = list(set(self.output_list) - + set([utils.misc.uncapitalize(o) for o + in utils.misc.output_list(self.custom_aggregation)])) + hetoutputs_w_custom_aggregation = list(set(self.output_list) - set(hetoutputs_w_std_aggregation)) + else: + hetoutputs_w_std_aggregation = self.output_list + hetoutputs_w_custom_aggregation = [] + + # TODO: May need to check if this works properly for td + if self.custom_aggregation is not None: + hetoutputs_w_custom_aggregation_args = dict(zip(hetoutputs_w_custom_aggregation, + [hetoutputs[i] for i in hetoutputs_w_custom_aggregation])) + custom_agg_inputs = {"D": D, **hetoutputs_w_custom_aggregation_args, **custom_aggregation_args} + custom_aggregates = dict(zip([o.capitalize() for o in hetoutputs_w_custom_aggregation], + utils.misc.make_tuple(self.custom_aggregation(*[custom_agg_inputs[i] for i + in self.agg_input_list])))) + else: + custom_aggregates = {} + + if mode == "ss": + std_aggregates = {o.capitalize(): np.vdot(D, hetoutputs[o]) for o in hetoutputs_w_std_aggregation} + elif mode == "td": + std_aggregates = {o.capitalize(): utils.optimized_routines.fast_aggregate(D, hetoutputs[o]) + for o in hetoutputs_w_std_aggregation} + else: + raise RuntimeError(f"Mode {mode} is not supported in HetOutput aggregation. Choose either 'ss' or 'td'") + + return {**std_aggregates, **custom_aggregates} From 5c8a20dfd712525d7b6230277935a04c13f33e42 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 8 Jun 2021 13:44:08 -0500 Subject: [PATCH 02/35] DiscontBlock: steady state works. --- .../{hetdc_block.py => discont_block.py} | 225 +++++++++--------- .../utilities/forward_step.py | 41 ++++ src/sequence_jacobian/utilities/misc.py | 37 +++ 3 files changed, 186 insertions(+), 117 deletions(-) rename src/sequence_jacobian/blocks/{hetdc_block.py => discont_block.py} (86%) diff --git a/src/sequence_jacobian/blocks/hetdc_block.py b/src/sequence_jacobian/blocks/discont_block.py similarity index 86% rename from src/sequence_jacobian/blocks/hetdc_block.py rename to src/sequence_jacobian/blocks/discont_block.py index 259d4d8..8194365 100644 --- a/src/sequence_jacobian/blocks/hetdc_block.py +++ b/src/sequence_jacobian/blocks/discont_block.py @@ -11,14 +11,14 @@ from ..utilities.misc import verify_saved_jacobian -def het(exogenous, policy, backward, backward_init=None): +def discont(exogenous, policy, disc_policy, backward, backward_init=None): def decorator(back_step_fun): - return HetBlock(back_step_fun, exogenous, policy, backward, backward_init=backward_init) + return DiscontBlock(back_step_fun, exogenous, policy, disc_policy, backward, backward_init=backward_init) return decorator -class HetBlock(Block): - """Part 1: Initializer for HetBlock, intended to be called via @het() decorator on backward step function. +class DiscontBlock(Block): + """Part 1: Initializer for DiscontBlock, intended to be called via @hetdc() decorator on backward step function. IMPORTANT: All `policy` and non-aggregate output variables of this HetBlock need to be *lower-case*, since the methods that compute steady state, transitional dynamics, and Jacobians for HetBlocks automatically handle @@ -26,7 +26,7 @@ class HetBlock(Block): of the `policy` and non-aggregate output variables specified in the backward step function. 
""" - def __init__(self, back_step_fun, exogenous, policy, backward, backward_init=None): + def __init__(self, back_step_fun, exogenous, policy, disc_policy, backward, backward_init=None): """Construct HetBlock from backward iteration function. Parameters @@ -39,6 +39,8 @@ def __init__(self, back_step_fun, exogenous, policy, backward, backward_init=Non policy : str or sequence of str names of policy variables of endogenous, continuous state variables e.g. assets 'a', must be returned by function + disc_policy: str + name of policy function for discrete choices (probabilities) backward : str or sequence of str variables that together comprise the 'v' that we use for iterating backward must appear both as outputs and as arguments @@ -68,27 +70,29 @@ def __init__(self, back_step_fun, exogenous, policy, backward, backward_init=Non self.back_step_inputs = set(utils.misc.input_list(back_step_fun)) # See the docstring of HetBlock for details on the attributes directly below - self.exogenous = exogenous - self.policy, self.back_iter_vars = (utils.misc.make_tuple(x) for x in (policy, backward)) + self.disc_policy = disc_policy + self.policy = policy + self.exogenous, self.back_iter_vars = (utils.misc.make_tuple(x) for x in (exogenous, backward)) # self.inputs_to_be_primed indicates all variables that enter into self.back_step_fun whose name has "_p" # (read as prime). Because it's the case that the initial dict of input arguments for self.back_step_fun # contains the names of these variables that omit the "_p", we need to swap the key from the unprimed to # the primed key name, such that self.back_step_fun will properly call those variables. # e.g. the key "Va" will become "Va_p", associated to the same value. - self.inputs_to_be_primed = {self.exogenous} | set(self.back_iter_vars) + self.inputs_to_be_primed = set(self.exogenous) | set(self.back_iter_vars) # self.non_back_iter_outputs are all of the outputs from self.back_step_fun excluding the backward # iteration variables themselves. - self.non_back_iter_outputs = self.back_step_outputs - set(self.back_iter_vars) + self.non_back_iter_outputs = self.back_step_outputs - set(self.back_iter_vars) - set(self.disc_policy) # self.outputs and self.inputs are the *aggregate* outputs and inputs of this HetBlock, which are used # in utils.graph.block_sort to topologically sort blocks along the DAG # according to their aggregate outputs and inputs. self.outputs = {o.capitalize() for o in self.non_back_iter_outputs} self.inputs = self.back_step_inputs - {k + '_p' for k in self.back_iter_vars} - self.inputs.remove(exogenous + '_p') - self.inputs.add(exogenous) + for ex in self.exogenous: + self.inputs.remove(ex + '_p') + self.inputs.add(ex) # A HetBlock can have heterogeneous inputs and heterogeneous outputs, henceforth `hetinput` and `hetoutput`. # See docstring for methods `add_hetinput` and `add_hetoutput` for more details. 
@@ -107,18 +111,18 @@ def __init__(self, back_step_fun, exogenous, policy, backward, backward_init=Non # as opposed to being available at the top level self.internal = utils.misc.smart_set(self.back_step_outputs) | utils.misc.smart_set(self.exogenous) | {"D"} - if len(self.policy) > 2: - raise ValueError(f"More than two endogenous policies in {back_step_fun.__name__}, not yet supported") + if len(self.policy) > 1: + raise ValueError(f"More than one continuous states in {back_step_fun.__name__}, not yet supported") # Checking that the various inputs/outputs attributes are correctly set - if self.exogenous + '_p' not in self.back_step_inputs: - raise ValueError(f"Markov matrix '{self.exogenous}_p' not included as argument in {back_step_fun.__name__}") + for ex in self.exogenous: + if ex + '_p' not in self.back_step_inputs: + raise ValueError(f"Markov matrix '{ex}_p' not included as argument in {back_step_fun.__name__}") - for pol in self.policy: - if pol not in self.back_step_outputs: - raise ValueError(f"Policy '{pol}' not included as output in {back_step_fun.__name__}") - if pol[0].isupper(): - raise ValueError(f"Policy '{pol}' is uppercase in {back_step_fun.__name__}, which is not allowed") + if self.policy not in self.back_step_outputs: + raise ValueError(f"Policy '{self.policy}' not included as output in {back_step_fun.__name__}") + if self.policy[0].isupper(): + raise ValueError(f"Policy '{self.policy}' is uppercase in {back_step_fun.__name__}, which is not allowed") for back in self.back_iter_vars: if back + '_p' not in self.back_step_inputs: @@ -145,12 +149,12 @@ def __repr__(self): """Nice string representation of HetBlock for printing to console""" if self.hetinput is not None: if self.hetoutput is not None: - return f"" else: - return f"" + return f"" else: - return f"" + return f"" '''Part 2: high-level routines, with first three called analogously to SimpleBlock counterparts - ss : do backward and forward iteration until convergence to get complete steady state @@ -163,23 +167,6 @@ def __repr__(self): each backward iteration step in td ''' - # TODO: Deprecated methods, to be removed! - def ss(self, **kwargs): - warnings.warn("This method has been deprecated. Please invoke by calling .steady_state", DeprecationWarning) - return self.steady_state(kwargs) - - def td(self, ss, **kwargs): - warnings.warn("This method has been deprecated. Please invoke by calling .impulse_nonlinear", - DeprecationWarning) - return self.impulse_nonlinear(ss, **kwargs) - - def jac(self, ss, shock_list=None, T=None, **kwargs): - if shock_list is None: - shock_list = list(self.inputs) - warnings.warn("This method has been deprecated. 
Please invoke by calling .jacobian.\n" - "Also, note that the kwarg `shock_list` in .jacobian has been renamed to `shocked_vars`", - DeprecationWarning) - return self.jacobian(ss, shock_list, T, **kwargs) def steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, forward_tol=1E-10, forward_maxit=100_000, hetoutput=False): @@ -222,17 +209,15 @@ def steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, ss = copy.deepcopy(calibration) # extract information from calibration - Pi = calibration[self.exogenous] - grid = {k: calibration[k+'_grid'] for k in self.policy} + grid = calibration[self.policy + '_grid'] D_seed = calibration.get('D', None) - pi_seed = calibration.get(self.exogenous + '_seed', None) # run backward iteration sspol = self.policy_ss(calibration, tol=backward_tol, maxit=backward_maxit) ss.update(sspol) # run forward iteration - D = self.dist_ss(Pi, sspol, grid, forward_tol, forward_maxit, D_seed, pi_seed) + D = self.dist_ss(sspol, grid, forward_tol, forward_maxit, D_seed) ss.update({"D": D}) # aggregate all outputs other than backward variables on grid, capitalize @@ -250,11 +235,9 @@ def steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, return SteadyStateDict(ss, internal=self) def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=False, grid_paths=None): - """Evaluate transitional dynamics for HetBlock given dynamic paths for inputs in kwargs, + """Evaluate transitional dynamics for DiscontBlock given dynamic paths for inputs in exogenous, assuming that we start and end in steady state ss, and that all inputs not specified in - kwargs are constant at their ss values. Analog to SimpleBlock.td. - - CANNOT provide time-varying paths of grid or Markov transition matrix for now. + exogenous are constant at their ss values. Analog to SimpleBlock.td. Parameters ---------- @@ -286,24 +269,33 @@ def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=Fal T = shock_lengths[0] # copy from ss info - Pi_T = ss[self.exogenous].T.copy() - D = ss.internal[self.name]['D'] + D, P = ss['D'], ss[self.disc_policy] # construct grids for policy variables either from the steady state grid if the grid is meant to be # non-time-varying or from the provided `grid_path` if the grid is meant to be time-varying. - grid = {} - use_ss_grid = {} - for k in self.policy: - if grid_paths is not None and k in grid_paths: - grid[k] = grid_paths[k] - use_ss_grid[k] = False - else: - grid[k] = ss[k+"_grid"] - use_ss_grid[k] = True + if grid_paths is not None and self.policy in grid_paths: + grid = grid_paths[self.policy] + use_ss_grid = False + else: + grid = ss[self.policy + "_grid"] + use_ss_grid = True + # sspol_i, sspol_pi = utils.interpolate_coord_robust(grid, ss[self.policy]) # allocate empty arrays to store result, assume all like D individual_paths = {k: np.empty((T,) + D.shape) for k in self.non_back_iter_outputs} hetoutput_paths = {k: np.empty((T,) + D.shape) for k in self.hetoutput_outputs} + P_path = np.empty((T,) + P.shape) + + # obtain full path of multidimensional inputs + multidim_inputs = {k: np.empty((T,) + ss[k].shape) for k in self.hetinput_outputs_order} + if self.hetinput is not None: + indict = dict(ss.items()) + for t in range(T): + indict.update({k: ss[k] + v[t, ...] for k, v in exogenous.items()}) + hetout = dict(zip(self.hetinput_outputs_order, + self.hetinput(**{k: indict[k] for k in self.hetinput_inputs}))) + for k in self.hetinput_outputs_order: + multidim_inputs[k][t, ...] 
= hetout[k] # backward iteration backdict = dict(ss.items()) @@ -311,39 +303,56 @@ def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=Fal for t in reversed(range(T)): # be careful: if you include vars from self.back_iter_vars in exogenous, agents will use them! backdict.update({k: ss[k] + v[t, ...] for k, v in exogenous.items()}) + + # add in multidimensional inputs EXCEPT exogenous state transitions (at lead 0) + backdict.update({k: ss[k] + v[t, ...] for k, v in multidim_inputs.items() if k not in self.exogenous}) + + # add in multidimensional inputs FOR exogenous state transitions (at lead 1) + if t < T - 1: + backdict.update({k: ss[k] + v[t+1, ...] for k, v in multidim_inputs.items() if k in self.exogenous}) + + # step back individual = {k: v for k, v in zip(self.back_step_output_list, self.back_step_fun(**self.make_inputs(backdict)))} + + # update backward variables backdict.update({k: individual[k] for k in self.back_iter_vars}) + # compute hetoutputs if self.hetoutput is not None: hetoutput = self.hetoutput.evaluate(backdict) for k in self.hetoutput_outputs: hetoutput_paths[k][t, ...] = hetoutput[k] + # save individual outputs of interest + P_path[t, ...] = individual[self.disc_policy] for k in self.non_back_iter_outputs: individual_paths[k][t, ...] = individual[k] + # forward iteration + # initial markov matrix (may have been shocked) + Pi_path = [[multidim_inputs[k][0, ...] if k in self.hetinput_outputs_order else ss[k] for k in self.exogenous]] + + # on impact: assets are predetermined, but Pi could be shocked, and P can change D_path = np.empty((T,) + D.shape) - D_path[0, ...] = D + if use_ss_grid: + grid_var = grid + else: + grid_var = grid[0, ...] + sspol_i, sspol_pi = utils.interpolate.interpolate_coord_robust(grid_var, ss[self.policy]) + D_path[0, ...] = self.forward_step(D, P_path[0, ...], Pi_path[0], sspol_i, sspol_pi) for t in range(T-1): # have to interpolate policy separately for each t to get sparse transition matrices - sspol_i = {} - sspol_pi = {} - for pol in self.policy: - if use_ss_grid[pol]: - grid_var = grid[pol] - else: - grid_var = grid[pol][t, ...] - if monotonic: - # TODO: change for two-asset case so assumption is monotonicity in own asset, not anything else - sspol_i[pol], sspol_pi[pol] = utils.interpolate.interpolate_coord(grid_var, - individual_paths[pol][t, ...]) - else: - sspol_i[pol], sspol_pi[pol] =\ - utils.interpolate.interpolate_coord_robust(grid_var, individual_paths[pol][t, ...]) + if not use_ss_grid: + grid_var = grid[t, ...] + pol_i, pol_pi = utils.interpolate.interpolate_coord_robust(grid_var, individual_paths[self.policy][t, ...]) + + # update exogenous Markov matrices + Pi = [multidim_inputs[k][t+1, ...] if k in self.hetinput_outputs_order else ss[k] for k in self.exogenous] + Pi_path.append(Pi) # step forward - D_path[t+1, ...] = self.forward_step(D_path[t, ...], Pi_T, sspol_i, sspol_pi) + D_path[t+1, ...] 
= self.forward_step(D_path[t, ...], P_path[t+1, ...], Pi_path[t+1, ...], pol_i, pol_pi) # obtain aggregates of all outputs, made uppercase aggregates = {o.capitalize(): utils.optimized_routines.fast_aggregate(D_path, individual_paths[o]) @@ -355,8 +364,8 @@ def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=Fal # return either this, or also include distributional information if returnindividual: - return ImpulseDict({**aggregates, **aggregate_hetoutputs, **individual_paths, **hetoutput_paths, - 'D': D_path}) - ss + return ImpulseDict({**aggregates, **aggregate_hetoutputs, **individual_paths, **multidim_inputs, + **hetoutput_paths, 'D': D_path, 'P_path': P_path, 'Pi_path': Pi_path}) - ss else: return ImpulseDict({**aggregates, **aggregate_hetoutputs}) - ss @@ -369,6 +378,7 @@ def impulse_linear(self, ss, exogenous, T=None, Js=None, **kwargs): return ImpulseDict(self.jacobian(ss, list(exogenous.keys()), T=T, Js=Js, **kwargs).apply(exogenous)) + # TODO: update this def jacobian(self, ss, exogenous=None, T=300, outputs=None, output_list=None, Js=None, h=1E-4): """Assemble nested dict of Jacobians of agg outputs vs. inputs, using fake news algorithm. @@ -549,11 +559,11 @@ def policy_ss(self, ssin, tol=1E-8, maxit=5000): # only check convergence every 10 iterations for efficiency if it % 10 == 1 and all(utils.optimized_routines.within_tolerance(sspol[k], old[k], tol) - for k in self.policy): + for k in self.back_iter_vars): break # update 'old' for comparison during next iteration, prepare 'ssin' as input for next iteration - old.update({k: sspol[k] for k in self.policy}) + old.update({k: sspol[k] for k in self.back_iter_vars}) ssin.update({k + '_p': sspol[k] for k in self.back_iter_vars}) else: raise ValueError(f'No convergence of policy functions after {maxit} backward iterations!') @@ -568,13 +578,11 @@ def policy_ss(self, ssin, tol=1E-8, maxit=5000): ssin[k] = original_ssin[k] return {**ssin, **sspol} - def dist_ss(self, Pi, sspol, grid, tol=1E-10, maxit=100_000, D_seed=None, pi_seed=None): + def dist_ss(self, sspol, grid, tol=1E-10, maxit=100_000, D_seed=None): """Find steady-state distribution through forward iteration until convergence. 
Parameters ---------- - Pi : array - steady-state Markov matrix for exogenous variable sspol : dict steady-state policies on grid for all policy variables in self.policy grid : dict @@ -585,37 +593,29 @@ def dist_ss(self, Pi, sspol, grid, tol=1E-10, maxit=100_000, D_seed=None, pi_see maximum number of iterations, if 'tol' not reached by then, raise error D_seed : [optional] array initial seed for overall distribution - pi_seed : [optional] array - initial seed for stationary dist of Pi, if no D_seed Returns ---------- D : array steady-state distribution """ + # extract transition matrix for exogenous states + Pi = [sspol[k] for k in self.exogenous] + P = sspol[self.disc_policy] # first obtain initial distribution D if D_seed is None: - # compute stationary distribution for exogenous variable - pi = utils.discretize.stationary(Pi, pi_seed) - - # now initialize full distribution with this, assuming uniform distribution on endogenous vars - endogenous_dims = [grid[k].shape[0] for k in self.policy] - D = np.tile(pi, endogenous_dims[::-1] + [1]).T / np.prod(endogenous_dims) + # initialize at uniform distribution + D = np.ones_like(sspol[self.policy]) / sspol[self.policy].size else: D = D_seed # obtain interpolated policy rule for each dimension of endogenous policy - sspol_i = {} - sspol_pi = {} - for pol in self.policy: - # use robust binary search-based method that only requires grids, not policies, to be monotonic - sspol_i[pol], sspol_pi[pol] = utils.interpolate.interpolate_coord_robust(grid[pol], sspol[pol]) + sspol_i, sspol_pi = utils.interpolate.interpolate_coord_robust(grid, sspol[self.policy]) # iterate until convergence by tol, or maxit - Pi_T = Pi.T.copy() for it in range(maxit): - Dnew = self.forward_step(D, Pi_T, sspol_i, sspol_pi) + Dnew = self.forward_step(D, P, Pi, sspol_i, sspol_pi) # only check convergence every 10 iterations for efficiency if it % 10 == 0 and utils.optimized_routines.within_tolerance(D, Dnew, tol): @@ -817,28 +817,19 @@ def make_inputs(self, back_step_inputs_dict): '''Part 7: routines to do forward steps of different kinds, all wrap functions in utils''' - def forward_step(self, D, Pi_T, pol_i, pol_pi): - """Update distribution, calling on 1d and 2d-specific compiled routines. 
- Parameters - ---------- - D : array, beginning-of-period distribution - Pi_T : array, transpose Markov matrix - pol_i : dict, indices on lower bracketing gridpoint for all in self.policy - pol_pi : dict, weights on lower bracketing gridpoint for all in self.policy + def forward_step(self, D3_prev, P, Pi, a_i, a_pi): + """Update distribution from (s[0], z[0], a[-1]) to (s[1], z[1], a[0])""" + # update with continuous policy of last period + D4 = utils.forward_step.forward_step_cpol(D3_prev, a_i, a_pi) + + # update with exogenous shocks today + D2 = utils.forward_step.forward_step_exo(D4, Pi) + + # update with discrete choice today + D3 = utils.forward_step.forward_step_dpol(D2, P) + return D3 - Returns - ---------- - Dnew : array, beginning-of-next-period distribution - """ - if len(self.policy) == 1: - p, = self.policy - return utils.forward_step.forward_step_1d(D, Pi_T, pol_i[p], pol_pi[p]) - elif len(self.policy) == 2: - p1, p2 = self.policy - return utils.forward_step.forward_step_2d(D, Pi_T, pol_i[p1], pol_i[p2], pol_pi[p1], pol_pi[p2]) - else: - raise ValueError(f"{len(self.policy)} policy variables, only up to 2 implemented!") def forward_step_transpose(self, D, Pi, pol_i, pol_pi): """Transpose of forward_step (note: this takes Pi rather than Pi_T as argument!)""" diff --git a/src/sequence_jacobian/utilities/forward_step.py b/src/sequence_jacobian/utilities/forward_step.py index a80b7bf..3d73a32 100644 --- a/src/sequence_jacobian/utilities/forward_step.py +++ b/src/sequence_jacobian/utilities/forward_step.py @@ -171,3 +171,44 @@ def forward_step_transpose_endo_2d(D, x_i, y_i, x_pi, y_pi): (1-alpha) * beta * D[iz, ixp+1, iyp] + (1-alpha) * (1-beta) * D[iz, ixp+1, iyp+1]) return Dnew + + +''' +For HetDC block. + +D0 : s[-1], z[-1], a[-1] (end of last period) +D1 : x[0], z[-1], a[-1] (employment shock) +D2 : x[0], z[0], a[-1] (productivity shock) +D3 : s[0], z[0], a[-1] (discrete choice) REFERENCE STAGE +D4 : s[0], z[0], a[0] (cont choice) + +''' + + +def forward_step_exo(D0, Pi): + """Update distribution s[-1], z[-1], a[-1] to x[0], z[0], a[-1].""" + D1 = np.einsum('sza,sx->xza', D0, Pi[0]) # s[-1] -> x[0] + D2 = np.einsum('xza,zp->xpa', D1, Pi[1]) # z[-1] -> z[0] + return D2 + + +def forward_step_dpol(D2, P): + """Update distribution x[0], z[0], a[-1] to s[0], z[0], a[-1].""" + D3 = np.einsum('xza,xsza->sza', D2, P) + return D3 + + +@njit +def forward_step_cpol(D3, a_i, a_pi): + """Update distribution s[0], z[0], a[-1] to s[0], z[0], a[0].""" + nS, nZ, nA = D3.shape + D4 = np.zeros_like(D3) + for iw in range(nS): + for iz in range(nZ): + for ia in range(nA): + i = a_i[iw, iz, ia] + pi = a_pi[iw, iz, ia] + d = D3[iw, iz, ia] + D4[iw, iz, i] += d * pi + D4[iw, iz, i+1] += d * (1 - pi) + return D4 \ No newline at end of file diff --git a/src/sequence_jacobian/utilities/misc.py b/src/sequence_jacobian/utilities/misc.py index cf47e7b..8ff9ff0 100644 --- a/src/sequence_jacobian/utilities/misc.py +++ b/src/sequence_jacobian/utilities/misc.py @@ -173,3 +173,40 @@ def verify_saved_jacobian(block_name, Js, outputs, inputs, T): return False return True + + +'''Tools for taste shocks used in discrete choice problems''' + + +def choice_prob(vfun, lam): + """ + Logit choice probability of choosing along first axis. 
+ + Parameters + ---------- + vfun : array(Ns, Nz, Na): discrete choice specific value function + lam : float, scale of taste shock + + Returns + ------- + prob : array (Ns, Nz, nA): choice probability + """ + # rescale values for numeric robustness + vmax = np.max(vfun, axis=0) + vfun_norm = vfun - vmax + + # apply formula (could be njitted in separate function) + P = np.exp(vfun_norm / lam) / np.sum(np.exp(vfun_norm / lam), axis=0) + return P + + +def logsum(vfun, lam): + """Logsum formula for expected continuation value.""" + + # rescale values for numeric robustness + vmax = np.max(vfun, axis=0) + vfun_norm = vfun - vmax + + # apply formula (could be njitted in separate function) + VE = vmax + lam * np.log(np.sum(np.exp(vfun_norm / lam), axis=0)) + return VE From 8d5baa359ff4307cdc1851b757b9c33b3e919a59 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 8 Jun 2021 14:08:54 -0500 Subject: [PATCH 03/35] DiscontBlock: impulse_nonlinear works. --- src/sequence_jacobian/blocks/discont_block.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/sequence_jacobian/blocks/discont_block.py b/src/sequence_jacobian/blocks/discont_block.py index 8194365..f43b1e6 100644 --- a/src/sequence_jacobian/blocks/discont_block.py +++ b/src/sequence_jacobian/blocks/discont_block.py @@ -234,7 +234,7 @@ def steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, return SteadyStateDict(ss, internal=self) - def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=False, grid_paths=None): + def impulse_nonlinear(self, ss, exogenous, returnindividual=False, grid_paths=None): """Evaluate transitional dynamics for DiscontBlock given dynamic paths for inputs in exogenous, assuming that we start and end in steady state ss, and that all inputs not specified in exogenous are constant at their ss values. Analog to SimpleBlock.td. @@ -246,9 +246,6 @@ def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=Fal exogenous : dict of {str : array(T, ...)} all time-varying inputs here (in deviations), with first dimension being time this must have same length T for all entries (all outputs will be calculated up to T) - monotonic : [optional] bool - flag indicating date-t policies are monotonic in same date-(t-1) policies, allows us - to use faster interpolation routines, otherwise use slower robust to nonmonotonicity returnindividual : [optional] bool return distribution and full outputs on grid grid_paths: [optional] dict of {str: array(T, Number of grid points)} @@ -269,7 +266,7 @@ def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=Fal T = shock_lengths[0] # copy from ss info - D, P = ss['D'], ss[self.disc_policy] + D, P = ss.internal[self.name]['D'], ss.internal[self.name][self.disc_policy] # construct grids for policy variables either from the steady state grid if the grid is meant to be # non-time-varying or from the provided `grid_path` if the grid is meant to be time-varying. 
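The taste-shock helpers added above, `choice_prob` and `logsum`, are the two pieces the discrete-choice block needs from `utilities/misc.py`: the first gives logit probabilities along the choice axis, the second the expected continuation value. A quick standalone check, with both helpers re-implemented in plain numpy so the snippet runs on its own; the shapes and the `lam` value are only illustrative.

import numpy as np

def choice_prob(vfun, lam):
    # logit choice probabilities along the first axis, max-rescaled for numerical stability
    v = vfun - vfun.max(axis=0)
    e = np.exp(v / lam)
    return e / e.sum(axis=0)

def logsum(vfun, lam):
    # expected continuation value under extreme-value taste shocks
    vmax = vfun.max(axis=0)
    return vmax + lam * np.log(np.exp((vfun - vmax) / lam).sum(axis=0))

rng = np.random.default_rng(0)
V = rng.standard_normal((2, 3, 4))               # toy (Ns, Nz, Na) values
P = choice_prob(V, lam=0.05)
assert np.allclose(P.sum(axis=0), 1.0)           # probabilities sum to one across choices
assert np.all(logsum(V, 0.05) >= V.max(axis=0))  # logsum dominates the best single choice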
@@ -287,7 +284,7 @@ def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=Fal P_path = np.empty((T,) + P.shape) # obtain full path of multidimensional inputs - multidim_inputs = {k: np.empty((T,) + ss[k].shape) for k in self.hetinput_outputs_order} + multidim_inputs = {k: np.empty((T,) + ss.internal[self.name][k].shape) for k in self.hetinput_outputs_order} if self.hetinput is not None: indict = dict(ss.items()) for t in range(T): @@ -305,11 +302,11 @@ def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=Fal backdict.update({k: ss[k] + v[t, ...] for k, v in exogenous.items()}) # add in multidimensional inputs EXCEPT exogenous state transitions (at lead 0) - backdict.update({k: ss[k] + v[t, ...] for k, v in multidim_inputs.items() if k not in self.exogenous}) + backdict.update({k: ss.internal[self.name][k] + v[t, ...] for k, v in multidim_inputs.items() if k not in self.exogenous}) # add in multidimensional inputs FOR exogenous state transitions (at lead 1) if t < T - 1: - backdict.update({k: ss[k] + v[t+1, ...] for k, v in multidim_inputs.items() if k in self.exogenous}) + backdict.update({k: ss.internal[self.name][k] + v[t+1, ...] for k, v in multidim_inputs.items() if k in self.exogenous}) # step back individual = {k: v for k, v in zip(self.back_step_output_list, @@ -339,7 +336,7 @@ def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=Fal grid_var = grid else: grid_var = grid[0, ...] - sspol_i, sspol_pi = utils.interpolate.interpolate_coord_robust(grid_var, ss[self.policy]) + sspol_i, sspol_pi = utils.interpolate.interpolate_coord_robust(grid_var, ss.internal[self.name][self.policy]) D_path[0, ...] = self.forward_step(D, P_path[0, ...], Pi_path[0], sspol_i, sspol_pi) for t in range(T-1): # have to interpolate policy separately for each t to get sparse transition matrices @@ -352,7 +349,7 @@ def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=Fal Pi_path.append(Pi) # step forward - D_path[t+1, ...] = self.forward_step(D_path[t, ...], P_path[t+1, ...], Pi_path[t+1, ...], pol_i, pol_pi) + D_path[t+1, ...] = self.forward_step(D_path[t, ...], P_path[t+1, ...], Pi, pol_i, pol_pi) # obtain aggregates of all outputs, made uppercase aggregates = {o.capitalize(): utils.optimized_routines.fast_aggregate(D_path, individual_paths[o]) From 6444a0739fc37229ec6b087af9a96b7a2a3237aa Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 8 Jun 2021 18:21:48 -0500 Subject: [PATCH 04/35] DiscontBlock: jacobian works. Ready to roll! --- src/sequence_jacobian/blocks/discont_block.py | 238 ++++++++++-------- .../utilities/forward_step.py | 31 ++- 2 files changed, 167 insertions(+), 102 deletions(-) diff --git a/src/sequence_jacobian/blocks/discont_block.py b/src/sequence_jacobian/blocks/discont_block.py index f43b1e6..f0e3873 100644 --- a/src/sequence_jacobian/blocks/discont_block.py +++ b/src/sequence_jacobian/blocks/discont_block.py @@ -7,7 +7,6 @@ from .. 
import utilities as utils from ..steady_state.classes import SteadyStateDict from ..jacobian.classes import JacobianDict -from ..devtools.deprecate import rename_output_list_to_outputs from ..utilities.misc import verify_saved_jacobian @@ -375,8 +374,7 @@ def impulse_linear(self, ss, exogenous, T=None, Js=None, **kwargs): return ImpulseDict(self.jacobian(ss, list(exogenous.keys()), T=T, Js=Js, **kwargs).apply(exogenous)) - # TODO: update this - def jacobian(self, ss, exogenous=None, T=300, outputs=None, output_list=None, Js=None, h=1E-4): + def jacobian(self, ss, exogenous=None, T=300, outputs=None, Js=None, h=1E-4): """Assemble nested dict of Jacobians of agg outputs vs. inputs, using fake news algorithm. Parameters @@ -404,10 +402,8 @@ def jacobian(self, ss, exogenous=None, T=300, outputs=None, output_list=None, Js # except for the backward iteration variables themselves if exogenous is None: exogenous = list(self.inputs) - if outputs is None or output_list is None: + if outputs is None: outputs = self.non_back_iter_outputs - else: - outputs = rename_output_list_to_outputs(outputs=outputs, output_list=output_list) relevant_shocks = [i for i in self.back_step_inputs | self.hetinput_inputs if i in exogenous] @@ -418,22 +414,23 @@ def jacobian(self, ss, exogenous=None, T=300, outputs=None, output_list=None, Js return Js[self.name] # step 0: preliminary processing of steady state - (ssin_dict, Pi, ssout_list, ss_for_hetinput, sspol_i, sspol_pi, sspol_space) = self.jac_prelim(ss) + (ssin_dict, ssout_list, ss_for_hetinput, sspol_i, sspol_pi, sspol_space, D0, D2, Pi, P) = self.jac_prelim(ss) # step 1 of fake news algorithm # compute curlyY and curlyD (backward iteration) for each input i - curlyYs, curlyDs = {}, {} + dYs, dDs, dD_ps, dD_direct = {}, {}, {}, {} for i in relevant_shocks: - curlyYs[i], curlyDs[i] = self.backward_iteration_fakenews(i, outputs, ssin_dict, ssout_list, - ss.internal[self.name]['D'], Pi.T.copy(), - sspol_i, sspol_pi, sspol_space, T, h, - ss_for_hetinput) + dYs[i], dDs[i], dD_ps[i], dD_direct[i] = self.backward_iteration_fakenews(i, outputs, ssin_dict, ssout_list, + ss.internal[self.name]['D'], + D0, D2, P, Pi, sspol_i, sspol_pi, + sspol_space, T, h, + ss_for_hetinput) # step 2 of fake news algorithm # compute prediction vectors curlyP (forward iteration) for each outcome o curlyPs = {} for o in outputs: - curlyPs[o] = self.forward_iteration_fakenews(ss.internal[self.name][o], Pi, sspol_i, sspol_pi, T-1) + curlyPs[o] = self.forward_iteration_fakenews(ss.internal[self.name][o], Pi, P, sspol_i, sspol_pi, T) # steps 3-4 of fake news algorithm # make fake news matrix and Jacobian for each outcome-input pair @@ -444,8 +441,8 @@ def jacobian(self, ss, exogenous=None, T=300, outputs=None, output_list=None, Js F[o.capitalize()] = {} if o.capitalize() not in J: J[o.capitalize()] = {} - F[o.capitalize()][i] = HetBlock.build_F(curlyYs[i][o], curlyDs[i], curlyPs[o]) - J[o.capitalize()][i] = HetBlock.J_from_F(F[o.capitalize()][i]) + F[o.capitalize()][i] = DiscontBlock.build_F(dYs[i][o], dD_ps[i], curlyPs[o], dD_direct[i], dDs[i]) + J[o.capitalize()][i] = DiscontBlock.J_from_F(F[o.capitalize()][i]) return JacobianDict(J, name=self.name) @@ -630,72 +627,109 @@ def dist_ss(self, sspol, grid, tol=1E-10, maxit=100_000, D_seed=None): - Step 4: J_from_F to get Jacobian from fake news matrix ''' + def shock_timing(self, input_shocked, D0, Pi_ss, P, ss_for_hetinput, h): + """Figure out the details of how the scalar shock feeds into back_step_fun. 
+ + Main complication: shocks to Pi transmit via hetinput with a lead of 1. + """ + if self.hetinput is not None and input_shocked in self.hetinput_inputs: + # if input_shocked is an input to hetinput, take numerical diff to get response + din_dict = dict(zip(self.hetinput_outputs_order, + utils.differentiate.numerical_diff_symmetric(self.hetinput, ss_for_hetinput, + {input_shocked: 1}, h))) + + if all(k not in din_dict.keys() for k in self.exogenous): + # if Pi is not generated by hetinput, no work to be done + lead = 0 + dD3_direct = None + elif all(np.count_nonzero(din_dict[k]) == 0 for k in self.exogenous if k in din_dict): + # if Pi is generated by hetinput but input_shocked does not affect it, replace Pi with Pi_p + lead = 0 + dD3_direct = None + for k in self.exogenous: + if k in din_dict.keys(): + din_dict[k + '_p'] = din_dict.pop(k) + else: + # if Pi is generated by hetinput and input_shocked affects it, replace that with Pi_p at lead 1 + lead = 1 + Pi = [din_dict[k] if k in din_dict else Pi_ss[num] for num, k in enumerate(self.exogenous)] + dD2_direct = utils.forward_step.forward_step_exo(D0, Pi) + dD3_direct = utils.forward_step.forward_step_dpol(dD2_direct, P) + for k in self.exogenous: + if k in din_dict.keys(): + din_dict[k + '_p'] = din_dict.pop(k) + else: + # if input_shocked feeds directly into back_step_fun with lead 0, no work to be done + lead = 0 + din_dict = {input_shocked: 1} + dD3_direct = None + + return din_dict, lead, dD3_direct + def backward_step_fakenews(self, din_dict, output_list, ssin_dict, ssout_list, - Dss, Pi_T, sspol_i, sspol_pi, sspol_space, h=1E-4): - # shock perturbs outputs + Dss, D2, P, Pi, sspol_i, sspol_pi, sspol_space, h=1E-4): + # 1. shock perturbs outputs shocked_outputs = {k: v for k, v in zip(self.back_step_output_list, utils.differentiate.numerical_diff(self.back_step_fun, ssin_dict, din_dict, h, ssout_list))} - curlyV = {k: shocked_outputs[k] for k in self.back_iter_vars} - - # which affects the distribution tomorrow - pol_pi_shock = {k: -shocked_outputs[k] / sspol_space[k] for k in self.policy} + dV = {k: shocked_outputs[k] for k in self.back_iter_vars} - # Include an additional term to account for the effect of a deleveraging shock affecting the grid + # 2. which affects the distribution tomorrow via the savings policy + pol_pi_shock = -shocked_outputs[self.policy] / sspol_space if "delev_exante" in din_dict: - dx = np.zeros_like(sspol_pi["a"]) - dx[sspol_i["a"] == 0] = 1. - add_term = sspol_pi["a"] * dx / sspol_space["a"] - pol_pi_shock["a"] += add_term + # include an additional term to account for the effect of a deleveraging shock affecting the grid + dx = np.zeros_like(sspol_pi) + dx[sspol_i == 0] = 1. + add_term = sspol_pi * dx / sspol_space + pol_pi_shock += add_term + dD3_p = self.forward_step_shock(Dss, sspol_i, pol_pi_shock, Pi, P) - curlyD = self.forward_step_shock(Dss, Pi_T, sspol_i, sspol_pi, pol_pi_shock) + # 3. and the distribution today (and Dmid tomorrow) via the discrete choice + P_shock = shocked_outputs[self.disc_policy] + dD3 = utils.forward_step.forward_step_dpol(D2, P_shock) # s[0], z[0], a[-1] - # and the aggregate outcomes today - curlyY = {k: np.vdot(Dss, shocked_outputs[k]) for k in output_list} + # 4. 
and the aggregate outcomes today (ignoring dD and dD_direct) + dY = {k: np.vdot(Dss, shocked_outputs[k]) for k in output_list} - return curlyV, curlyD, curlyY + return dV, dD3, dD3_p, dY - def backward_iteration_fakenews(self, input_shocked, output_list, ssin_dict, ssout_list, Dss, Pi_T, + def backward_iteration_fakenews(self, input_shocked, output_list, ssin_dict, ssout_list, Dss, D0, D2, P, Pi, sspol_i, sspol_pi, sspol_space, T, h=1E-4, ss_for_hetinput=None): """Iterate policy steps backward T times for a single shock.""" - # TODO: Might need to add a check for ss_for_hetinput if self.hetinput is not None - # since unless self.hetinput_inputs is exactly equal to input_shocked, calling - # self.hetinput() inside the symmetric differentiation function will throw an error. - # It's probably better/more informative to throw that error out here. - if self.hetinput is not None and input_shocked in self.hetinput_inputs: - # if input_shocked is an input to hetinput, take numerical diff to get response - din_dict = dict(zip(self.hetinput_outputs_order, - utils.differentiate.numerical_diff_symmetric(self.hetinput, - ss_for_hetinput, {input_shocked: 1}, h))) - else: - # otherwise, we just have that one shock - din_dict = {input_shocked: 1} + # map name of shocked input into a perturbation of the inputs of back_step_fun + din_dict, lead, dD_direct = self.shock_timing(input_shocked, D0, Pi.copy(), P, ss_for_hetinput, h) # contemporaneous response to unit scalar shock - curlyV, curlyD, curlyY = self.backward_step_fakenews(din_dict, output_list, ssin_dict, ssout_list, - Dss, Pi_T, sspol_i, sspol_pi, sspol_space, h=h) + dV, dD, dD_p, dY = self.backward_step_fakenews(din_dict, output_list, ssin_dict, ssout_list, + Dss, D2, P, Pi, sspol_i, sspol_pi, sspol_space, h=h) # infer dimensions from this and initialize empty arrays - curlyDs = np.empty((T,) + curlyD.shape) - curlyYs = {k: np.empty(T) for k in curlyY.keys()} + dDs = np.empty((T,) + dD.shape) + dD_ps = np.empty((T,) + dD_p.shape) + dYs = {k: np.empty(T) for k in dY.keys()} - # fill in current effect of shock - curlyDs[0, ...] = curlyD - for k in curlyY.keys(): - curlyYs[k][0] = curlyY[k] + # fill in current effect of shock (be careful to handle lead = 1) + dDs[:lead, ...], dD_ps[:lead, ...] = 0, 0 + dDs[lead, ...], dD_ps[lead, ...] = dD, dD_p + for k in dY.keys(): + dYs[k][:lead] = 0 + dYs[k][lead] = dY[k] # fill in anticipation effects - for t in range(1, T): - curlyV, curlyDs[t, ...], curlyY = self.backward_step_fakenews({k+'_p': v for k, v in curlyV.items()}, - output_list, ssin_dict, ssout_list, - Dss, Pi_T, sspol_i, sspol_pi, sspol_space, h) - for k in curlyY.keys(): - curlyYs[k][t] = curlyY[k] + for t in range(lead + 1, T): + dV, dDs[t, ...], dD_ps[t, ...], dY = self.backward_step_fakenews({k + '_p': + v for k, v in dV.items()}, + output_list, ssin_dict, ssout_list, + Dss, D2, P, Pi, sspol_i, sspol_pi, + sspol_space, h) + + for k in dY.keys(): + dYs[k][t] = dY[k] - return curlyYs, curlyDs + return dYs, dDs, dD_ps, dD_direct - def forward_iteration_fakenews(self, o_ss, Pi, pol_i_ss, pol_pi_ss, T): + def forward_iteration_fakenews(self, o_ss, Pi, P, pol_i_ss, pol_pi_ss, T): """Iterate transpose forward T steps to get full set of curlyPs for a given outcome. Note we depart from definition in paper by applying the demeaning operator in addition to Lambda @@ -707,16 +741,26 @@ def forward_iteration_fakenews(self, o_ss, Pi, pol_i_ss, pol_pi_ss, T): curlyPs[0, ...] = utils.misc.demean(o_ss) for t in range(1, T): curlyPs[t, ...] 
= utils.misc.demean(self.forward_step_transpose(curlyPs[t - 1, ...], - Pi, pol_i_ss, pol_pi_ss)) + P, Pi, pol_i_ss, pol_pi_ss)) return curlyPs @staticmethod - def build_F(curlyYs, curlyDs, curlyPs): - T = curlyDs.shape[0] - Tpost = curlyPs.shape[0] - T + 2 - F = np.empty((Tpost + T - 1, T)) - F[0, :] = curlyYs - F[1:, :] = curlyPs.reshape((Tpost + T - 2, -1)) @ curlyDs.reshape((T, -1)).T + def build_F(dYs, dD_ps, curlyPs, dD_direct, dDs): + T = dYs.shape[0] + F = np.empty((T, T)) + + # standard effect + F[0, :] = dYs + F[1:, :] = curlyPs[:T-1, ...].reshape((T-1, -1)) @ dD_ps.reshape((T, -1)).T + + # contemporaneous effect via discrete choice + if dDs is not None: + F += curlyPs.reshape((T, -1)) @ dDs.reshape((T, -1)).T + + # direct effect of shock + if dD_direct is not None: + F[:, 0] += curlyPs.reshape((T, -1)) @ dD_direct.ravel() + return F @staticmethod @@ -738,7 +782,7 @@ def jac_prelim(self, ss): Returns ---------- ssin_dict : dict, ss vals of exactly the inputs needed by self.back_step_fun for backward step - Pi : array (S*S), Markov matrix for exogenous state + D0 : array (nS, nZ, nA), distribution over s[-1], z[-1], a[-1] ssout_list : tuple, what self.back_step_fun returns when given ssin_dict (not exactly the same as steady-state numerically since SS convergence was to some tolerance threshold) ss_for_hetinput : dict, ss vals of exactly the inputs needed by self.hetinput (if it exists) @@ -748,24 +792,26 @@ def jac_prelim(self, ss): """ # preliminary a: obtain ss inputs and other info, run once to get baseline for numerical differentiation ssin_dict = self.make_inputs(ss) - Pi = ss[self.exogenous] - grid = {k: ss[k+'_grid'] for k in self.policy} ssout_list = self.back_step_fun(**ssin_dict) ss_for_hetinput = None if self.hetinput is not None: ss_for_hetinput = {k: ss[k] for k in self.hetinput_inputs if k in ss} - # preliminary b: get sparse representations of policy rules, and distance between neighboring policy gridpoints - sspol_i = {} - sspol_pi = {} - sspol_space = {} - for pol in self.policy: - # use robust binary-search-based method that only requires grids to be monotonic - sspol_i[pol], sspol_pi[pol] = utils.interpolate.interpolate_coord_robust(grid[pol], ss.internal[self.name][pol]) - sspol_space[pol] = grid[pol][sspol_i[pol]+1] - grid[pol][sspol_i[pol]] + # preliminary b: get sparse representations of policy rules and distance between neighboring policy gridpoints + grid = ss[self.policy + '_grid'] + sspol_i, sspol_pi = utils.interpolate.interpolate_coord_robust(grid, ss.internal[self.name][self.policy]) + sspol_space = grid[sspol_i + 1] - grid[sspol_i] - toreturn = (ssin_dict, Pi, ssout_list, ss_for_hetinput, sspol_i, sspol_pi, sspol_space) + # preliminary c: get end-of-period distribution, need it when Pi is shocked + Pi = [ss.internal[self.name][k] for k in self.exogenous] + D = ss.internal[self.name]['D'] + D0 = utils.forward_step.forward_step_cpol(D, sspol_i, sspol_pi) + D2 = utils.forward_step.forward_step_exo(D0, Pi) + + Pss = ss.internal[self.name][self.disc_policy] + + toreturn = (ssin_dict, ssout_list, ss_for_hetinput, sspol_i, sspol_pi, sspol_space, D0, D2, Pi, Pss) return toreturn @@ -827,30 +873,20 @@ def forward_step(self, D3_prev, P, Pi, a_i, a_pi): D3 = utils.forward_step.forward_step_dpol(D2, P) return D3 + def forward_step_shock(self, D0, pol_i, pol_pi_shock, Pi, P): + """Forward_step linearized wrt pol_pi.""" + D4 = utils.forward_step.forward_step_cpol_shock(D0, pol_i, pol_pi_shock) + D2 = utils.forward_step.forward_step_exo(D4, Pi) + D3 = 
utils.forward_step.forward_step_dpol(D2, P) + return D3 - def forward_step_transpose(self, D, Pi, pol_i, pol_pi): - """Transpose of forward_step (note: this takes Pi rather than Pi_T as argument!)""" - if len(self.policy) == 1: - p, = self.policy - return utils.forward_step.forward_step_transpose_1d(D, Pi, pol_i[p], pol_pi[p]) - elif len(self.policy) == 2: - p1, p2 = self.policy - return utils.forward_step.forward_step_transpose_2d(D, Pi, pol_i[p1], pol_i[p2], pol_pi[p1], pol_pi[p2]) - else: - raise ValueError(f"{len(self.policy)} policy variables, only up to 2 implemented!") - - def forward_step_shock(self, Dss, Pi_T, pol_i_ss, pol_pi_ss, pol_pi_shock): - """Forward_step linearized with respect to pol_pi""" - if len(self.policy) == 1: - p, = self.policy - return utils.forward_step.forward_step_shock_1d(Dss, Pi_T, pol_i_ss[p], pol_pi_shock[p]) - elif len(self.policy) == 2: - p1, p2 = self.policy - return utils.forward_step.forward_step_shock_2d(Dss, Pi_T, pol_i_ss[p1], pol_i_ss[p2], - pol_pi_ss[p1], pol_pi_ss[p2], - pol_pi_shock[p1], pol_pi_shock[p2]) - else: - raise ValueError(f"{len(self.policy)} policy variables, only up to 2 implemented!") + def forward_step_transpose(self, D, P, Pi, a_i, a_pi): + """Transpose of forward_step.""" + D1 = np.einsum('sza,xsza->xza', D, P) + D2 = np.einsum('xpa,zp->xza', D1, Pi[1]) + D3 = np.einsum('xza,sx->sza', D2, Pi[0]) + D4 = utils.forward_step.forward_step_cpol_transpose(D3, a_i, a_pi) + return D4 def hetoutput(custom_aggregation=None): diff --git a/src/sequence_jacobian/utilities/forward_step.py b/src/sequence_jacobian/utilities/forward_step.py index 3d73a32..50d96db 100644 --- a/src/sequence_jacobian/utilities/forward_step.py +++ b/src/sequence_jacobian/utilities/forward_step.py @@ -211,4 +211,33 @@ def forward_step_cpol(D3, a_i, a_pi): d = D3[iw, iz, ia] D4[iw, iz, i] += d * pi D4[iw, iz, i+1] += d * (1 - pi) - return D4 \ No newline at end of file + return D4 + + +@njit +def forward_step_cpol_shock(D3, a_i_ss, a_pi_shock): + """forward_step_cpol linearized wrt a_pi""" + nS, nZ, nA = D3.shape + dD4 = np.zeros_like(D3) + for iw in range(nS): + for iz in range(nZ): + for ia in range(nA): + i = a_i_ss[iw, iz, ia] + dshock = a_pi_shock[iw, iz, ia] * D3[iw, iz, ia] + dD4[iw, iz, i] += dshock + dD4[iw, iz, i + 1] -= dshock + return dD4 + + +@njit +def forward_step_cpol_transpose(D3, a_i, a_pi): + """Transpose of forward_step_cpol""" + nS, nZ, nA = D3.shape + D4 = np.zeros_like(D3) + for iw in range(nS): + for iz in range(nZ): + for ia in range(nA): + i = a_i[iw, iz, ia] + pi = a_pi[iw, iz, ia] + D4[iw, iz, ia] = pi * D3[iw, iz, i] + (1 - pi) * D3[iw, iz, i + 1] + return D4 From 2d9703a53eea889c08033837c00df5665e453aa6 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 15 Jun 2021 11:04:49 -0500 Subject: [PATCH 05/35] Computing steady state of nested CombinedBlock now works. 
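On the two kernels added just above: `forward_step_cpol_shock` is `forward_step_cpol` linearized with respect to the interpolation weights, and `forward_step_cpol_transpose` is its adjoint, which the prediction-vector (curlyP) iteration of the fake-news algorithm relies on. A standalone sketch of the adjoint identity, with the kernels rewritten in plain numpy (no numba) and toy shapes chosen only for illustration:

import numpy as np

def forward_step_cpol(D3, a_i, a_pi):
    # same update rule as the @njit kernel above, plain Python for a quick check
    nS, nZ, nA = D3.shape
    D4 = np.zeros_like(D3)
    for iw in range(nS):
        for iz in range(nZ):
            for ia in range(nA):
                i, pi = a_i[iw, iz, ia], a_pi[iw, iz, ia]
                D4[iw, iz, i] += D3[iw, iz, ia] * pi
                D4[iw, iz, i + 1] += D3[iw, iz, ia] * (1 - pi)
    return D4

def forward_step_cpol_transpose(D3, a_i, a_pi):
    nS, nZ, nA = D3.shape
    D4 = np.zeros_like(D3)
    for iw in range(nS):
        for iz in range(nZ):
            for ia in range(nA):
                i, pi = a_i[iw, iz, ia], a_pi[iw, iz, ia]
                D4[iw, iz, ia] = pi * D3[iw, iz, i] + (1 - pi) * D3[iw, iz, i + 1]
    return D4

rng = np.random.default_rng(0)
nS, nZ, nA = 2, 3, 5
a_i = rng.integers(0, nA - 1, size=(nS, nZ, nA))   # lower bracketing gridpoints
a_pi = rng.random((nS, nZ, nA))                    # weights on the lower gridpoint
D, Y = rng.random((nS, nZ, nA)), rng.random((nS, nZ, nA))

# adjoint identity <Lambda D, Y> = <D, Lambda' Y>, and mass conservation of the forward step
assert np.isclose(np.vdot(forward_step_cpol(D, a_i, a_pi), Y),
                  np.vdot(D, forward_step_cpol_transpose(Y, a_i, a_pi)))
assert np.isclose(forward_step_cpol(D, a_i, a_pi).sum(), D.sum())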
--- src/sequence_jacobian/blocks/combined_block.py | 3 ++- src/sequence_jacobian/steady_state/drivers.py | 7 ++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/sequence_jacobian/blocks/combined_block.py b/src/sequence_jacobian/blocks/combined_block.py index 3835b1f..5f8b662 100644 --- a/src/sequence_jacobian/blocks/combined_block.py +++ b/src/sequence_jacobian/blocks/combined_block.py @@ -9,6 +9,7 @@ from ..steady_state.drivers import eval_block_ss from ..steady_state.support import provide_solver_default from ..jacobian.classes import JacobianDict +from ..steady_state.classes import SteadyStateDict def combine(blocks, name="", model_alias=False): @@ -66,7 +67,7 @@ def steady_state(self, calibration, helper_blocks=None, **kwargs): ss_partial_eq = deepcopy(calibration) for i in topsorted: ss_partial_eq.update(eval_block_ss(blocks_all[i], ss_partial_eq, **kwargs)) - return ss_partial_eq + return SteadyStateDict(ss_partial_eq) def impulse_nonlinear(self, ss, exogenous, **kwargs): """Calculate a partial equilibrium, non-linear impulse response to a set of `exogenous` shocks from diff --git a/src/sequence_jacobian/steady_state/drivers.py b/src/sequence_jacobian/steady_state/drivers.py index dbe6e9c..e005c15 100644 --- a/src/sequence_jacobian/steady_state/drivers.py +++ b/src/sequence_jacobian/steady_state/drivers.py @@ -147,9 +147,14 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k if toplevel_unknowns is None: toplevel_unknowns = {} block_unknowns_in_toplevel_unknowns = set(block.unknowns.keys()).issubset(set(toplevel_unknowns)) if hasattr(block, "unknowns") else False + if dissolve is None: + dissolve = [] # Add the block's internal variables as inputs, if the block has an internal attribute - input_arg_dict = {**calibration.toplevel, **calibration.internal[block.name]} if block.name in calibration.internal else calibration.toplevel + if isinstance(calibration, dict): + input_arg_dict = deepcopy(calibration) + else: + input_arg_dict = {**calibration.toplevel, **calibration.internal[block.name]} if block.name in calibration.internal else calibration.toplevel # Bypass the behavior for SolvedBlocks to numerically solve for their unknowns and simply evaluate them # at the provided set of unknowns if included in dissolve. From 384b4151c77b50dac024d9cd0b6e47e07f7092c9 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 15 Jun 2021 11:05:36 -0500 Subject: [PATCH 06/35] Testing DisContBlock: ss without helper works. --- dcblock.py | 447 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 447 insertions(+) create mode 100644 dcblock.py diff --git a/dcblock.py b/dcblock.py new file mode 100644 index 0000000..9778928 --- /dev/null +++ b/dcblock.py @@ -0,0 +1,447 @@ +import numpy as np +from numba import guvectorize, njit +import dfols + +from sequence_jacobian import simple, agrid, markov_rouwenhorst, create_model, solved +from sequence_jacobian.blocks.discont_block import discont +from sequence_jacobian.utilities.misc import choice_prob, logsum +from sequence_jacobian.steady_state.classes import SteadyStateDict + + +'''Core HA block''' + + +def household_init(a_grid, z_grid, b_grid, atw, transfer, rpost, eis, vphi, chi): + _, income = labor_income(z_grid, b_grid, atw, transfer, 0, 1, 1, 1) + coh = income[:, :, np.newaxis] + (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] + c_guess = 0.3 * coh + V, Va = np.empty_like(coh), np.empty_like(coh) + for iw in range(4): + V[iw, ...] 
= util(c_guess[iw, ...], iw, eis, vphi, chi) + V = V / 0.1 + + # get Va by finite difference + Va[:, :, 1:-1] = (V[:, :, 2:] - V[:, :, :-2]) / (a_grid[2:] - a_grid[:-2]) + Va[:, :, 0] = (V[:, :, 1] - V[:, :, 0]) / (a_grid[1] - a_grid[0]) + Va[:, :, -1] = (V[:, :, -1] - V[:, :, -2]) / (a_grid[-1] - a_grid[-2]) + + return Va, V + + +@njit(fastmath=True) +def util(c, iw, eis, vphi, chi): + """Utility function.""" + # 1. utility from consumption. + if eis == 1: + u = np.log(c) + else: + u = c ** (1 - 1 / eis) / (1 - 1 / eis) + + # 2. disutility from work and search + if iw == 0: + u = u - vphi # E + elif (iw == 1) | (iw == 2): + u = u - chi # Ub, U + + return u + + +@discont(exogenous=('Pi_s', 'Pi_z'), policy='a', disc_policy='P', backward=('V', 'Va'), backward_init=household_init) +def household(V_p, Va_p, Pi_z_p, Pi_s_p, choice_set, a_grid, y_grid, z_grid, b_grid, lam_grid, eis, beta, rpost, + vphi, chi): + """ + Backward step function EGM with upper envelope. + + Dimensions: 0: labor market status, 1: productivity, 2: assets. + Status: 0: E, 1: Ub, 2: U, 3: O + State: 0: M, 1: B, 2: L + + Parameters + ---------- + V_p : array(Ns, Nz, Na), status-specific value function tomorrow + Va_p : array(Ns, Nz, Na), partial of status-specific value function tomorrow + Pi_s_p : array(Ns, Nx), Markov matrix for labor market shocks + Pi_z_p : array(Nz, Nz), (non-status-specific Markov) matrix for productivity + choice_set : list(Nz), discrete choices available in each state X + a_grid : array(Na), exogenous asset grid + y_grid : array(Ns, Nz), exogenous labor income grid + z_grid : array(Nz), productivity of employed (need for GE) + b_grid : array(Nz), productivity of unemployed (need for GE) + lam_grid : array(Nx), scale of taste shocks, specific to interim state + eis : float, EIS + beta : float, discount factor + rpost : float, ex-post interest rate + vphi : float, disutility of work + chi : float, disutility of search + + Returns + ------- + V : array(Ns, Nz, Na), status-specific value function today + Va : array(Ns, Nz, Na), partial of status-specific value function today + P : array(Nx, Ns, Nz, Na), probability of choosing status s in state x + c : array(Ns, Nz, Na), status-specific consumption policy today + a : array(Ns, Nz, Na), status-specific asset policy today + ze : array(Ns, Nz, Na), effective labor (average productivity if employed) + ui : array(Ns, Nz, Na), UI benefit claims (average productivity if unemployed) + """ + # shapes + Ns, Nz, Na = V_p.shape + Nx = Pi_s_p.shape[1] + + # PART 1: update value and policy functions + # a. discrete choice I expect to make tomorrow + V_p_X = np.empty((Nx, Nz, Na)) + Va_p_X = np.empty((Nx, Nz, Na)) + for ix in range(Nx): + V_p_ix = np.take(V_p, indices=choice_set[ix], axis=0) + Va_p_ix = np.take(Va_p, indices=choice_set[ix], axis=0) + P_p_ix = choice_prob(V_p_ix, lam_grid[ix]) + V_p_X[ix, ...] = logsum(V_p_ix, lam_grid[ix]) + Va_p_X[ix, ...] = np.sum(P_p_ix * Va_p_ix, axis=0) + + # b. compute expectation wrt labor market shock + V_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, V_p_X) + Va_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, Va_p_X) + + # b. compute expectation wrt productivity + V_p2 = np.einsum('ij,kjl->kil', Pi_z_p, V_p1) + Va_p2 = np.einsum('ij,kjl->kil', Pi_z_p, Va_p1) + + # d. consumption today on tomorrow's grid and endogenous asset grid today + W = beta * V_p2 + uc_nextgrid = beta * Va_p2 + c_nextgrid = uc_nextgrid ** (-eis) + a_nextgrid = (c_nextgrid + a_grid[np.newaxis, np.newaxis, :] - y_grid[:, :, np.newaxis]) / (1 + rpost) + + # e. 
upper envelope + # V, c = upper_envelope(W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, eis, vphi, chi) + imin, imax = nonconcave(uc_nextgrid) # bounds of non-concave region + V, c = upper_envelope_fast(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, eis, vphi, chi) + + # f. update Va + uc = c ** (-1 / eis) + Va = (1 + rpost) * uc + + # PART 2: things we need for GE + + # 2/a. asset policy + a = (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] + y_grid[:, :, np.newaxis] - c + + # 2/b. choice probabilities (don't need jacobian) + P = np.zeros((Nx, Ns, Nz, Na)) + for ix in range(Nx): + V_ix = np.take(V, indices=choice_set[ix], axis=0) + P[ix, choice_set[ix], ...] = choice_prob(V_ix, lam_grid[ix]) + + # 2/c. average productivity of employed + ze = np.zeros_like(a) + ze[0, ...] = z_grid[:, np.newaxis] + + # 2/d. UI claims + ui = np.zeros_like(a) + ui[1, ...] = b_grid[:, np.newaxis] + + return V, Va, a, c, P, ze, ui + + +def labor_income(z_grid, b_grid, atw, transfer, expiry, fU, fN, s): + # 1. income + yE = atw * z_grid + transfer + yUb = atw * b_grid + transfer + yN = np.zeros_like(yE) + transfer + y_grid = np.vstack((yE, yUb, yN, yN)) + + # 2. transition matrix for labor market status + Pi_s = np.array([[1 - s, s, 0], [fU, (1 - fU) * (1 - expiry), (1 - fU) * expiry], [fU, 0, 1 - fU], [fN, 0, 1 - fN]]) + + return Pi_s, y_grid + + +household.add_hetinput(labor_income, verbose=False) + + +"""Supporting functions for HA block""" + + +@guvectorize(['void(float64[:], uint32[:], uint32[:])'], '(nA) -> (),()', nopython=True) +def nonconcave(uc_nextgrid, imin, imax): + """Obtain bounds for non-concave region.""" + nA = uc_nextgrid.shape[-1] + vmin = np.inf + vmax = -np.inf + # step 1: find vmin & vmax + for ia in range(nA - 1): + if uc_nextgrid[ia + 1] > uc_nextgrid[ia]: + vmin_temp = uc_nextgrid[ia] + vmax_temp = uc_nextgrid[ia + 1] + if vmin_temp < vmin: + vmin = vmin_temp + if vmax_temp > vmax: + vmax = vmax_temp + + # 2/a Find imin (upper bound) + if vmin == np.inf: + imin_ = 0 + else: + ia = 0 + while ia < nA: + if uc_nextgrid[ia] < vmin: + break + ia += 1 + imin_ = ia + + # 2/b Find imax (lower bound) + if vmax == -np.inf: + imax_ = nA + else: + ia = nA + while ia > 0: + if uc_nextgrid[ia] > vmax: + break + ia -= 1 + imax_ = ia + + imin[:] = imin_ + imax[:] = imax_ + + +@njit +def upper_envelope_fast(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, *args): + """ + Interpolate consumption and value function to exogenous grid. Brute force but safe. + Parameters + ---------- + W : array(Ns, Nz, Na), status-specific end-of-period value function (on tomorrow's grid) + a_nextgrid : array(Ns, Nz, Na), endogenous asset grid (today's grid) + c_nextgrid : array(Ns, Nz, Na), consumption on endogenous grid (today's grid) + a_grid : array(Na), exogenous asset grid (tomorrow's grid) + y_grid : array(Ns, Nz), labor income + rpost : float, ex-post interest rate + args : (eis, vphi, chi) arguments for utility function + Returns + ------- + V : array(Ns, Nz, Na), status-specific value function on exogenous grid + c : array(Ns, Nz, Na), consumption on exogenous grid + """ + + # 0. initialize + Ns, Nz, Na = W.shape + c = np.zeros_like(W) + V = -np.inf * np.ones_like(W) + + # outer loop could run in parallel + for iw in range(Ns): + for iz in range(Nz): + ycur = y_grid[iw, iz] + imaxcur = imax[iw, iz] + imincur = imin[iw, iz] + + # 1. unconstrained case: loop through a_grid, find bracketing endogenous gridpoints and interpolate. 
+ # in concave region: exploit monotonicity and don't look for extra solutions + for ia in range(Na): + acur = a_grid[ia] + + # Region 1: below non-concave: exploit monotonicity + if (ia <= imaxcur) | (ia >= imincur): + iap = 0 + ap_low = a_nextgrid[iw, iz, iap] + ap_high = a_nextgrid[iw, iz, iap + 1] + while iap < Na - 2: # can this go up all the way? + if ap_high >= acur: + break + iap += 1 + ap_low = ap_high + ap_high = a_nextgrid[iw, iz, iap + 1] + # found bracket, interpolate value function and consumption + w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] + c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] + w_slope = (w_high - w_low) / (ap_high - ap_low) + c_slope = (c_high - c_low) / (ap_high - ap_low) + c_guess = c_low + c_slope * (acur - ap_low) + w_guess = w_low + w_slope * (acur - ap_low) + V[iw, iz, ia] = util(c_guess, iw, *args) + w_guess + c[iw, iz, ia] = c_guess + + # Region 2: non-concave region + else: + # try out all segments of endogenous grid + for iap in range(Na - 1): + # does this endogenous segment bracket ia? + ap_low, ap_high = a_nextgrid[iw, iz, iap], a_nextgrid[iw, iz, iap + 1] + interp = (ap_low <= acur <= ap_high) or (ap_low >= acur >= ap_high) + + # does it need to be extrapolated above the endogenous grid? + # if needed to be extrapolated below, we would be in constrained case + extrap_above = (iap == Na - 2) and (acur > a_nextgrid[iw, iz, Na - 1]) + + if interp or extrap_above: + # interpolation slopes + w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] + c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] + w_slope = (w_high - w_low) / (ap_high - ap_low) + c_slope = (c_high - c_low) / (ap_high - ap_low) + + # implied guess + c_guess = c_low + c_slope * (acur - ap_low) + w_guess = w_low + w_slope * (acur - ap_low) + + # value + v_guess = util(c_guess, iw, *args) + w_guess + + # select best value for this segment + if v_guess > V[iw, iz, ia]: + V[iw, iz, ia] = v_guess + c[iw, iz, ia] = c_guess + + # 2. 
constrained case: remember that we have the inverse asset policy a(a') + ia = 0 + while ia < Na and a_grid[ia] <= a_nextgrid[iw, iz, 0]: + c[iw, iz, ia] = (1 + rpost) * a_grid[ia] + ycur + V[iw, iz, ia] = util(c[iw, iz, ia], iw, *args) + W[iw, iz, 0] + ia += 1 + + return V, c + + +'''Simple blocks''' + + +@simple +def income_state_vars(mean_z, rho_z, sd_z, nZ, uirate, uicap): + # productivity + z_grid, pi_z, Pi_z = markov_rouwenhorst(rho=rho_z, sigma=sd_z, N=nZ) + z_grid *= mean_z + + # unemployment benefits + b_grid = uirate * z_grid + b_grid[b_grid > uicap] = uicap + return z_grid, b_grid, pi_z, Pi_z + + +@simple +def employment_state_vars(lamM, lamB, lamL): + choice_set = [[0, 3], [1, 3], [2, 3]] + lam_grid = np.array([lamM, lamB, lamL]) + return choice_set, lam_grid + + +@simple +def asset_state_vars(amin, amax, nA): + a_grid = agrid(amin=amin, amax=amax, n=nA) + return a_grid + + +'''Rest of the model''' + + +@simple +def firm(Z, K, L, mc, alpha, delta, psi): + Y = Z * K(-1) ** alpha * L ** (1 - alpha) + w = (1 - alpha) * mc * Y / L + u = 1.0 + Q = 1 + psi * (K / K(-1) - 1) + I = K - (1 - delta) * K(-1) + psi / 2 * (K / K(-1) - 1) ** 2 * K(-1) + transfer = Y - w * L - I + return Y, w, u, Q, I, transfer + + +@simple +def monetary(pi, rstar, phi_pi): + # rpost = (1 + rstar(-1) + phi_pi * pi(-1)) / (1 + pi) - 1 + rpost = rstar + return rpost + + +@simple +def nkpc(pi, mc, eps, Y, rpost, kappa): + nkpc_res = kappa * (mc - (eps - 1) / eps) + Y(+1) / Y * np.log(1 + pi(+1)) / (1 + rpost(+1)) - np.log(1 + pi) + return nkpc_res + + +@simple +def valuation(rpost, mc, Y, K, Q, delta, psi, alpha): + val = alpha * mc(+1) * Y(+1) / K - (K(+1) / K - (1 - delta) + psi / 2 * (K(+1) / K - 1) ** 2) + \ + K(+1) / K * Q(+1) - (1 + rpost(+1)) * Q + return val + + +@solved(unknowns={'B': [0.0, 10.0]}, targets=['budget'], solver='brentq') +def fiscal(B, tax, w, rpost, G, Ze, Ui): + budget = (1 + rpost) * B + G + (1 - tax) * w * Ui - tax * w * Ze - B + # tax_rule = tax - tax.ss - phi * (B(-1) - B.ss) / Y.ss + tax_rule = B - B.ss + return budget, tax_rule + + +@simple +def fiscal2(w, tax): + atw = (1 - tax) * w + return atw + + +@solved(unknowns={'fU': 0.25, 'fN': 0.1, 's': 0.025}, targets=['fU_res', 'fN_res', 's_res'], solver='broyden_custom') +def flows(Y, fU, fN, s, fU_eps, fN_eps, s_eps): + fU_res = fU.ss * (Y / Y.ss) ** fU_eps - fU + fN_res = fN.ss * (Y / Y.ss) ** fN_eps - fN + s_res = s.ss * (Y / Y.ss) ** s_eps - s + return fU_res, fN_res, s_res + + +@simple +def mkt_clearing(A, B, Y, C, I, G, L, Ze): + asset_mkt = A - B + goods_mkt = Y - C - I - G + labor_mkt = L - Ze + return asset_mkt, goods_mkt, labor_mkt + + +@simple +def partial_ss(Y, Ze, rpost, alpha, delta, eps): + # uses u=1 + L = Ze + mc = (eps - 1) / eps + K = mc * Y * alpha / (rpost + delta) + # transfer = rpost * K + Y * (1 - mc) + + # depend on L + Z = Y / (K ** alpha * L ** (1 - alpha)) + w = (1 - alpha) * mc * Y / L + # atw = (1 - tax) * w + I = delta * K + Q = 1.0 + return mc, K, Z, w, I, Q + + +'''Try this''' + + +hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, household], name='SingleMen') + +# calibration = {'beta': 0.99, 'eis': 1.0, 'vphi': 0.65, 'chi': 0.6, +# 'rpost': 0.002, 'uicap': 0.66, 'uirate': 0.5, 'atw': 0.9, 'transfer': 0.15, +# 'expiry': 1/6, 'fU': 0.25, 'fN': 0.1, 's': 0.025, +# 'amin': 0.0, 'amax': 500, 'nA': 100, +# 'lamM': 0.01, 'lamB': 0.01, 'lamL': 0.04, +# 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, 'nZ': 7} +# +# hh_ss = hh.solve_steady_state(calibration, solver='hybr', 
unknowns={'mean_z': 1.2}, targets={'Ze': 1.0}) + +# J = household.jacobian(ss, exogenous=['beta', 'fU'], T=10) + +calibration = {'eis': 1.0, 'vphi': 0.65, 'chi': 0.6, + 'uicap': 0.66, 'uirate': 0.5, 'expiry': 1/6, 'fU': 0.25, 'fN': 0.1, 's': 0.025, + 'amin': 0.0, 'amax': 500, 'nA': 100, 'lamM': 0.01, 'lamB': 0.01, 'lamL': 0.04, + 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, 'nZ': 7, + 'kappa': 0.03, 'phi_pi': 1.25, 'rstar': 0.002, 'pi': 0, 'alpha': 0.2, 'psi': 30, 'delta': 0.0083, + 'eps': 10, 'fU_eps': 10.0, 'fN_eps': 5.0, 's_eps': -8.0, 'tax': 0.3, 'B': 3.0} + +hank = create_model([hh, firm, monetary, valuation, nkpc, fiscal, fiscal2, flows, mkt_clearing], name='HANK') + +ss = hank.solve_steady_state(calibration, solver='hybr', dissolve=[fiscal, flows], + unknowns={'Z': 1.0, 'L': 1.0, 'mc': 0.9, 'G': 0.3, + 'beta': 0.99, 'K': 6.0}, + targets={'Y': 1.0, 'labor_mkt': 0.0, 'nkpc_res': 0.0, 'budget': 0.0, + 'asset_mkt': 0.0, 'val': 0.0}) + + + From 13b7f2ba8ba4b57cfbd0577b1e54b28612e5833c Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 15 Jun 2021 11:30:35 -0500 Subject: [PATCH 07/35] Testing DisContBlock: ss with helper works. --- dcblock.py | 64 +++++++++++++++++++++++++----------------------------- 1 file changed, 29 insertions(+), 35 deletions(-) diff --git a/dcblock.py b/dcblock.py index 9778928..5cc87b1 100644 --- a/dcblock.py +++ b/dcblock.py @@ -335,14 +335,16 @@ def asset_state_vars(amin, amax, nA): @simple -def firm(Z, K, L, mc, alpha, delta, psi): +def firm(Z, K, L, mc, alpha, delta0, delta1, psi): Y = Z * K(-1) ** alpha * L ** (1 - alpha) w = (1 - alpha) * mc * Y / L - u = 1.0 + u = (alpha / delta0 / delta1 * mc * Y / K(-1)) ** (1 / delta1) + # u = 1.0 + delta = delta0 * u ** delta1 Q = 1 + psi * (K / K(-1) - 1) I = K - (1 - delta) * K(-1) + psi / 2 * (K / K(-1) - 1) ** 2 * K(-1) transfer = Y - w * L - I - return Y, w, u, Q, I, transfer + return Y, w, u, delta, Q, I, transfer @simple @@ -366,7 +368,7 @@ def valuation(rpost, mc, Y, K, Q, delta, psi, alpha): @solved(unknowns={'B': [0.0, 10.0]}, targets=['budget'], solver='brentq') -def fiscal(B, tax, w, rpost, G, Ze, Ui): +def fiscal1(B, tax, w, rpost, G, Ze, Ui): budget = (1 + rpost) * B + G + (1 - tax) * w * Ui - tax * w * Ze - B # tax_rule = tax - tax.ss - phi * (B(-1) - B.ss) / Y.ss tax_rule = B - B.ss @@ -396,52 +398,44 @@ def mkt_clearing(A, B, Y, C, I, G, L, Ze): @simple -def partial_ss(Y, Ze, rpost, alpha, delta, eps): +def helper1(A, tax, w, Ze, rpost, Ui): + # after hh + B = A + G = tax * w * Ze - rpost * B - (1 - tax) * w * Ui + return B, G + + +@simple +def helper2(eps, rpost, Y, L, alpha, delta0): # uses u=1 - L = Ze mc = (eps - 1) / eps - K = mc * Y * alpha / (rpost + delta) - # transfer = rpost * K + Y * (1 - mc) - - # depend on L + K = mc * Y * alpha / (rpost + delta0) + delta1 = alpha / delta0 * mc * Y / K Z = Y / (K ** alpha * L ** (1 - alpha)) w = (1 - alpha) * mc * Y / L - # atw = (1 - tax) * w - I = delta * K - Q = 1.0 - return mc, K, Z, w, I, Q + return mc, K, delta1, Z, w '''Try this''' -hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, household], name='SingleMen') - -# calibration = {'beta': 0.99, 'eis': 1.0, 'vphi': 0.65, 'chi': 0.6, -# 'rpost': 0.002, 'uicap': 0.66, 'uirate': 0.5, 'atw': 0.9, 'transfer': 0.15, -# 'expiry': 1/6, 'fU': 0.25, 'fN': 0.1, 's': 0.025, -# 'amin': 0.0, 'amax': 500, 'nA': 100, -# 'lamM': 0.01, 'lamB': 0.01, 'lamL': 0.04, -# 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, 'nZ': 7} -# -# hh_ss = hh.solve_steady_state(calibration, 
solver='hybr', unknowns={'mean_z': 1.2}, targets={'Ze': 1.0}) - -# J = household.jacobian(ss, exogenous=['beta', 'fU'], T=10) - calibration = {'eis': 1.0, 'vphi': 0.65, 'chi': 0.6, 'uicap': 0.66, 'uirate': 0.5, 'expiry': 1/6, 'fU': 0.25, 'fN': 0.1, 's': 0.025, 'amin': 0.0, 'amax': 500, 'nA': 100, 'lamM': 0.01, 'lamB': 0.01, 'lamL': 0.04, - 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, 'nZ': 7, - 'kappa': 0.03, 'phi_pi': 1.25, 'rstar': 0.002, 'pi': 0, 'alpha': 0.2, 'psi': 30, 'delta': 0.0083, - 'eps': 10, 'fU_eps': 10.0, 'fN_eps': 5.0, 's_eps': -8.0, 'tax': 0.3, 'B': 3.0} + 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, 'nZ': 7, 'beta': 0.985, + 'kappa': 0.03, 'phi_pi': 1.25, 'rstar': 0.002, 'pi': 0, 'alpha': 0.2, 'psi': 30, 'delta0': 0.0083, + 'eps': 10, 'fU_eps': 10.0, 'fN_eps': 5.0, 's_eps': -8.0, 'tax': 0.3} -hank = create_model([hh, firm, monetary, valuation, nkpc, fiscal, fiscal2, flows, mkt_clearing], name='HANK') +hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, household], name='SingleMen') +hank = create_model([hh, firm, monetary, valuation, nkpc, fiscal1, fiscal2, flows, mkt_clearing], name='HANK') -ss = hank.solve_steady_state(calibration, solver='hybr', dissolve=[fiscal, flows], - unknowns={'Z': 1.0, 'L': 1.0, 'mc': 0.9, 'G': 0.3, - 'beta': 0.99, 'K': 6.0}, +ss = hank.solve_steady_state(calibration, solver='hybr', dissolve=[fiscal1, flows], + unknowns={'Z': 0.63, 'L': 0.86, 'mc': 0.9, 'G': 0.2, 'delta1': 1.2, + 'B': 3.0, 'K': 17.0}, targets={'Y': 1.0, 'labor_mkt': 0.0, 'nkpc_res': 0.0, 'budget': 0.0, - 'asset_mkt': 0.0, 'val': 0.0}) + 'asset_mkt': 0.0, 'val': 0.0, 'u': 1.0}, + helper_blocks=[helper1, helper2], + helper_targets=['asset_mkt', 'budget', 'val', 'nkpc_res', 'Y', 'u']) From e0ef2a3cae19a0078bf4b5a0b9e0059284511b2f Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 15 Jun 2021 13:16:40 -0500 Subject: [PATCH 08/35] Bijection class from @mrognlie and basic tests. --- .../blocks/support/bijection.py | 44 +++++++++++++++++++ tests/base/test_public_classes.py | 15 +++++++ 2 files changed, 59 insertions(+) create mode 100644 src/sequence_jacobian/blocks/support/bijection.py diff --git a/src/sequence_jacobian/blocks/support/bijection.py b/src/sequence_jacobian/blocks/support/bijection.py new file mode 100644 index 0000000..38d1b39 --- /dev/null +++ b/src/sequence_jacobian/blocks/support/bijection.py @@ -0,0 +1,44 @@ +class Bijection: + def __init__(self, map): + # identity always implicit, remove if there explicitly + self.map = {k: v for k, v in map.items() if k != v} + invmap = {} + for k, v in map.items(): + if v in invmap: + raise ValueError(f'Duplicate value {v}, for keys {invmap[v]} and {k}') + invmap[v] = k + self.invmap = invmap + + @property + def inv(self): + invmap = Bijection.__new__(Bijection) # better way to do this? 
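+        # (__new__ skips __init__, so the inverse can be filled in directly below
+        #  without re-running the duplicate-value check on the map)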
+ invmap.map = self.invmap + invmap.invmap = self.map + return invmap + + def __repr__(self): + return f'Bijection({repr(self.map)})' + + def __getitem__(self, k): + return self.map.get(k, k) + + def __matmul__(self, x): + if isinstance(x, Bijection): + # compose self: v -> u with x: w -> v + # assume everything missing in either is the identity + M = {} + for v, u in self.map.items(): + w = x.invmap.get(v, v) + M[w] = u + for w, v in x.map.items(): + if v not in self.map: + M[w] = v + return Bijection(M) + elif isinstance(x, dict): + return {self[k]: v for k, v in x.items()} + elif isinstance(x, list): + return [self[k] for k in x] + elif isinstance(x, tuple): + return tuple(self[k] for k in x) + else: + return NotImplementedError diff --git a/tests/base/test_public_classes.py b/tests/base/test_public_classes.py index 57a86e8..3383393 100644 --- a/tests/base/test_public_classes.py +++ b/tests/base/test_public_classes.py @@ -1,10 +1,12 @@ """Test public-facing classes""" import numpy as np +import pytest from sequence_jacobian import het from sequence_jacobian.steady_state.classes import SteadyStateDict from sequence_jacobian.blocks.support.impulse import ImpulseDict +from sequence_jacobian.blocks.support.bijection import Bijection def test_steadystatedict(): @@ -72,3 +74,16 @@ def test_impulsedict(krusell_smith_dag): dC1 = 100 * ir_lin['C'] / ss['C'] dC2 = 100 * ir_lin[['C']] / ss assert np.allclose(dC1, dC2['C']) + + +def test_bijection(): + mymap = Bijection({'a1': 'a', 'b1': 'b'}) + mymapinv = mymap.inv + + assert mymap['a1'] == 'a' and mymap['b1'] == 'b' + assert mymapinv['a'] == 'a1' and mymapinv['b'] == 'b1' + + with pytest.raises(ValueError): + badmap = Bijection({'a1': 'a', 'b1': 'a'}) + + From c0fea5c6c3b0e9fa9a21563fac61e38018c8c862 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 15 Jun 2021 13:29:56 -0500 Subject: [PATCH 09/35] Bijection classL test composition as well. --- tests/base/test_public_classes.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/base/test_public_classes.py b/tests/base/test_public_classes.py index 3383393..2d0d0e6 100644 --- a/tests/base/test_public_classes.py +++ b/tests/base/test_public_classes.py @@ -77,13 +77,16 @@ def test_impulsedict(krusell_smith_dag): def test_bijection(): - mymap = Bijection({'a1': 'a', 'b1': 'b'}) + # generate and invert + mymap = Bijection({'a': 'a1', 'b': 'b1'}) mymapinv = mymap.inv + assert mymap['a'] == 'a1' and mymap['b'] == 'b1' + assert mymapinv['a1'] == 'a' and mymapinv['b1'] == 'b' - assert mymap['a1'] == 'a' and mymap['b1'] == 'b' - assert mymapinv['a'] == 'a1' and mymapinv['b'] == 'b1' - + # duplicate keys rejected with pytest.raises(ValueError): - badmap = Bijection({'a1': 'a', 'b1': 'a'}) - + Bijection({'a': 'a1', 'b': 'a1'}) + # composition + mymap2 = Bijection({'A': 'a'}) + assert (mymap @ mymap2)['A'] == 'a1' From a5029df0fce7dbe2e5bc0ecc628091c310e6f6e7 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 15 Jun 2021 14:26:01 -0500 Subject: [PATCH 10/35] Blocks have `.M` attribute that stores the bijection membrane. 
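The `.M` membrane is a Bijection meant to translate variable names at the block boundary; at this stage `remap` only composes it with the supplied map and stores the result. A minimal usage sketch of the class added above (the rename map and variable names here are made up for illustration):

from sequence_jacobian.blocks.support.bijection import Bijection

hh_map = Bijection({'a': 'a_men', 'c': 'c_men'})

# dict keys and list entries are translated; names outside the map pass through unchanged
assert hh_map @ {'a': 1.0, 'r': 0.02} == {'a_men': 1.0, 'r': 0.02}
assert hh_map @ ['a', 'c', 'r'] == ['a_men', 'c_men', 'r']

# composition applies the right-hand map first, the same pattern remap uses (self.M @ Bijection(map))
m1, m2 = Bijection({'x': 'y'}), Bijection({'y': 'z'})
assert (m2 @ m1)['x'] == 'z'
assert (m2 @ m1).inv['z'] == 'x'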
--- src/sequence_jacobian/blocks/discont_block.py | 1 + src/sequence_jacobian/blocks/het_block.py | 2 ++ src/sequence_jacobian/blocks/simple_block.py | 2 ++ src/sequence_jacobian/primitives.py | 7 +++++++ 4 files changed, 12 insertions(+) diff --git a/src/sequence_jacobian/blocks/discont_block.py b/src/sequence_jacobian/blocks/discont_block.py index f0e3873..b2888a3 100644 --- a/src/sequence_jacobian/blocks/discont_block.py +++ b/src/sequence_jacobian/blocks/discont_block.py @@ -56,6 +56,7 @@ def __init__(self, back_step_fun, exogenous, policy, disc_policy, backward, back Currently, we only support up to two policy variables. """ self.name = back_step_fun.__name__ + self.M = Bijection({}) # self.back_step_fun is one iteration of the backward step function pertaining to a given HetBlock. # i.e. the function pertaining to equation (14) in the paper: v_t = curlyV(v_{t+1}, X_t) diff --git a/src/sequence_jacobian/blocks/het_block.py b/src/sequence_jacobian/blocks/het_block.py index 259d4d8..11b1c4d 100644 --- a/src/sequence_jacobian/blocks/het_block.py +++ b/src/sequence_jacobian/blocks/het_block.py @@ -7,6 +7,7 @@ from .. import utilities as utils from ..steady_state.classes import SteadyStateDict from ..jacobian.classes import JacobianDict +from .support.bijection import Bijection from ..devtools.deprecate import rename_output_list_to_outputs from ..utilities.misc import verify_saved_jacobian @@ -55,6 +56,7 @@ def __init__(self, back_step_fun, exogenous, policy, backward, backward_init=Non Currently, we only support up to two policy variables. """ self.name = back_step_fun.__name__ + self.M = Bijection({}) # self.back_step_fun is one iteration of the backward step function pertaining to a given HetBlock. # i.e. the function pertaining to equation (14) in the paper: v_t = curlyV(v_{t+1}, X_t) diff --git a/src/sequence_jacobian/blocks/simple_block.py b/src/sequence_jacobian/blocks/simple_block.py index df43848..1f8cf16 100644 --- a/src/sequence_jacobian/blocks/simple_block.py +++ b/src/sequence_jacobian/blocks/simple_block.py @@ -6,6 +6,7 @@ from .support.simple_displacement import ignore, Displace, AccumulatedDerivative from .support.impulse import ImpulseDict +from .support.bijection import Bijection from ..primitives import Block from ..steady_state.classes import SteadyStateDict from ..jacobian.classes import JacobianDict, SimpleSparse, ZeroMatrix @@ -40,6 +41,7 @@ def __init__(self, f): self.output_list = misc.output_list(f) self.inputs = set(self.input_list) self.outputs = set(self.output_list) + self.M = Bijection({}) def __repr__(self): return f"" diff --git a/src/sequence_jacobian/primitives.py b/src/sequence_jacobian/primitives.py index 15a2def..6ea2095 100644 --- a/src/sequence_jacobian/primitives.py +++ b/src/sequence_jacobian/primitives.py @@ -4,6 +4,7 @@ from abc import ABCMeta as NativeABCMeta from numbers import Real from typing import Any, Dict, Union, Tuple, Optional, List +from copy import deepcopy from .steady_state.drivers import steady_state from .steady_state.support import provide_solver_default @@ -12,6 +13,7 @@ from .steady_state.classes import SteadyStateDict from .jacobian.classes import JacobianDict from .blocks.support.impulse import ImpulseDict +from .blocks.support.bijection import Bijection # Basic types Array = Any @@ -135,3 +137,8 @@ def solve_jacobian(self, ss: Dict[str, Union[Real, Array]], variables to be solved for and the target conditions that must hold in general equilibrium""" blocks = self.blocks if hasattr(self, "blocks") else [self] return 
get_G(blocks, exogenous, unknowns, targets, T=T, ss=ss, Js=Js, **kwargs) + + def remap(self, map): + remapped = deepcopy(self) + remapped.M = self.M @ Bijection(map) + return remapped From c9d31e77d7264b3cb251fd2ae3bf07794f33a6d2 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 16 Jun 2021 09:22:58 -0500 Subject: [PATCH 11/35] Fixed rogue block.name in steady state solver. --- src/sequence_jacobian/steady_state/drivers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sequence_jacobian/steady_state/drivers.py b/src/sequence_jacobian/steady_state/drivers.py index e005c15..4f7f154 100644 --- a/src/sequence_jacobian/steady_state/drivers.py +++ b/src/sequence_jacobian/steady_state/drivers.py @@ -163,7 +163,7 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k if block in dissolve and "solver" in valid_input_kwargs: input_kwarg_dict["solver"] = "solved" input_kwarg_dict["unknowns"] = {k: v for k, v in calibration.items() if k in block.unknowns} - elif block.name not in dissolve and block_unknowns_in_toplevel_unknowns: + elif block not in dissolve and block_unknowns_in_toplevel_unknowns: raise RuntimeError(f"The block '{block.name}' is not in the kwarg `dissolve` but its unknowns," f" {set(block.unknowns.keys())} are a subset of the top-level unknowns," f" {set(toplevel_unknowns)}.\n" From 681c2644b3f520e3cde22d565b0d46e4df77481f Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 16 Jun 2021 09:59:51 -0500 Subject: [PATCH 12/35] Removing deprecated .ss, .jac, .td methods in progress. --- src/sequence_jacobian/blocks/discont_block.py | 12 ++++---- src/sequence_jacobian/blocks/het_block.py | 28 ++++------------- src/sequence_jacobian/blocks/simple_block.py | 18 ----------- src/sequence_jacobian/blocks/solved_block.py | 18 ----------- src/sequence_jacobian/models/krusell_smith.py | 22 +++++++++----- tests/base/test_jacobian.py | 30 +++++++++---------- tests/base/test_simple_block.py | 10 +++---- 7 files changed, 46 insertions(+), 92 deletions(-) diff --git a/src/sequence_jacobian/blocks/discont_block.py b/src/sequence_jacobian/blocks/discont_block.py index b2888a3..a9e4402 100644 --- a/src/sequence_jacobian/blocks/discont_block.py +++ b/src/sequence_jacobian/blocks/discont_block.py @@ -157,14 +157,14 @@ def __repr__(self): return f"" '''Part 2: high-level routines, with first three called analogously to SimpleBlock counterparts - - ss : do backward and forward iteration until convergence to get complete steady state - - td : do backward and forward iteration up to T to compute dynamics given some shocks - - jac : compute jacobians of outputs with respect to shocked inputs, using fake news algorithm - - ajac : compute asymptotic columns of jacobians output by jac, also using fake news algorithm + - steady_state : do backward and forward iteration until convergence to get complete steady state + - impulse_nonlinear : do backward and forward iteration up to T to compute dynamics given some shocks + - impulse_linear : apply jacobians to compute linearized dynamics given some shocks + - jacobian : compute jacobians of outputs with respect to shocked inputs, using fake news algorithm - add_hetinput : add a hetinput to the HetBlock that first processes inputs through function hetinput - add_hetoutput: add a hetoutput to the HetBlock that is computed after the entire ss computation, or after - each backward iteration step in td + each backward iteration step in td, jacobian is not computed for these! 
''' @@ -771,7 +771,7 @@ def J_from_F(F): J[1:, t] += J[:-1, t - 1] return J - '''Part 5: helpers for .jac and .ajac: preliminary processing''' + '''Part 5: helpers for .jac: preliminary processing''' def jac_prelim(self, ss): """Helper that does preliminary processing of steady state for fake news algorithm. diff --git a/src/sequence_jacobian/blocks/het_block.py b/src/sequence_jacobian/blocks/het_block.py index 11b1c4d..bb76e11 100644 --- a/src/sequence_jacobian/blocks/het_block.py +++ b/src/sequence_jacobian/blocks/het_block.py @@ -155,34 +155,16 @@ def __repr__(self): return f"" '''Part 2: high-level routines, with first three called analogously to SimpleBlock counterparts - - ss : do backward and forward iteration until convergence to get complete steady state - - td : do backward and forward iteration up to T to compute dynamics given some shocks - - jac : compute jacobians of outputs with respect to shocked inputs, using fake news algorithm - - ajac : compute asymptotic columns of jacobians output by jac, also using fake news algorithm + - steady_state : do backward and forward iteration until convergence to get complete steady state + - impulse_nonlinear : do backward and forward iteration up to T to compute dynamics given some shocks + - impulse_linear : apply jacobians to compute linearized dynamics given some shocks + - jacobian : compute jacobians of outputs with respect to shocked inputs, using fake news algorithm - add_hetinput : add a hetinput to the HetBlock that first processes inputs through function hetinput - add_hetoutput: add a hetoutput to the HetBlock that is computed after the entire ss computation, or after - each backward iteration step in td + each backward iteration step in td, jacobian is not computed for these! ''' - # TODO: Deprecated methods, to be removed! - def ss(self, **kwargs): - warnings.warn("This method has been deprecated. Please invoke by calling .steady_state", DeprecationWarning) - return self.steady_state(kwargs) - - def td(self, ss, **kwargs): - warnings.warn("This method has been deprecated. Please invoke by calling .impulse_nonlinear", - DeprecationWarning) - return self.impulse_nonlinear(ss, **kwargs) - - def jac(self, ss, shock_list=None, T=None, **kwargs): - if shock_list is None: - shock_list = list(self.inputs) - warnings.warn("This method has been deprecated. Please invoke by calling .jacobian.\n" - "Also, note that the kwarg `shock_list` in .jacobian has been renamed to `shocked_vars`", - DeprecationWarning) - return self.jacobian(ss, shock_list, T, **kwargs) - def steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, forward_tol=1E-10, forward_maxit=100_000, hetoutput=False): """Evaluate steady state HetBlock using keyword args for all inputs. Analog to SimpleBlock.ss. diff --git a/src/sequence_jacobian/blocks/simple_block.py b/src/sequence_jacobian/blocks/simple_block.py index 1f8cf16..25c28cc 100644 --- a/src/sequence_jacobian/blocks/simple_block.py +++ b/src/sequence_jacobian/blocks/simple_block.py @@ -46,24 +46,6 @@ def __init__(self, f): def __repr__(self): return f"" - # TODO: Deprecated methods, to be removed! - def ss(self, **kwargs): - warnings.warn("This method has been deprecated. Please invoke by calling .steady_state", DeprecationWarning) - return self.steady_state(kwargs) - - def td(self, ss, **kwargs): - warnings.warn("This method has been deprecated. 
Please invoke by calling .impulse_nonlinear", - DeprecationWarning) - return self.impulse_nonlinear(ss, exogenous=kwargs) - - def jac(self, ss, T=None, shock_list=None): - if shock_list is None: - shock_list = [] - warnings.warn("This method has been deprecated. Please invoke by calling .jacobian.\n" - "Also, note that the kwarg `shock_list` in .jacobian has been renamed to `shocked_vars`", - DeprecationWarning) - return self.jacobian(ss, exogenous=shock_list, T=T) - def steady_state(self, calibration): input_args = {k: ignore(v) for k, v in calibration.items() if k in misc.input_list(self.f)} output_vars = [misc.numeric_primitive(o) for o in self.f(**input_args)] if len(self.output_list) > 1 else [ diff --git a/src/sequence_jacobian/blocks/solved_block.py b/src/sequence_jacobian/blocks/solved_block.py index 3a2caa7..7f37cea 100644 --- a/src/sequence_jacobian/blocks/solved_block.py +++ b/src/sequence_jacobian/blocks/solved_block.py @@ -66,24 +66,6 @@ def __init__(self, blocks, name, unknowns, targets, solver=None, solver_kwargs={ def __repr__(self): return f"" - # TODO: Deprecated methods, to be removed! - def ss(self, **kwargs): - warnings.warn("This method has been deprecated. Please invoke by calling .steady_state", DeprecationWarning) - return self.steady_state(kwargs) - - def td(self, ss, **kwargs): - warnings.warn("This method has been deprecated. Please invoke by calling .impulse_nonlinear", - DeprecationWarning) - return self.impulse_nonlinear(ss, **kwargs) - - def jac(self, ss, T=None, shock_list=None, **kwargs): - if shock_list is None: - shock_list = [] - warnings.warn("This method has been deprecated. Please invoke by calling .jacobian.\n" - "Also, note that the kwarg `shock_list` in .jacobian has been renamed to `shocked_vars`", - DeprecationWarning) - return self.jacobian(ss, shock_list, T, **kwargs) - def steady_state(self, calibration, unknowns=None, helper_blocks=None, solver=None, consistency_check=False, ttol=1e-9, ctol=1e-9, verbose=False): # If this is the first time invoking steady_state/solve_steady_state, cache the sorted indices diff --git a/src/sequence_jacobian/models/krusell_smith.py b/src/sequence_jacobian/models/krusell_smith.py index 82e4559..7058661 100644 --- a/src/sequence_jacobian/models/krusell_smith.py +++ b/src/sequence_jacobian/models/krusell_smith.py @@ -4,6 +4,7 @@ from .. 
import utilities as utils from ..blocks.simple_block import simple from ..blocks.het_block import het +from ..steady_state.classes import SteadyStateDict '''Part 1: HA block''' @@ -105,21 +106,28 @@ def ks_ss(lb=0.98, ub=0.999, r=0.01, eis=1, delta=0.025, alpha=0.11, rho=0.966, w = (1 - alpha) * Z * (alpha * Z / rk) ** (alpha / (1 - alpha)) # figure out initializer - coh = (1 + r) * a_grid[np.newaxis, :] + w * e_grid[:, np.newaxis] - Va = (1 + r) * (0.1 * coh) ** (-1 / eis) + # coh = (1 + r) * a_grid[np.newaxis, :] + w * e_grid[:, np.newaxis] + # Va = (1 + r) * (0.1 * coh) ** (-1 / eis) + calibration = {'Pi': Pi, 'a_grid': a_grid, 'e_grid': e_grid, 'r': r, 'w': w, 'eis': eis} + calibration = SteadyStateDict(calibration) # solve for beta consistent with this beta_min = lb / (1 + r) beta_max = ub / (1 + r) - beta, sol = opt.brentq(lambda bet: household.ss(Pi=Pi, a_grid=a_grid, e_grid=e_grid, r=r, w=w, beta=bet, eis=eis, - Va=Va)['A'] - K, beta_min, beta_max, full_output=True) + def res(beta_): + calibration['beta'] = beta_ + return household.steady_state(calibration)['A'] - K + + beta, sol = opt.brentq(res, beta_min, beta_max, full_output=True) + if not sol.converged: raise ValueError('Steady-state solver did not converge.') # extra evaluation to report variables - ss = household.ss(Pi=Pi, a_grid=a_grid, e_grid=e_grid, r=r, w=w, beta=beta, eis=eis, Va=Va) - ss.update({'Pi': Pi, 'Z': Z, 'K': K, 'L': 1, 'Y': Y, 'alpha': alpha, 'delta': delta, + calibration['beta'] = beta + ss = household.steady_state(calibration) + ss.update({'Z': Z, 'K': K, 'L': 1.0, 'Y': Y, 'alpha': alpha, 'delta': delta, 'goods_mkt': Y - ss['C'] - delta * K, 'nA': nA, 'amax': amax, 'sigma': sigma, - 'rho': rho, 'nS': nS, 'asset_mkt': ss["A"] - K}) + 'rho': rho, 'nS': nS, 'asset_mkt': ss['A'] - K}) return ss diff --git a/tests/base/test_jacobian.py b/tests/base/test_jacobian.py index e097bb9..431b36d 100644 --- a/tests/base/test_jacobian.py +++ b/tests/base/test_jacobian.py @@ -15,8 +15,8 @@ def test_ks_jac(krusell_smith_dag): G2 = ks_model.solve_jacobian(ss, exogenous, unknowns, targets, T=T) # Manually calculate the general equilibrium Jacobian - J_firm = firm.jac(ss, shock_list=['K', 'Z']) - J_ha = household.jac(ss, T=T, shock_list=['r', 'w']) + J_firm = firm.jacobian(ss, exogenous=['K', 'Z']) + J_ha = household.jacobian(ss, T=T, exogenous=['r', 'w']) J_curlyK_K = J_ha['A']['r'] @ J_firm['r']['K'] + J_ha['A']['w'] @ J_firm['w']['K'] J_curlyK_Z = J_ha['A']['r'] @ J_firm['r']['Z'] + J_ha['A']['w'] @ J_firm['w']['Z'] J_curlyK = {'curlyK': {'K': J_curlyK_K, 'Z': J_curlyK_Z}} @@ -62,8 +62,8 @@ def test_fake_news_v_actual(one_asset_hank_dag): household = hank_model._blocks_unsorted[0] T = 40 - shock_list = ['w', 'r', 'Div', 'Tax'] - Js = household.jac(ss, shock_list, T) + exogenous = ['w', 'r', 'Div', 'Tax'] + Js = household.jacobian(ss, exogenous, T) output_list = household.non_back_iter_outputs # Preliminary processing of the steady state @@ -72,7 +72,7 @@ def test_fake_news_v_actual(one_asset_hank_dag): # Step 1 of fake news algorithm: backward iteration h = 1E-4 curlyYs, curlyDs = {}, {} - for i in shock_list: + for i in exogenous: curlyYs[i], curlyDs[i] = household.backward_iteration_fakenews(i, output_list, ssin_dict, ssout_list, ss.internal["household"]['D'], Pi.T.copy(), sspol_i, sspol_pi, sspol_space, @@ -94,7 +94,7 @@ def test_fake_news_v_actual(one_asset_hank_dag): # Step 3 of fake news algorithm: combine everything to make the fake news matrix for each output-input pair Fs = {o.capitalize(): {} for o in output_list} 
for o in output_list: - for i in shock_list: + for i in exogenous: F = np.empty((T,T)) F[0, ...] = curlyYs[i][o] F[1:, ...] = curlyPs[o].reshape(T-1, -1) @ curlyDs[i].reshape(T, -1).T @@ -109,7 +109,7 @@ def test_fake_news_v_actual(one_asset_hank_dag): Js_original = Js Js = {o.capitalize(): {} for o in output_list} for o in output_list: - for i in shock_list: + for i in exogenous: # implement recursion (30): start with J=F and accumulate terms along diagonal J = Fs[o.capitalize()][i].copy() for t in range(1, J.shape[1]): @@ -117,7 +117,7 @@ def test_fake_news_v_actual(one_asset_hank_dag): Js[o.capitalize()][i] = J for o in output_list: - for i in shock_list: + for i in exogenous: assert np.array_equal(Js[o.capitalize()][i], Js_original[o.capitalize()][i]) @@ -126,23 +126,23 @@ def test_fake_news_v_direct_method(one_asset_hank_dag): household = hank_model._blocks_unsorted[0] T = 40 - shock_list = 'r' + exogenous = 'r' output_list = household.non_back_iter_outputs h = 1E-4 - Js = household.jac(ss, shock_list, T) - Js_direct = {o.capitalize(): {i: np.empty((T, T)) for i in shock_list} for o in output_list} + Js = household.jacobian(ss, exogenous, T) + Js_direct = {o.capitalize(): {i: np.empty((T, T)) for i in exogenous} for o in output_list} # run td once without any shocks to get paths to subtract against # (better than subtracting by ss since ss not exact) # monotonic=True lets us know there is monotonicity of policy rule, makes TD run faster - # .td requires at least one input 'shock', so we put in steady-state w - td_noshock = household.td(ss, exogenous={"w": np.zeros(T)}, monotonic=True) + # .impulse_nonlinear requires at least one input 'shock', so we put in steady-state w + td_noshock = household.impulse_nonlinear(ss, exogenous={'w': np.zeros(T)}, monotonic=True) - for i in shock_list: + for i in exogenous: # simulate with respect to a shock at each date up to T for t in range(T): - td_out = household.td(ss, exogenous={i: h * (np.arange(T) == t)}) + td_out = household.impulse_nonlinear(ss, exogenous={i: h * (np.arange(T) == t)}) # store results as column t of J[o][i] for each outcome o for o in output_list: diff --git a/tests/base/test_simple_block.py b/tests/base/test_simple_block.py index 6610dba..9c6fda4 100644 --- a/tests/base/test_simple_block.py +++ b/tests/base/test_simple_block.py @@ -38,15 +38,15 @@ def test_block_consistency(block, ss): """Make sure ss, td, and jac methods are all consistent with each other. Requires that all inputs of simple block allow calculating Jacobians""" # get ss output - ss_results = block.ss(**ss) + ss_results = block.steady_state(ss) # now if we put in constant inputs, td should give us the same! 
- td_results = block.td(ss_results, **{k: np.zeros(20) for k in ss.keys()}) + td_results = block.impulse_nonlinear(ss_results, exogenous={k: np.zeros(20) for k in ss.keys()}) for k, v in td_results.impulse.items(): assert np.all(v == 0) # now get the Jacobian - J = block.jac(ss, shock_list=block.input_list) + J = block.jacobian(ss, exogenous=block.input_list) # now perturb the steady state by small random vectors # and verify that the second-order numerical derivative implied by .td @@ -54,8 +54,8 @@ def test_block_consistency(block, ss): h = 1E-5 all_shocks = {i: np.random.rand(10) for i in block.input_list} - td_up = block.td(ss_results, **{i: h*shock for i, shock in all_shocks.items()}) - td_dn = block.td(ss_results, **{i: -h*shock for i, shock in all_shocks.items()}) + td_up = block.impulse_nonlinear(ss_results, exogenous={i: h*shock for i, shock in all_shocks.items()}) + td_dn = block.impulse_nonlinear(ss_results, exogenous={i: -h*shock for i, shock in all_shocks.items()}) linear_impulses = {o: (td_up.impulse[o] - td_dn.impulse[o])/(2*h) for o in td_up.impulse} linear_impulses_from_jac = {o: sum(J[o][i] @ all_shocks[i] for i in all_shocks if i in J[o]) for o in td_up.impulse} From 18f4527038611bd0bbe04be518ca16786b23f14d Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 16 Jun 2021 11:41:07 -0500 Subject: [PATCH 13/35] Deprecated .ss, .jac, .td methods removed. All tests pass. --- .../blocks/support/bijection.py | 4 ++-- src/sequence_jacobian/models/hank.py | 15 ++++++++------- src/sequence_jacobian/models/krusell_smith.py | 12 ++++-------- src/sequence_jacobian/models/two_asset.py | 17 +++++++++-------- tests/base/test_two_asset.py | 10 ++++------ 5 files changed, 27 insertions(+), 31 deletions(-) diff --git a/src/sequence_jacobian/blocks/support/bijection.py b/src/sequence_jacobian/blocks/support/bijection.py index 38d1b39..b8bf815 100644 --- a/src/sequence_jacobian/blocks/support/bijection.py +++ b/src/sequence_jacobian/blocks/support/bijection.py @@ -40,5 +40,5 @@ def __matmul__(self, x): return [self[k] for k in x] elif isinstance(x, tuple): return tuple(self[k] for k in x) - else: - return NotImplementedError + # else: + # return NotImplementedError diff --git a/src/sequence_jacobian/models/hank.py b/src/sequence_jacobian/models/hank.py index 8ee9576..a38df0e 100644 --- a/src/sequence_jacobian/models/hank.py +++ b/src/sequence_jacobian/models/hank.py @@ -190,9 +190,8 @@ def hank_ss(beta_guess=0.986, vphi_guess=0.8, r=0.005, eis=0.5, frisch=0.5, mu=1 Tax = r * B T = transfers(pi_e, Div, Tax, e_grid) - # initialize guess for policy function iteration - coh = (1 + r) * a_grid[np.newaxis, :] + w * e_grid[:, np.newaxis] + T[:, np.newaxis] - Va = (1 + r) * (0.1 * coh) ** (-1 / eis) + calibration = {'Pi': Pi, 'a_grid': a_grid, 'e_grid': e_grid, 'pi_e': pi_e, 'w': w, 'r': r, 'eis': eis, 'Div': Div, + 'Tax': Tax, 'frisch': frisch} # residual function def res(x): @@ -200,16 +199,18 @@ def res(x): # precompute constrained c and n which don't depend on Va if beta_loc > 0.999 / (1 + r) or vphi_loc < 0.001: raise ValueError('Clearly invalid inputs') - out = household.ss(Va=Va, Pi=Pi, a_grid=a_grid, e_grid=e_grid, pi_e=pi_e, w=w, r=r, beta=beta_loc, - eis=eis, Div=Div, Tax=Tax, frisch=frisch, vphi=vphi_loc) + + calibration['beta'], calibration['vphi'] = beta_loc, vphi_loc + out = household.steady_state(calibration) + return np.array([out['A'] - B, out['N_e'] - 1]) # solve for beta, vphi (beta, vphi), _ = utils.solvers.broyden_solver(res, np.array([beta_guess, vphi_guess]), verbose=False) + 
calibration['beta'], calibration['vphi'] = beta, vphi # extra evaluation for reporting - ss = household.ss(Va=Va, Pi=Pi, a_grid=a_grid, e_grid=e_grid, pi_e=pi_e, w=w, r=r, beta=beta, eis=eis, - Div=Div, Tax=Tax, frisch=frisch, vphi=vphi) + ss = household.steady_state(calibration) # check Walras's law goods_mkt = 1 - ss['C'] diff --git a/src/sequence_jacobian/models/krusell_smith.py b/src/sequence_jacobian/models/krusell_smith.py index 7058661..378664f 100644 --- a/src/sequence_jacobian/models/krusell_smith.py +++ b/src/sequence_jacobian/models/krusell_smith.py @@ -105,28 +105,24 @@ def ks_ss(lb=0.98, ub=0.999, r=0.01, eis=1, delta=0.025, alpha=0.11, rho=0.966, Y = Z * K ** alpha w = (1 - alpha) * Z * (alpha * Z / rk) ** (alpha / (1 - alpha)) - # figure out initializer - # coh = (1 + r) * a_grid[np.newaxis, :] + w * e_grid[:, np.newaxis] - # Va = (1 + r) * (0.1 * coh) ** (-1 / eis) calibration = {'Pi': Pi, 'a_grid': a_grid, 'e_grid': e_grid, 'r': r, 'w': w, 'eis': eis} - calibration = SteadyStateDict(calibration) # solve for beta consistent with this beta_min = lb / (1 + r) beta_max = ub / (1 + r) - def res(beta_): - calibration['beta'] = beta_ + def res(beta_loc): + calibration['beta'] = beta_loc return household.steady_state(calibration)['A'] - K beta, sol = opt.brentq(res, beta_min, beta_max, full_output=True) + calibration['beta'] = beta if not sol.converged: raise ValueError('Steady-state solver did not converge.') # extra evaluation to report variables - calibration['beta'] = beta ss = household.steady_state(calibration) - ss.update({'Z': Z, 'K': K, 'L': 1.0, 'Y': Y, 'alpha': alpha, 'delta': delta, + ss.update({'Z': Z, 'K': K, 'L': 1.0, 'Y': Y, 'alpha': alpha, 'delta': delta, 'Pi': Pi, 'goods_mkt': Y - ss['C'] - delta * K, 'nA': nA, 'amax': amax, 'sigma': sigma, 'rho': rho, 'nS': nS, 'asset_mkt': ss['A'] - K}) diff --git a/src/sequence_jacobian/models/two_asset.py b/src/sequence_jacobian/models/two_asset.py index 403bdbe..9948f1c 100644 --- a/src/sequence_jacobian/models/two_asset.py +++ b/src/sequence_jacobian/models/two_asset.py @@ -359,26 +359,27 @@ def two_asset_ss(beta_guess=0.976, chi1_guess=6.5, r=0.0125, tot_wealth=14, K=10 rb = r - omega # figure out initializer - z_grid = income(e_grid, tax, w, 1) - Va = (0.6 + 1.1 * b_grid[:, np.newaxis] + a_grid) ** (-1 / eis) * np.ones((z_grid.shape[0], 1, 1)) - Vb = (0.5 + b_grid[:, np.newaxis] + 1.2 * a_grid) ** (-1 / eis) * np.ones((z_grid.shape[0], 1, 1)) + calibration = {'Pi': Pi, 'a_grid': a_grid, 'b_grid': b_grid, 'e_grid': e_grid, 'k_grid': k_grid, + 'N': 1.0, 'tax': tax, 'w': w, 'eis': eis, 'rb': rb, 'ra': ra, 'chi0': chi0, 'chi2': chi2} # residual function def res(x): beta_loc, chi1_loc = x + if beta_loc > 0.999 / (1 + r) or chi1_loc < 0.5: raise ValueError('Clearly invalid inputs') - out = household.ss(Va=Va, Vb=Vb, Pi=Pi, a_grid=a_grid, b_grid=b_grid, N=1, tax=tax, w=w, e_grid=e_grid, - k_grid=k_grid, beta=beta_loc, eis=eis, rb=rb, ra=ra, chi0=chi0, chi1=chi1_loc, chi2=chi2) + + calibration['beta'], calibration['chi1'] = beta_loc, chi1_loc + out = household.steady_state(calibration) asset_mkt = out['A'] + out['B'] - p - Bg return np.array([asset_mkt, out['B'] - Bh]) # solve for beta, vphi, omega (beta, chi1), _ = utils.solvers.broyden_solver(res, np.array([beta_guess, chi1_guess]), verbose=verbose) + calibration['beta'], calibration['chi1'] = beta, chi1 - # extra evaluation to report variables - ss = household.ss(Va=Va, Vb=Vb, Pi=Pi, a_grid=a_grid, b_grid=b_grid, N=1, tax=tax, w=w, e_grid=e_grid, - k_grid=k_grid, beta=beta, 
eis=eis, rb=rb, ra=ra, chi0=chi0, chi1=chi1, chi2=chi2) + # extra evaluation for reporting + ss = household.steady_state(calibration) # other things of interest vphi = (1 - tax) * w * ss['U'] / muw diff --git a/tests/base/test_two_asset.py b/tests/base/test_two_asset.py index 1454e5c..94ce995 100644 --- a/tests/base/test_two_asset.py +++ b/tests/base/test_two_asset.py @@ -36,13 +36,11 @@ def hank_ss_singlerun(beta=0.976, r=0.0125, tot_wealth=14, K=10, delta=0.02, Bg= rb = r - omega # figure out initializer - z_grid = two_asset.income(e_grid, tax, w, 1) - Va = (0.6 + 1.1 * b_grid[:, np.newaxis] + a_grid) ** (-1 / eis) * np.ones((z_grid.shape[0], 1, 1)) - Vb = (0.5 + b_grid[:, np.newaxis] + 1.2 * a_grid) ** (-1 / eis) * np.ones((z_grid.shape[0], 1, 1)) + calibration = {'Pi': Pi, 'a_grid': a_grid, 'b_grid': b_grid, 'e_grid': e_grid, 'k_grid': k_grid, + 'beta': beta, 'N': 1.0, 'tax': tax, 'w': w, 'eis': eis, 'rb': rb, 'ra': ra, + 'chi0': chi0, 'chi1': chi1, 'chi2': chi2} - out = two_asset.household.ss(Va=Va, Vb=Vb, Pi=Pi, a_grid=a_grid, b_grid=b_grid, - N=1, tax=tax, w=w, e_grid=e_grid, k_grid=k_grid, beta=beta, - eis=eis, rb=rb, ra=ra, chi0=chi0, chi1=chi1, chi2=chi2) + out = two_asset.household.steady_state(calibration) return out['A'], out['B'], out['U'] From f04a13ff4a7c432085630463eb1fdabe1b8bc3cd Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 16 Jun 2021 12:08:58 -0500 Subject: [PATCH 14/35] Left multiplication (SteadyStateDict @ Bijection) works with test. --- src/sequence_jacobian/primitives.py | 2 ++ src/sequence_jacobian/steady_state/classes.py | 11 +++++++++++ tests/base/test_public_classes.py | 12 +++++++++--- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/src/sequence_jacobian/primitives.py b/src/sequence_jacobian/primitives.py index 6ea2095..c4a13a5 100644 --- a/src/sequence_jacobian/primitives.py +++ b/src/sequence_jacobian/primitives.py @@ -75,6 +75,8 @@ def outputs(self): def steady_state(self, calibration: Dict[str, Union[Real, Array]], **kwargs) -> SteadyStateDict: raise NotImplementedError(f'{type(self)} does not implement .steady_state()') + # def steady_state(self, calibration, **kwargs): + # return self.M @ self.steady_state(self.M.inv @ calibration, **kwargs) def impulse_nonlinear(self, ss: Dict[str, Union[Real, Array]], exogenous: Dict[str, Array], **kwargs) -> ImpulseDict: diff --git a/src/sequence_jacobian/steady_state/classes.py b/src/sequence_jacobian/steady_state/classes.py index cb8238f..e4ff000 100644 --- a/src/sequence_jacobian/steady_state/classes.py +++ b/src/sequence_jacobian/steady_state/classes.py @@ -3,6 +3,7 @@ from copy import deepcopy from ..utilities.misc import dict_diff +from ..blocks.support.bijection import Bijection class SteadyStateDict: @@ -32,6 +33,16 @@ def __getitem__(self, k): def __setitem__(self, k, v): self.toplevel[k] = v + def __matmul__(self, x): + # remap keys in toplevel + if isinstance(x, Bijection): + new = deepcopy(self) + new.toplevel = x @ self.toplevel + return new + + def __rmatmul__(self, x): + return self.__matmul__(x) + def keys(self): return self.toplevel.keys() diff --git a/tests/base/test_public_classes.py b/tests/base/test_public_classes.py index 2d0d0e6..1c325e8 100644 --- a/tests/base/test_public_classes.py +++ b/tests/base/test_public_classes.py @@ -87,6 +87,12 @@ def test_bijection(): with pytest.raises(ValueError): Bijection({'a': 'a1', 'b': 'a1'}) - # composition - mymap2 = Bijection({'A': 'a'}) - assert (mymap @ mymap2)['A'] == 'a1' + # composition with another bijection (flows backwards) + 
mymap2 = Bijection({'a1': 'a2'}) + assert (mymap2 @ mymap)['a'] == 'a2' + + # composition with SteadyStateDict (only left __matmul__ works as intended) + ss = SteadyStateDict({'a': 2.0, 'b': 1.0}, internal={}) + ss_remapped = ss @ mymap + assert isinstance(ss_remapped, SteadyStateDict) + assert ss_remapped['a1'] == ss['a'] and ss_remapped['b1'] == ss['b'] From 2d81597b4c1c1ba88112eb2df5ac96aa1fd9adfd Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 16 Jun 2021 14:51:26 -0500 Subject: [PATCH 15/35] Remap: .steady_state and .solve_steady_state work with some caveats. 1. Repeated blocks (low and high type) won't work out of the gate. 2. Noticed that dissolve does not work properly. Investigate separately. --- src/sequence_jacobian/blocks/combined_block.py | 4 +++- src/sequence_jacobian/blocks/discont_block.py | 5 +++-- src/sequence_jacobian/blocks/het_block.py | 7 ++++--- src/sequence_jacobian/blocks/simple_block.py | 2 +- src/sequence_jacobian/blocks/solved_block.py | 6 ++++-- src/sequence_jacobian/blocks/support/bijection.py | 9 +++++++++ src/sequence_jacobian/primitives.py | 14 +++++++------- src/sequence_jacobian/steady_state/drivers.py | 9 +++++++-- 8 files changed, 38 insertions(+), 18 deletions(-) diff --git a/src/sequence_jacobian/blocks/combined_block.py b/src/sequence_jacobian/blocks/combined_block.py index 5f8b662..1cbf863 100644 --- a/src/sequence_jacobian/blocks/combined_block.py +++ b/src/sequence_jacobian/blocks/combined_block.py @@ -10,6 +10,7 @@ from ..steady_state.support import provide_solver_default from ..jacobian.classes import JacobianDict from ..steady_state.classes import SteadyStateDict +from .support.bijection import Bijection def combine(blocks, name="", model_alias=False): @@ -34,6 +35,7 @@ def __init__(self, blocks, name="", model_alias=False): self._sorted_indices = utils.graph.block_sort(blocks) self._required = utils.graph.find_outputs_that_are_intermediate_inputs(blocks) self.blocks = [self._blocks_unsorted[i] for i in self._sorted_indices] + self.M = Bijection({}) if not name: self.name = f"{self.blocks[0].name}_to_{self.blocks[-1].name}_combined" @@ -56,7 +58,7 @@ def __repr__(self): else: return f"" - def steady_state(self, calibration, helper_blocks=None, **kwargs): + def _steady_state(self, calibration, helper_blocks=None, **kwargs): """Evaluate a partial equilibrium steady state of the CombinedBlock given a `calibration`""" if helper_blocks is None: helper_blocks = [] diff --git a/src/sequence_jacobian/blocks/discont_block.py b/src/sequence_jacobian/blocks/discont_block.py index a9e4402..e43cd33 100644 --- a/src/sequence_jacobian/blocks/discont_block.py +++ b/src/sequence_jacobian/blocks/discont_block.py @@ -8,6 +8,7 @@ from ..steady_state.classes import SteadyStateDict from ..jacobian.classes import JacobianDict from ..utilities.misc import verify_saved_jacobian +from .support.bijection import Bijection def discont(exogenous, policy, disc_policy, backward, backward_init=None): @@ -168,8 +169,8 @@ def __repr__(self): ''' - def steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, - forward_tol=1E-10, forward_maxit=100_000, hetoutput=False): + def _steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, + forward_tol=1E-10, forward_maxit=100_000, hetoutput=False): """Evaluate steady state HetBlock using keyword args for all inputs. Analog to SimpleBlock.ss. 
Parameters diff --git a/src/sequence_jacobian/blocks/het_block.py b/src/sequence_jacobian/blocks/het_block.py index bb76e11..fe45715 100644 --- a/src/sequence_jacobian/blocks/het_block.py +++ b/src/sequence_jacobian/blocks/het_block.py @@ -3,6 +3,7 @@ import numpy as np from .support.impulse import ImpulseDict +from .support.bijection import Bijection from ..primitives import Block from .. import utilities as utils from ..steady_state.classes import SteadyStateDict @@ -165,8 +166,8 @@ def __repr__(self): each backward iteration step in td, jacobian is not computed for these! ''' - def steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, - forward_tol=1E-10, forward_maxit=100_000, hetoutput=False): + def _steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, + forward_tol=1E-10, forward_maxit=100_000): """Evaluate steady state HetBlock using keyword args for all inputs. Analog to SimpleBlock.ss. Parameters @@ -223,7 +224,7 @@ def steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, aggregates = {o.capitalize(): np.vdot(D, sspol[o]) for o in self.non_back_iter_outputs} ss.update(aggregates) - if hetoutput and self.hetoutput is not None: + if self.hetoutput is not None: hetoutputs = self.hetoutput.evaluate(ss) aggregate_hetoutputs = self.hetoutput.aggregate(hetoutputs, D, ss, mode="ss") else: diff --git a/src/sequence_jacobian/blocks/simple_block.py b/src/sequence_jacobian/blocks/simple_block.py index 25c28cc..412bde1 100644 --- a/src/sequence_jacobian/blocks/simple_block.py +++ b/src/sequence_jacobian/blocks/simple_block.py @@ -46,7 +46,7 @@ def __init__(self, f): def __repr__(self): return f"" - def steady_state(self, calibration): + def _steady_state(self, calibration): input_args = {k: ignore(v) for k, v in calibration.items() if k in misc.input_list(self.f)} output_vars = [misc.numeric_primitive(o) for o in self.f(**input_args)] if len(self.output_list) > 1 else [ misc.numeric_primitive(self.f(**input_args))] diff --git a/src/sequence_jacobian/blocks/solved_block.py b/src/sequence_jacobian/blocks/solved_block.py index 7f37cea..35e0769 100644 --- a/src/sequence_jacobian/blocks/solved_block.py +++ b/src/sequence_jacobian/blocks/solved_block.py @@ -3,6 +3,7 @@ from ..primitives import Block from ..blocks.simple_block import simple from ..utilities import graph +from .support.bijection import Bijection def solved(unknowns, targets, block_list=[], solver=None, solver_kwargs={}, name=""): @@ -39,6 +40,7 @@ class SolvedBlock(Block): def __init__(self, blocks, name, unknowns, targets, solver=None, solver_kwargs={}): # Store the actual blocks in ._blocks_unsorted, and use .blocks_w_helpers and .blocks to index from there. 
self._blocks_unsorted = blocks + self.M = Bijection({}) # don't inherit membrane from parent blocks (think more about this later) # Upon instantiation, we only have enough information to conduct a sort ignoring HelperBlocks # since we need a `calibration` to resolve cyclic dependencies when including HelperBlocks in a topological sort @@ -66,8 +68,8 @@ def __init__(self, blocks, name, unknowns, targets, solver=None, solver_kwargs={ def __repr__(self): return f"" - def steady_state(self, calibration, unknowns=None, helper_blocks=None, solver=None, - consistency_check=False, ttol=1e-9, ctol=1e-9, verbose=False): + def _steady_state(self, calibration, unknowns=None, helper_blocks=None, solver=None, + consistency_check=False, ttol=1e-9, ctol=1e-9, verbose=False): # If this is the first time invoking steady_state/solve_steady_state, cache the sorted indices # accounting for HelperBlocks if self._sorted_indices_w_helpers is None: diff --git a/src/sequence_jacobian/blocks/support/bijection.py b/src/sequence_jacobian/blocks/support/bijection.py index b8bf815..74c5477 100644 --- a/src/sequence_jacobian/blocks/support/bijection.py +++ b/src/sequence_jacobian/blocks/support/bijection.py @@ -41,4 +41,13 @@ def __matmul__(self, x): elif isinstance(x, tuple): return tuple(self[k] for k in x) # else: + # would this prevent calling SteadyStateDict.__rmatmul__? # return NotImplementedError + + def __rmatmul__(self, x): + if isinstance(x, dict): + return {self[k]: v for k, v in x.items()} + elif isinstance(x, list): + return [self[k] for k in x] + elif isinstance(x, tuple): + return tuple(self[k] for k in x) \ No newline at end of file diff --git a/src/sequence_jacobian/primitives.py b/src/sequence_jacobian/primitives.py index c4a13a5..a4309d9 100644 --- a/src/sequence_jacobian/primitives.py +++ b/src/sequence_jacobian/primitives.py @@ -6,7 +6,7 @@ from typing import Any, Dict, Union, Tuple, Optional, List from copy import deepcopy -from .steady_state.drivers import steady_state +from .steady_state.drivers import steady_state as ss from .steady_state.support import provide_solver_default from .nonlinear import td_solve from .jacobian.drivers import get_impulse, get_G @@ -72,11 +72,11 @@ def outputs(self): # Typing information is purely to inform future user-developed `Block` sub-classes to enforce a canonical # input and output argument structure - def steady_state(self, calibration: Dict[str, Union[Real, Array]], - **kwargs) -> SteadyStateDict: - raise NotImplementedError(f'{type(self)} does not implement .steady_state()') - # def steady_state(self, calibration, **kwargs): - # return self.M @ self.steady_state(self.M.inv @ calibration, **kwargs) + # def steady_state(self, calibration: Dict[str, Union[Real, Array]], + # **kwargs) -> SteadyStateDict: + # raise NotImplementedError(f'{type(self)} does not implement .steady_state()') + def steady_state(self, calibration, **kwargs): + return self._steady_state(calibration @ self.M.inv, **kwargs) @ self.M def impulse_nonlinear(self, ss: Dict[str, Union[Real, Array]], exogenous: Dict[str, Array], **kwargs) -> ImpulseDict: @@ -99,7 +99,7 @@ def solve_steady_state(self, calibration: Dict[str, Union[Real, Array]], the target conditions that must hold in general equilibrium""" blocks = self.blocks if hasattr(self, "blocks") else [self] solver = solver if solver else provide_solver_default(unknowns) - return steady_state(blocks, calibration, unknowns, targets, solver=solver, **kwargs) + return ss(blocks, calibration, unknowns, targets, solver=solver, **kwargs) def 
solve_impulse_nonlinear(self, ss: Dict[str, Union[Real, Array]], exogenous: Dict[str, Array], diff --git a/src/sequence_jacobian/steady_state/drivers.py b/src/sequence_jacobian/steady_state/drivers.py index 4f7f154..ee2667c 100644 --- a/src/sequence_jacobian/steady_state/drivers.py +++ b/src/sequence_jacobian/steady_state/drivers.py @@ -146,10 +146,13 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k """Evaluate the .ss method of a block, given a dictionary of potential arguments""" if toplevel_unknowns is None: toplevel_unknowns = {} - block_unknowns_in_toplevel_unknowns = set(block.unknowns.keys()).issubset(set(toplevel_unknowns)) if hasattr(block, "unknowns") else False if dissolve is None: dissolve = [] + # Remapping + calibration = calibration @ block.M.inv + toplevel_unknowns = toplevel_unknowns @ block.M.inv + # Add the block's internal variables as inputs, if the block has an internal attribute if isinstance(calibration, dict): input_arg_dict = deepcopy(calibration) @@ -159,6 +162,8 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k # Bypass the behavior for SolvedBlocks to numerically solve for their unknowns and simply evaluate them # at the provided set of unknowns if included in dissolve. valid_input_kwargs = misc.input_kwarg_list(block.steady_state) + block_unknowns_in_toplevel_unknowns = set(block.unknowns.keys()).issubset(set(toplevel_unknowns)) if hasattr( + block, "unknowns") else False input_kwarg_dict = {k: v for k, v in kwargs.items() if k in valid_input_kwargs} if block in dissolve and "solver" in valid_input_kwargs: input_kwarg_dict["solver"] = "solved" @@ -170,7 +175,7 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k f"If the user provides a set of top-level unknowns that subsume block-level unknowns," f" it must be explicitly declared in `dissolve`.") - return block.steady_state({k: v for k, v in input_arg_dict.items() if k in block.inputs}, **input_kwarg_dict) + return block.steady_state(input_arg_dict, **input_kwarg_dict) @ block.M def _solve_for_unknowns(residual, unknowns, targets, solver, solver_kwargs, residual_kwargs=None, From b6751f441b062b2d9bfe50b79cf13d156cce7134 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 16 Jun 2021 18:52:42 -0500 Subject: [PATCH 16/35] Fixed dissolve. Bijection could not be multiplied with dict_keys. 
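Sketch of the failure mode being fixed, assuming the `__matmul__`/`__rmatmul__` branches shown earlier: those branches handle dict, list, and tuple, but not a dict_keys view, so the driver now converts keys to a list before pushing them through the membrane (the unknown names below are illustrative):

    from sequence_jacobian.blocks.support.bijection import Bijection

    M = Bijection({'beta': 'beta_patient'})
    unknowns = {'beta_patient': (0.95, 0.99), 'Z': 0.85}

    # unknowns.keys() @ M.inv    # dict_keys is not one of the handled containers
    assert list(unknowns.keys()) @ M.inv == ['beta', 'Z']   # list works: block-internal names
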
--- src/sequence_jacobian/steady_state/drivers.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/sequence_jacobian/steady_state/drivers.py b/src/sequence_jacobian/steady_state/drivers.py index ee2667c..b42bf5d 100644 --- a/src/sequence_jacobian/steady_state/drivers.py +++ b/src/sequence_jacobian/steady_state/drivers.py @@ -145,13 +145,13 @@ def residual(targets_dict, unknown_keys, unknown_values, include_helpers=True, c def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **kwargs): """Evaluate the .ss method of a block, given a dictionary of potential arguments""" if toplevel_unknowns is None: - toplevel_unknowns = {} + toplevel_unknowns = [] if dissolve is None: dissolve = [] # Remapping calibration = calibration @ block.M.inv - toplevel_unknowns = toplevel_unknowns @ block.M.inv + toplevel_unknowns = list(toplevel_unknowns) @ block.M.inv # Add the block's internal variables as inputs, if the block has an internal attribute if isinstance(calibration, dict): @@ -162,8 +162,7 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k # Bypass the behavior for SolvedBlocks to numerically solve for their unknowns and simply evaluate them # at the provided set of unknowns if included in dissolve. valid_input_kwargs = misc.input_kwarg_list(block.steady_state) - block_unknowns_in_toplevel_unknowns = set(block.unknowns.keys()).issubset(set(toplevel_unknowns)) if hasattr( - block, "unknowns") else False + block_unknowns_in_toplevel_unknowns = set(block.unknowns.keys()).issubset(set(toplevel_unknowns)) if hasattr(block, "unknowns") else False input_kwarg_dict = {k: v for k, v in kwargs.items() if k in valid_input_kwargs} if block in dissolve and "solver" in valid_input_kwargs: input_kwarg_dict["solver"] = "solved" From bdf08e7118d008c379c7158bace4cf7764985c61 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 22 Jun 2021 09:28:24 -0500 Subject: [PATCH 17/35] Test models for remap. (temporary) --- dcblock.py => dcblock_test.py | 2 + remap_test.py | 136 ++++++++++++++++++++++++++++++++++ 2 files changed, 138 insertions(+) rename dcblock.py => dcblock_test.py (99%) create mode 100644 remap_test.py diff --git a/dcblock.py b/dcblock_test.py similarity index 99% rename from dcblock.py rename to dcblock_test.py index 5cc87b1..d238c37 100644 --- a/dcblock.py +++ b/dcblock_test.py @@ -1,3 +1,5 @@ +'''This model is to test DisContBlock''' + import numpy as np from numba import guvectorize, njit import dfols diff --git a/remap_test.py b/remap_test.py new file mode 100644 index 0000000..9891a3f --- /dev/null +++ b/remap_test.py @@ -0,0 +1,136 @@ +"""Simple model to test remapping & multiple hetblocks.""" + +import numpy as np +import copy + +from sequence_jacobian import utilities as utils +from sequence_jacobian import create_model, hetoutput, het, simple + + +'''Part 1: HA block''' + + +def household_init(a_grid, e_grid, r, w, eis): + coh = (1 + r) * a_grid[np.newaxis, :] + w * e_grid[:, np.newaxis] + Va = (1 + r) * (0.1 * coh) ** (-1 / eis) + return Va + + +@het(exogenous='Pi', policy='a', backward='Va', backward_init=household_init) +def household(Va_p, Pi_p, a_grid, e_grid, beta, r, w, eis): + """Single backward iteration step using endogenous gridpoint method for households with CRRA utility. 
+ + Parameters + ---------- + Va_p : array (S, A), marginal value of assets tomorrow + Pi_p : array (S, S), Markov matrix for skills tomorrow + a_grid : array (A), asset grid + e_grid : array (S*B), skill grid + beta : scalar, discount rate today + r : scalar, ex-post real interest rate + w : scalar, wage + eis : scalar, elasticity of intertemporal substitution + + Returns + ---------- + Va : array (S*B, A), marginal value of assets today + a : array (S*B, A), asset policy today + c : array (S*B, A), consumption policy today + """ + uc_nextgrid = (beta * Pi_p) @ Va_p + c_nextgrid = uc_nextgrid ** (-eis) + coh = (1 + r) * a_grid[np.newaxis, :] + w * e_grid[:, np.newaxis] + a = utils.interpolate.interpolate_y(c_nextgrid + a_grid, coh, a_grid) + utils.optimized_routines.setmin(a, a_grid[0]) + c = coh - a + Va = (1 + r) * c ** (-1 / eis) + return Va, a, c + + +def get_mpcs(c, a, a_grid, rpost): + """Approximate mpc, with symmetric differences where possible, exactly setting mpc=1 for constrained agents.""" + mpcs = np.empty_like(c) + post_return = (1 + rpost) * a_grid + + # symmetric differences away from boundaries + mpcs[:, 1:-1] = (c[:, 2:] - c[:, 0:-2]) / (post_return[2:] - post_return[:-2]) + + # asymmetric first differences at boundaries + mpcs[:, 0] = (c[:, 1] - c[:, 0]) / (post_return[1] - post_return[0]) + mpcs[:, -1] = (c[:, -1] - c[:, -2]) / (post_return[-1] - post_return[-2]) + + # special case of constrained + mpcs[a == a_grid[0]] = 1 + + return mpcs + + +'''Part 2: Simple Blocks''' + + +@simple +def firm(K, L, Z, alpha, delta): + r = alpha * Z * (K(-1) / L) ** (alpha-1) - delta + w = (1 - alpha) * Z * (K(-1) / L) ** alpha + Y = Z * K(-1) ** alpha * L ** (1 - alpha) + return r, w, Y + + +@simple +def mkt_clearing(K, A, Y, C, delta): + asset_mkt = A - K + goods_mkt = Y - C - delta * K + return asset_mkt, goods_mkt + + +@simple +def income_state_vars(rho, sigma, nS): + e, _, Pi = utils.discretize.markov_rouwenhorst(rho=rho, sigma=sigma, N=nS) + return e, Pi + + +@simple +def asset_state_vars(amax, nA): + a_grid = utils.discretize.agrid(amax=amax, n=nA) + return a_grid + + +@simple +def firm_steady_state_solution(r, delta, alpha): + rk = r + delta + Z = (rk / alpha) ** alpha # normalize so that Y=1 + K = (alpha * Z / rk) ** (1 / (1 - alpha)) + Y = Z * K ** alpha + w = (1 - alpha) * Z * (alpha * Z / rk) ** (alpha / (1 - alpha)) + return Z, K, Y, w + + +'''Part 3: permanent heterogeneity''' + +# remap method takes a dict and returns new copies of blocks +to_map = ['beta', *household.outputs] +hh_patient = household.remap({k: k + '_patient' for k in to_map}) +hh_impatient = household.remap({k: k + '_impatient' for k in to_map}) + + +@simple +def aggregate(A_patient, A_impatient, C_patient, C_impatient, mass_patient): + C = mass_patient * C_patient + (1 - mass_patient) * C_impatient + A = mass_patient * A_patient + (1 - mass_patient) * A_impatient + return C, A + + +'''Steady state''' + +# DAG +blocks = [hh_patient, hh_impatient, firm, mkt_clearing, income_state_vars, asset_state_vars, aggregate] +helper_blocks = [firm_steady_state_solution] +ks_model = create_model(blocks, name="Krusell-Smith") + +# # Steady State +# calibration = {"eis": 1, "delta": 0.025, "alpha": 0.3, "rho": 0.966, "sigma": 0.5, "L": 1.0, +# "nS": 11, "nA": 100, "amax": 1000, "r": 0.01, 'beta_impatient': 0.98} +# unknowns_ss = {"beta_patient": (0.98/1.01, 0.999/1.01), "Z": 0.85, "K": 3.} +# targets_ss = {"asset_mkt": 0., "Y": 1., "r": 0.01} +# ss = ks_model.solve_steady_state(calibration, unknowns_ss, targets_ss, 
solver="brentq", +# helper_blocks=helper_blocks, helper_targets=["Y", "r"]) From fdb86cbe3f40e9f41a134a1c563a7f6dbc69c3cc Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 22 Jun 2021 14:10:29 -0500 Subject: [PATCH 18/35] Rename method for blocks. --- remap_test.py | 10 +++++----- src/sequence_jacobian/blocks/discont_block.py | 6 +++--- src/sequence_jacobian/blocks/het_block.py | 6 +++--- src/sequence_jacobian/blocks/simple_block.py | 2 +- src/sequence_jacobian/primitives.py | 5 +++++ 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/remap_test.py b/remap_test.py index 9891a3f..ce98027 100644 --- a/remap_test.py +++ b/remap_test.py @@ -109,8 +109,8 @@ def firm_steady_state_solution(r, delta, alpha): # remap method takes a dict and returns new copies of blocks to_map = ['beta', *household.outputs] -hh_patient = household.remap({k: k + '_patient' for k in to_map}) -hh_impatient = household.remap({k: k + '_impatient' for k in to_map}) +hh_patient = household.remap({k: k + '_patient' for k in to_map}).rename('patient household') +hh_impatient = household.remap({k: k + '_impatient' for k in to_map}).rename('impatient household') @simple @@ -123,9 +123,9 @@ def aggregate(A_patient, A_impatient, C_patient, C_impatient, mass_patient): '''Steady state''' # DAG -blocks = [hh_patient, hh_impatient, firm, mkt_clearing, income_state_vars, asset_state_vars, aggregate] -helper_blocks = [firm_steady_state_solution] -ks_model = create_model(blocks, name="Krusell-Smith") +# blocks = [hh_patient, hh_impatient, firm, mkt_clearing, income_state_vars, asset_state_vars, aggregate] +# helper_blocks = [firm_steady_state_solution] +# ks_model = create_model(blocks, name="Krusell-Smith") # # Steady State # calibration = {"eis": 1, "delta": 0.025, "alpha": 0.3, "rho": 0.966, "sigma": 0.5, "L": 1.0, diff --git a/src/sequence_jacobian/blocks/discont_block.py b/src/sequence_jacobian/blocks/discont_block.py index e43cd33..e31a462 100644 --- a/src/sequence_jacobian/blocks/discont_block.py +++ b/src/sequence_jacobian/blocks/discont_block.py @@ -150,12 +150,12 @@ def __repr__(self): """Nice string representation of HetBlock for printing to console""" if self.hetinput is not None: if self.hetoutput is not None: - return f"" else: - return f"" + return f"" else: - return f"" + return f"" '''Part 2: high-level routines, with first three called analogously to SimpleBlock counterparts - steady_state : do backward and forward iteration until convergence to get complete steady state diff --git a/src/sequence_jacobian/blocks/het_block.py b/src/sequence_jacobian/blocks/het_block.py index fe45715..52312f1 100644 --- a/src/sequence_jacobian/blocks/het_block.py +++ b/src/sequence_jacobian/blocks/het_block.py @@ -148,12 +148,12 @@ def __repr__(self): """Nice string representation of HetBlock for printing to console""" if self.hetinput is not None: if self.hetoutput is not None: - return f"" else: - return f"" + return f"" else: - return f"" + return f"" '''Part 2: high-level routines, with first three called analogously to SimpleBlock counterparts - steady_state : do backward and forward iteration until convergence to get complete steady state diff --git a/src/sequence_jacobian/blocks/simple_block.py b/src/sequence_jacobian/blocks/simple_block.py index 412bde1..3ac7f63 100644 --- a/src/sequence_jacobian/blocks/simple_block.py +++ b/src/sequence_jacobian/blocks/simple_block.py @@ -44,7 +44,7 @@ def __init__(self, f): self.M = Bijection({}) def __repr__(self): - return f"" + return f"" def _steady_state(self, calibration): 
input_args = {k: ignore(v) for k, v in calibration.items() if k in misc.input_list(self.f)} diff --git a/src/sequence_jacobian/primitives.py b/src/sequence_jacobian/primitives.py index a4309d9..557ac8b 100644 --- a/src/sequence_jacobian/primitives.py +++ b/src/sequence_jacobian/primitives.py @@ -144,3 +144,8 @@ def remap(self, map): remapped = deepcopy(self) remapped.M = self.M @ Bijection(map) return remapped + + def rename(self, name): + renamed = deepcopy(self) + renamed.name = name + return renamed From 2d567e8c82831c4c2e020bb329248c89305aa054 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 22 Jun 2021 14:19:08 -0500 Subject: [PATCH 19/35] Right multiplication of SteadyStateDict with Bijection works. --- src/sequence_jacobian/blocks/support/bijection.py | 5 ++--- src/sequence_jacobian/primitives.py | 5 +---- src/sequence_jacobian/steady_state/drivers.py | 6 +++--- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/src/sequence_jacobian/blocks/support/bijection.py b/src/sequence_jacobian/blocks/support/bijection.py index 74c5477..dabd449 100644 --- a/src/sequence_jacobian/blocks/support/bijection.py +++ b/src/sequence_jacobian/blocks/support/bijection.py @@ -40,9 +40,8 @@ def __matmul__(self, x): return [self[k] for k in x] elif isinstance(x, tuple): return tuple(self[k] for k in x) - # else: - # would this prevent calling SteadyStateDict.__rmatmul__? - # return NotImplementedError + else: + return NotImplemented def __rmatmul__(self, x): if isinstance(x, dict): diff --git a/src/sequence_jacobian/primitives.py b/src/sequence_jacobian/primitives.py index 557ac8b..82ecf47 100644 --- a/src/sequence_jacobian/primitives.py +++ b/src/sequence_jacobian/primitives.py @@ -72,11 +72,8 @@ def outputs(self): # Typing information is purely to inform future user-developed `Block` sub-classes to enforce a canonical # input and output argument structure - # def steady_state(self, calibration: Dict[str, Union[Real, Array]], - # **kwargs) -> SteadyStateDict: - # raise NotImplementedError(f'{type(self)} does not implement .steady_state()') def steady_state(self, calibration, **kwargs): - return self._steady_state(calibration @ self.M.inv, **kwargs) @ self.M + return self.M @ self._steady_state(self.M.inv @ calibration, **kwargs) def impulse_nonlinear(self, ss: Dict[str, Union[Real, Array]], exogenous: Dict[str, Array], **kwargs) -> ImpulseDict: diff --git a/src/sequence_jacobian/steady_state/drivers.py b/src/sequence_jacobian/steady_state/drivers.py index b42bf5d..bdf34c4 100644 --- a/src/sequence_jacobian/steady_state/drivers.py +++ b/src/sequence_jacobian/steady_state/drivers.py @@ -150,8 +150,8 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k dissolve = [] # Remapping - calibration = calibration @ block.M.inv - toplevel_unknowns = list(toplevel_unknowns) @ block.M.inv + calibration = block.M.inv @ calibration + toplevel_unknowns = block.M.inv @ list(toplevel_unknowns) # Add the block's internal variables as inputs, if the block has an internal attribute if isinstance(calibration, dict): @@ -174,7 +174,7 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k f"If the user provides a set of top-level unknowns that subsume block-level unknowns," f" it must be explicitly declared in `dissolve`.") - return block.steady_state(input_arg_dict, **input_kwarg_dict) @ block.M + return block.M @ block.steady_state(input_arg_dict, **input_kwarg_dict) def _solve_for_unknowns(residual, unknowns, targets, solver, solver_kwargs, 
residual_kwargs=None, From 2a6ee86bd7e5336b2bc3b5e0c8556b787e0376b6 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 22 Jun 2021 17:48:35 -0500 Subject: [PATCH 20/35] KS model with two types works. Something broke with solve_steady_state. --- remap_test.py | 49 +++++++++++-------- .../blocks/support/bijection.py | 8 ++- src/sequence_jacobian/primitives.py | 12 +++-- 3 files changed, 44 insertions(+), 25 deletions(-) diff --git a/remap_test.py b/remap_test.py index ce98027..db9c745 100644 --- a/remap_test.py +++ b/remap_test.py @@ -85,8 +85,8 @@ def mkt_clearing(K, A, Y, C, delta): @simple def income_state_vars(rho, sigma, nS): - e, _, Pi = utils.discretize.markov_rouwenhorst(rho=rho, sigma=sigma, N=nS) - return e, Pi + e_grid, _, Pi = utils.discretize.markov_rouwenhorst(rho=rho, sigma=sigma, N=nS) + return e_grid, Pi @simple @@ -96,41 +96,48 @@ def asset_state_vars(amax, nA): @simple -def firm_steady_state_solution(r, delta, alpha): +def firm_ss(r, Y, L, delta, alpha): rk = r + delta - Z = (rk / alpha) ** alpha # normalize so that Y=1 - K = (alpha * Z / rk) ** (1 / (1 - alpha)) - Y = Z * K ** alpha - w = (1 - alpha) * Z * (alpha * Z / rk) ** (alpha / (1 - alpha)) - return Z, K, Y, w + w = (1 - alpha) * Y / L + K = alpha * Y / rk + Z = Y / K ** alpha / L ** (1 - alpha) + return w, K, Z + + +@hetoutput() +def mpcs(c, a, a_grid, r): + mpc = get_mpcs(c, a, a_grid, r) + return mpc '''Part 3: permanent heterogeneity''' # remap method takes a dict and returns new copies of blocks +household.add_hetoutput(mpcs, verbose=False) to_map = ['beta', *household.outputs] hh_patient = household.remap({k: k + '_patient' for k in to_map}).rename('patient household') hh_impatient = household.remap({k: k + '_impatient' for k in to_map}).rename('impatient household') @simple -def aggregate(A_patient, A_impatient, C_patient, C_impatient, mass_patient): +def aggregate(A_patient, A_impatient, C_patient, C_impatient, Mpc_patient, Mpc_impatient, mass_patient): C = mass_patient * C_patient + (1 - mass_patient) * C_impatient A = mass_patient * A_patient + (1 - mass_patient) * A_impatient - return C, A + Mpc = mass_patient * Mpc_patient + (1 - mass_patient) * Mpc_impatient + return C, A, Mpc '''Steady state''' # DAG -# blocks = [hh_patient, hh_impatient, firm, mkt_clearing, income_state_vars, asset_state_vars, aggregate] -# helper_blocks = [firm_steady_state_solution] -# ks_model = create_model(blocks, name="Krusell-Smith") - -# # Steady State -# calibration = {"eis": 1, "delta": 0.025, "alpha": 0.3, "rho": 0.966, "sigma": 0.5, "L": 1.0, -# "nS": 11, "nA": 100, "amax": 1000, "r": 0.01, 'beta_impatient': 0.98} -# unknowns_ss = {"beta_patient": (0.98/1.01, 0.999/1.01), "Z": 0.85, "K": 3.} -# targets_ss = {"asset_mkt": 0., "Y": 1., "r": 0.01} -# ss = ks_model.solve_steady_state(calibration, unknowns_ss, targets_ss, solver="brentq", -# helper_blocks=helper_blocks, helper_targets=["Y", "r"]) +blocks = [hh_patient, hh_impatient, firm, mkt_clearing, income_state_vars, asset_state_vars, aggregate] +ks_model = create_model(blocks, name="Krusell-Smith") + +# Steady State +calibration = {'eis': 1, 'delta': 0.025, 'alpha': 0.3, 'rho': 0.966, 'sigma': 0.5, 'L': 1.0, + 'nS': 11, 'nA': 500, 'amax': 1000, 'beta_impatient': 0.98, 'mass_patient': 0.5} +ss = ks_model.solve_steady_state(calibration, solver='brentq', + unknowns={'beta_patient': (0.97/1.01, 0.999/1.01), 'Z': 0.5, 'K': 8.6}, + targets={'asset_mkt': 0.0, 'Y': 1.0, 'r': 0.01}, + helper_blocks=[firm_ss], helper_targets=['Y', 'r']) +# TODO why is this solution so inaccurate? 
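A quick interface check that could be appended to remap_test.py above (a sketch relying on that file's definitions, not part of the patch): `remap` now pushes the membrane through `.inputs` and `.outputs`, so the two household copies expose disjoint names while sharing one backward-step function.

    # Assumes the objects defined in remap_test.py above (household, hh_patient).
    assert 'beta_patient' in hh_patient.inputs and 'beta' not in hh_patient.inputs
    assert {'A_patient', 'C_patient'} <= hh_patient.outputs
    # The original block is untouched: remap deep-copies before attaching the membrane.
    assert 'beta' in household.inputs and 'A' in household.outputs
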
diff --git a/src/sequence_jacobian/blocks/support/bijection.py b/src/sequence_jacobian/blocks/support/bijection.py index dabd449..834dac8 100644 --- a/src/sequence_jacobian/blocks/support/bijection.py +++ b/src/sequence_jacobian/blocks/support/bijection.py @@ -38,6 +38,8 @@ def __matmul__(self, x): return {self[k]: v for k, v in x.items()} elif isinstance(x, list): return [self[k] for k in x] + elif isinstance(x, set): + return {self[k] for k in x} elif isinstance(x, tuple): return tuple(self[k] for k in x) else: @@ -48,5 +50,9 @@ def __rmatmul__(self, x): return {self[k]: v for k, v in x.items()} elif isinstance(x, list): return [self[k] for k in x] + elif isinstance(x, set): + return {self[k] for k in x} elif isinstance(x, tuple): - return tuple(self[k] for k in x) \ No newline at end of file + return tuple(self[k] for k in x) + else: + return NotImplemented \ No newline at end of file diff --git a/src/sequence_jacobian/primitives.py b/src/sequence_jacobian/primitives.py index 82ecf47..304fad0 100644 --- a/src/sequence_jacobian/primitives.py +++ b/src/sequence_jacobian/primitives.py @@ -138,9 +138,15 @@ def solve_jacobian(self, ss: Dict[str, Union[Real, Array]], return get_G(blocks, exogenous, unknowns, targets, T=T, ss=ss, Js=Js, **kwargs) def remap(self, map): - remapped = deepcopy(self) - remapped.M = self.M @ Bijection(map) - return remapped + other = deepcopy(self) + other.M = self.M @ Bijection(map) + other.inputs = other.M @ self.inputs + other.outputs = other.M @ self.outputs + if hasattr(self, 'input_list'): + other.input_list = other.M @ self.input_list + if hasattr(self, 'output_list'): + other.output_list = other.M @ self.output_list + return other def rename(self, name): renamed = deepcopy(self) From e7ae8c757cb008024e791c3909c1207dcc1fcc50 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Tue, 22 Jun 2021 18:13:07 -0500 Subject: [PATCH 21/35] Fixed steady_state driver. --- remap_test.py | 1 - src/sequence_jacobian/steady_state/drivers.py | 6 +----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/remap_test.py b/remap_test.py index db9c745..3811bae 100644 --- a/remap_test.py +++ b/remap_test.py @@ -140,4 +140,3 @@ def aggregate(A_patient, A_impatient, C_patient, C_impatient, Mpc_patient, Mpc_i unknowns={'beta_patient': (0.97/1.01, 0.999/1.01), 'Z': 0.5, 'K': 8.6}, targets={'asset_mkt': 0.0, 'Y': 1.0, 'r': 0.01}, helper_blocks=[firm_ss], helper_targets=['Y', 'r']) -# TODO why is this solution so inaccurate? 
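The driver-side remapping below becomes redundant because `Block.steady_state` already wraps each block-specific `_steady_state` in the membrane. A toy sketch of that wrapper pattern, with a made-up block and numbers:

    from sequence_jacobian.blocks.support.bijection import Bijection

    class ToyBlock:
        """Stand-in block: _steady_state speaks internal names, steady_state external ones."""
        def __init__(self):
            self.M = Bijection({'x': 'x_patient', 'y': 'y_patient'})

        def _steady_state(self, calibration):
            return {'y': 2 * calibration['x']}

        def steady_state(self, calibration):
            # same wrapper pattern as primitives.Block.steady_state
            return self.M @ self._steady_state(self.M.inv @ calibration)

    assert ToyBlock().steady_state({'x_patient': 3.0}) == {'y_patient': 6.0}
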
diff --git a/src/sequence_jacobian/steady_state/drivers.py b/src/sequence_jacobian/steady_state/drivers.py index bdf34c4..3119bfa 100644 --- a/src/sequence_jacobian/steady_state/drivers.py +++ b/src/sequence_jacobian/steady_state/drivers.py @@ -149,10 +149,6 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k if dissolve is None: dissolve = [] - # Remapping - calibration = block.M.inv @ calibration - toplevel_unknowns = block.M.inv @ list(toplevel_unknowns) - # Add the block's internal variables as inputs, if the block has an internal attribute if isinstance(calibration, dict): input_arg_dict = deepcopy(calibration) @@ -174,7 +170,7 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k f"If the user provides a set of top-level unknowns that subsume block-level unknowns," f" it must be explicitly declared in `dissolve`.") - return block.M @ block.steady_state(input_arg_dict, **input_kwarg_dict) + return block.M @ block.steady_state({k: v for k, v in input_arg_dict.items() if k in block.inputs}, **input_kwarg_dict) def _solve_for_unknowns(residual, unknowns, targets, solver, solver_kwargs, residual_kwargs=None, From eab87604fb4382561bf2b0ce3e495343b018c6a0 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 23 Jun 2021 09:22:31 -0500 Subject: [PATCH 22/35] Remapping JacobianDicts and ImpulseDicts in progress. --- remap_test.py | 3 +++ .../blocks/combined_block.py | 6 ++--- src/sequence_jacobian/blocks/discont_block.py | 6 ++--- src/sequence_jacobian/blocks/het_block.py | 6 ++--- src/sequence_jacobian/blocks/simple_block.py | 6 ++--- src/sequence_jacobian/blocks/solved_block.py | 6 ++--- .../blocks/support/impulse.py | 12 ++++++++++ src/sequence_jacobian/jacobian/classes.py | 1 + src/sequence_jacobian/primitives.py | 23 ++++++++++--------- 9 files changed, 43 insertions(+), 26 deletions(-) diff --git a/remap_test.py b/remap_test.py index 3811bae..14a88a4 100644 --- a/remap_test.py +++ b/remap_test.py @@ -140,3 +140,6 @@ def aggregate(A_patient, A_impatient, C_patient, C_impatient, Mpc_patient, Mpc_i unknowns={'beta_patient': (0.97/1.01, 0.999/1.01), 'Z': 0.5, 'K': 8.6}, targets={'asset_mkt': 0.0, 'Y': 1.0, 'r': 0.01}, helper_blocks=[firm_ss], helper_targets=['Y', 'r']) + +td_nonlin = ks_model.solve_impulse_nonlinear(ss, {'Z': 0.001*0.9**np.arange(300)}, + unknowns=['K'], targets=['asset_mkt']) diff --git a/src/sequence_jacobian/blocks/combined_block.py b/src/sequence_jacobian/blocks/combined_block.py index 1cbf863..b28108c 100644 --- a/src/sequence_jacobian/blocks/combined_block.py +++ b/src/sequence_jacobian/blocks/combined_block.py @@ -71,7 +71,7 @@ def _steady_state(self, calibration, helper_blocks=None, **kwargs): ss_partial_eq.update(eval_block_ss(blocks_all[i], ss_partial_eq, **kwargs)) return SteadyStateDict(ss_partial_eq) - def impulse_nonlinear(self, ss, exogenous, **kwargs): + def _impulse_nonlinear(self, ss, exogenous, **kwargs): """Calculate a partial equilibrium, non-linear impulse response to a set of `exogenous` shocks from a steady state, `ss`""" irf_nonlin_partial_eq = deepcopy(exogenous) @@ -83,7 +83,7 @@ def impulse_nonlinear(self, ss, exogenous, **kwargs): return ImpulseDict(irf_nonlin_partial_eq) - def impulse_linear(self, ss, exogenous, T=None, Js=None): + def _impulse_linear(self, ss, exogenous, T=None, Js=None): """Calculate a partial equilibrium, linear impulse response to a set of `exogenous` shocks from a steady_state, `ss`""" irf_lin_partial_eq = deepcopy(exogenous) @@ -95,7 +95,7 @@ def 
impulse_linear(self, ss, exogenous, T=None, Js=None): return ImpulseDict(irf_lin_partial_eq) - def jacobian(self, ss, exogenous=None, T=None, outputs=None, Js=None): + def _jacobian(self, ss, exogenous=None, T=None, outputs=None, Js=None): """Calculate a partial equilibrium Jacobian with respect to a set of `exogenous` shocks at a steady state, `ss`""" if exogenous is None: diff --git a/src/sequence_jacobian/blocks/discont_block.py b/src/sequence_jacobian/blocks/discont_block.py index e31a462..091acda 100644 --- a/src/sequence_jacobian/blocks/discont_block.py +++ b/src/sequence_jacobian/blocks/discont_block.py @@ -235,7 +235,7 @@ def _steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, return SteadyStateDict(ss, internal=self) - def impulse_nonlinear(self, ss, exogenous, returnindividual=False, grid_paths=None): + def _impulse_nonlinear(self, ss, exogenous, returnindividual=False, grid_paths=None): """Evaluate transitional dynamics for DiscontBlock given dynamic paths for inputs in exogenous, assuming that we start and end in steady state ss, and that all inputs not specified in exogenous are constant at their ss values. Analog to SimpleBlock.td. @@ -367,7 +367,7 @@ def impulse_nonlinear(self, ss, exogenous, returnindividual=False, grid_paths=No else: return ImpulseDict({**aggregates, **aggregate_hetoutputs}) - ss - def impulse_linear(self, ss, exogenous, T=None, Js=None, **kwargs): + def _impulse_linear(self, ss, exogenous, T=None, Js=None, **kwargs): # infer T from exogenous, check that all shocks have same length shock_lengths = [x.shape[0] for x in exogenous.values()] if shock_lengths[1:] != shock_lengths[:-1]: @@ -376,7 +376,7 @@ def impulse_linear(self, ss, exogenous, T=None, Js=None, **kwargs): return ImpulseDict(self.jacobian(ss, list(exogenous.keys()), T=T, Js=Js, **kwargs).apply(exogenous)) - def jacobian(self, ss, exogenous=None, T=300, outputs=None, Js=None, h=1E-4): + def _jacobian(self, ss, exogenous=None, T=300, outputs=None, Js=None, h=1E-4): """Assemble nested dict of Jacobians of agg outputs vs. inputs, using fake news algorithm. Parameters diff --git a/src/sequence_jacobian/blocks/het_block.py b/src/sequence_jacobian/blocks/het_block.py index 52312f1..2af7d55 100644 --- a/src/sequence_jacobian/blocks/het_block.py +++ b/src/sequence_jacobian/blocks/het_block.py @@ -234,7 +234,7 @@ def _steady_state(self, calibration, backward_tol=1E-8, backward_maxit=5000, return SteadyStateDict(ss, internal=self) - def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=False, grid_paths=None): + def _impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=False, grid_paths=None): """Evaluate transitional dynamics for HetBlock given dynamic paths for inputs in kwargs, assuming that we start and end in steady state ss, and that all inputs not specified in kwargs are constant at their ss values. Analog to SimpleBlock.td. 
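Every rename in this patch follows the same convention: the block classes keep their computational routines under underscore-prefixed names that work with the block's own canonical variable names, while the public methods defined on Block translate names through the block's bijection M on the way in and out. The primitives.py hunk later in this patch implements exactly this pattern, e.g. for the linear impulse response:

def impulse_linear(self, ss, exogenous, **kwargs):
    # public wrapper on Block: map remapped names back, compute, map results forward
    return self.M @ self._impulse_linear(self.M.inv @ ss, self.M.inv @ exogenous, **kwargs)

This is what lets a remapped block be called directly on aggregate objects, for instance the later out = hh_sm.steady_state(ss) line in tests/dcblock_test.py, without renaming keys by hand.
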
@@ -345,7 +345,7 @@ def impulse_nonlinear(self, ss, exogenous, monotonic=False, returnindividual=Fal else: return ImpulseDict({**aggregates, **aggregate_hetoutputs}) - ss - def impulse_linear(self, ss, exogenous, T=None, Js=None, **kwargs): + def _impulse_linear(self, ss, exogenous, T=None, Js=None, **kwargs): # infer T from exogenous, check that all shocks have same length shock_lengths = [x.shape[0] for x in exogenous.values()] if shock_lengths[1:] != shock_lengths[:-1]: @@ -354,7 +354,7 @@ def impulse_linear(self, ss, exogenous, T=None, Js=None, **kwargs): return ImpulseDict(self.jacobian(ss, list(exogenous.keys()), T=T, Js=Js, **kwargs).apply(exogenous)) - def jacobian(self, ss, exogenous=None, T=300, outputs=None, output_list=None, Js=None, h=1E-4): + def _jacobian(self, ss, exogenous=None, T=300, outputs=None, output_list=None, Js=None, h=1E-4): """Assemble nested dict of Jacobians of agg outputs vs. inputs, using fake news algorithm. Parameters diff --git a/src/sequence_jacobian/blocks/simple_block.py b/src/sequence_jacobian/blocks/simple_block.py index 3ac7f63..ae6ee2b 100644 --- a/src/sequence_jacobian/blocks/simple_block.py +++ b/src/sequence_jacobian/blocks/simple_block.py @@ -52,7 +52,7 @@ def _steady_state(self, calibration): misc.numeric_primitive(self.f(**input_args))] return SteadyStateDict({**calibration, **dict(zip(self.output_list, output_vars))}) - def impulse_nonlinear(self, ss, exogenous): + def _impulse_nonlinear(self, ss, exogenous): input_args = {} for k, v in exogenous.items(): if np.isscalar(v): @@ -65,10 +65,10 @@ def impulse_nonlinear(self, ss, exogenous): return ImpulseDict(make_impulse_uniform_length(self.f(**input_args), self.output_list)) - ss - def impulse_linear(self, ss, exogenous, T=None, Js=None): + def _impulse_linear(self, ss, exogenous, T=None, Js=None): return ImpulseDict(self.jacobian(ss, exogenous=list(exogenous.keys()), T=T, Js=Js).apply(exogenous)) - def jacobian(self, ss, exogenous=None, T=None, Js=None): + def _jacobian(self, ss, exogenous=None, T=None, Js=None): """Assemble nested dict of Jacobians Parameters diff --git a/src/sequence_jacobian/blocks/solved_block.py b/src/sequence_jacobian/blocks/solved_block.py index 35e0769..efaa614 100644 --- a/src/sequence_jacobian/blocks/solved_block.py +++ b/src/sequence_jacobian/blocks/solved_block.py @@ -87,18 +87,18 @@ def _steady_state(self, calibration, unknowns=None, helper_blocks=None, solver=N return super().solve_steady_state(calibration, unknowns, self.targets, solver=solver, consistency_check=consistency_check, ttol=ttol, ctol=ctol, verbose=verbose) - def impulse_nonlinear(self, ss, exogenous=None, monotonic=False, Js=None, returnindividual=False, verbose=False): + def _impulse_nonlinear(self, ss, exogenous=None, monotonic=False, Js=None, returnindividual=False, verbose=False): return super().solve_impulse_nonlinear(ss, exogenous=exogenous, unknowns=list(self.unknowns.keys()), Js=Js, targets=self.targets if isinstance(self.targets, list) else list(self.targets.keys()), monotonic=monotonic, returnindividual=returnindividual, verbose=verbose) - def impulse_linear(self, ss, exogenous, T=None, Js=None): + def _impulse_linear(self, ss, exogenous, T=None, Js=None): return super().solve_impulse_linear(ss, exogenous=exogenous, unknowns=list(self.unknowns.keys()), targets=self.targets if isinstance(self.targets, list) else list(self.targets.keys()), T=T, Js=Js) - def jacobian(self, ss, exogenous=None, T=300, outputs=None, Js=None): + def _jacobian(self, ss, exogenous=None, T=300, outputs=None, 
Js=None): if exogenous is None: exogenous = list(self.inputs) if outputs is None: diff --git a/src/sequence_jacobian/blocks/support/impulse.py b/src/sequence_jacobian/blocks/support/impulse.py index 4be8f93..c8fe87f 100644 --- a/src/sequence_jacobian/blocks/support/impulse.py +++ b/src/sequence_jacobian/blocks/support/impulse.py @@ -1,8 +1,10 @@ """ImpulseDict class for manipulating impulse responses.""" import numpy as np +from copy import deepcopy from ...steady_state.classes import SteadyStateDict +from .bijection import Bijection class ImpulseDict: @@ -67,3 +69,13 @@ def __truediv__(self, other): # ImpulseDict[['C, 'Y']] / ss[['C', 'Y']]: matches steady states; don't divide by zero if isinstance(other, SteadyStateDict): return type(self)({k: v / other[k] if not np.isclose(other[k], 0) else v for k, v in self.impulse.items()}) + + def __matmul__(self, x): + # remap keys in toplevel + if isinstance(x, Bijection): + new = deepcopy(self) + new.impulse = x @ self.impulse + return new + + def __rmatmul__(self, x): + return self.__matmul__(x) diff --git a/src/sequence_jacobian/jacobian/classes.py b/src/sequence_jacobian/jacobian/classes.py index b522f7d..572c23e 100644 --- a/src/sequence_jacobian/jacobian/classes.py +++ b/src/sequence_jacobian/jacobian/classes.py @@ -5,6 +5,7 @@ import numpy as np from . import support +from ..blocks.support.bijection import Bijection class Jacobian(metaclass=ABCMeta): diff --git a/src/sequence_jacobian/primitives.py b/src/sequence_jacobian/primitives.py index 304fad0..06fae56 100644 --- a/src/sequence_jacobian/primitives.py +++ b/src/sequence_jacobian/primitives.py @@ -70,22 +70,23 @@ def inputs(self): def outputs(self): pass - # Typing information is purely to inform future user-developed `Block` sub-classes to enforce a canonical - # input and output argument structure def steady_state(self, calibration, **kwargs): + """Evaluate a partial equilibrium steady state of Block given a `calibration`.""" return self.M @ self._steady_state(self.M.inv @ calibration, **kwargs) - def impulse_nonlinear(self, ss: Dict[str, Union[Real, Array]], - exogenous: Dict[str, Array], **kwargs) -> ImpulseDict: - raise NotImplementedError(f'{type(self)} does not implement .impulse_nonlinear()') + def impulse_nonlinear(self, ss, exogenous, **kwargs): + """Calculate a partial equilibrium, non-linear impulse response to a set of `exogenous` shocks + from a steady state `ss`.""" + return self.M @ self._impulse_nonlinear(self.M.inv @ ss, self.M.inv @ exogenous, **kwargs) - def impulse_linear(self, ss: Dict[str, Union[Real, Array]], - exogenous: Dict[str, Array], **kwargs) -> ImpulseDict: - raise NotImplementedError(f'{type(self)} does not implement .impulse_linear()') + def impulse_linear(self, ss, exogenous, **kwargs): + """Calculate a partial equilibrium, linear impulse response to a set of `exogenous` shocks + from a steady state `ss`.""" + return self.M @ self._impulse_linear(self.M.inv @ ss, self.M.inv @ exogenous, **kwargs) - def jacobian(self, ss: Dict[str, Union[Real, Array]], exogenous: List[str] = None, - T: int = None, **kwargs) -> JacobianDict: - raise NotImplementedError(f'{type(self)} does not implement .jacobian()') + def jacobian(self, ss, exogenous, **kwargs): + """Calculate a partial equilibrium Jacobian to a set of `exogenous` shocks at a steady state `ss`.""" + return self.M @ self._jacobian(self.M.inv @ ss, self.M.inv @ exogenous, **kwargs) def solve_steady_state(self, calibration: Dict[str, Union[Real, Array]], unknowns: Dict[str, Union[Real, Tuple[Real, Real]]], 
From 9363bfd28b031fde0eeea92a3441e0a10b61f6e2 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 23 Jun 2021 13:03:25 -0500 Subject: [PATCH 23/35] Remap is done! --- remap_test.py | 7 ++++--- src/sequence_jacobian/blocks/combined_block.py | 2 +- src/sequence_jacobian/jacobian/classes.py | 9 +++++++++ src/sequence_jacobian/primitives.py | 4 ++-- tests/base/test_jacobian.py | 2 +- 5 files changed, 17 insertions(+), 7 deletions(-) diff --git a/remap_test.py b/remap_test.py index 14a88a4..0419171 100644 --- a/remap_test.py +++ b/remap_test.py @@ -73,13 +73,14 @@ def firm(K, L, Z, alpha, delta): r = alpha * Z * (K(-1) / L) ** (alpha-1) - delta w = (1 - alpha) * Z * (K(-1) / L) ** alpha Y = Z * K(-1) ** alpha * L ** (1 - alpha) - return r, w, Y + I = K - (1 - delta) * K(-1) + return r, w, Y, I @simple -def mkt_clearing(K, A, Y, C, delta): +def mkt_clearing(K, A, Y, C, I): asset_mkt = A - K - goods_mkt = Y - C - delta * K + goods_mkt = Y - C - I return asset_mkt, goods_mkt diff --git a/src/sequence_jacobian/blocks/combined_block.py b/src/sequence_jacobian/blocks/combined_block.py index b28108c..3c355c9 100644 --- a/src/sequence_jacobian/blocks/combined_block.py +++ b/src/sequence_jacobian/blocks/combined_block.py @@ -105,7 +105,7 @@ def _jacobian(self, ss, exogenous=None, T=None, outputs=None, Js=None): kwargs = {"exogenous": exogenous, "T": T, "outputs": outputs, "Js": Js} for i, block in enumerate(self.blocks): - curlyJ = block.jacobian(ss, **{k: kwargs[k] for k in utils.misc.input_kwarg_list(block.jacobian) if k in kwargs}).complete() + curlyJ = block._jacobian(ss, **{k: kwargs[k] for k in utils.misc.input_kwarg_list(block._jacobian) if k in kwargs}).complete() # If we want specific list of outputs, restrict curlyJ to that before continuing curlyJ = curlyJ[[k for k in curlyJ.outputs if k in outputs or k in self._required]] diff --git a/src/sequence_jacobian/jacobian/classes.py b/src/sequence_jacobian/jacobian/classes.py index 572c23e..d5e1cdc 100644 --- a/src/sequence_jacobian/jacobian/classes.py +++ b/src/sequence_jacobian/jacobian/classes.py @@ -386,9 +386,18 @@ def addinputs(self): def __matmul__(self, x): if isinstance(x, JacobianDict): return self.compose(x) + elif isinstance(x, Bijection): + nesteddict = x @ self.nesteddict + for o in nesteddict.keys(): + nesteddict[o] = x @ nesteddict[o] + return JacobianDict(nesteddict, inputs=x @ self.inputs, outputs=x @ self.outputs) else: return self.apply(x) + def __rmatmul__(self, x): + if isinstance(x, Bijection): + return JacobianDict(x @ self.nesteddict, inputs=x @ self.inputs, outputs=x @ self.outputs) + def __bool__(self): return bool(self.outputs) and bool(self.inputs) diff --git a/src/sequence_jacobian/primitives.py b/src/sequence_jacobian/primitives.py index 06fae56..a8ed6a9 100644 --- a/src/sequence_jacobian/primitives.py +++ b/src/sequence_jacobian/primitives.py @@ -84,9 +84,9 @@ def impulse_linear(self, ss, exogenous, **kwargs): from a steady state `ss`.""" return self.M @ self._impulse_linear(self.M.inv @ ss, self.M.inv @ exogenous, **kwargs) - def jacobian(self, ss, exogenous, **kwargs): + def jacobian(self, ss, exogenous, T=None, **kwargs): """Calculate a partial equilibrium Jacobian to a set of `exogenous` shocks at a steady state `ss`.""" - return self.M @ self._jacobian(self.M.inv @ ss, self.M.inv @ exogenous, **kwargs) + return self.M @ self._jacobian(self.M.inv @ ss, self.M.inv @ exogenous, T=T, **kwargs) def solve_steady_state(self, calibration: Dict[str, Union[Real, Array]], unknowns: Dict[str, Union[Real, Tuple[Real, 
Real]]], diff --git a/tests/base/test_jacobian.py b/tests/base/test_jacobian.py index 431b36d..6bd4a1d 100644 --- a/tests/base/test_jacobian.py +++ b/tests/base/test_jacobian.py @@ -126,7 +126,7 @@ def test_fake_news_v_direct_method(one_asset_hank_dag): household = hank_model._blocks_unsorted[0] T = 40 - exogenous = 'r' + exogenous = ['r'] output_list = household.non_back_iter_outputs h = 1E-4 From 440fc836ad2b87e2d82ef02b244aca805604cb77 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 23 Jun 2021 13:29:55 -0500 Subject: [PATCH 24/35] Simple models to test dcblock and remap. --- dcblock_test.py => tests/dcblock_test.py | 2 +- remap_test.py => tests/remap_test.py | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename dcblock_test.py => tests/dcblock_test.py (99%) rename remap_test.py => tests/remap_test.py (100%) diff --git a/dcblock_test.py b/tests/dcblock_test.py similarity index 99% rename from dcblock_test.py rename to tests/dcblock_test.py index d238c37..9540471 100644 --- a/dcblock_test.py +++ b/tests/dcblock_test.py @@ -1,4 +1,4 @@ -'''This model is to test DisContBlock''' +"""This model is to test DisContBlock""" import numpy as np from numba import guvectorize, njit diff --git a/remap_test.py b/tests/remap_test.py similarity index 100% rename from remap_test.py rename to tests/remap_test.py From c4cdb434a7cd05bc351fd1df93df083f64b1434b Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 23 Jun 2021 13:37:17 -0500 Subject: [PATCH 25/35] Signature with types for main block methods in primitives.py. --- src/sequence_jacobian/primitives.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/sequence_jacobian/primitives.py b/src/sequence_jacobian/primitives.py index a8ed6a9..c8201ad 100644 --- a/src/sequence_jacobian/primitives.py +++ b/src/sequence_jacobian/primitives.py @@ -70,21 +70,25 @@ def inputs(self): def outputs(self): pass - def steady_state(self, calibration, **kwargs): + def steady_state(self, calibration: SteadyStateDict, **kwargs) -> SteadyStateDict: """Evaluate a partial equilibrium steady state of Block given a `calibration`.""" return self.M @ self._steady_state(self.M.inv @ calibration, **kwargs) - def impulse_nonlinear(self, ss, exogenous, **kwargs): + def impulse_nonlinear(self, ss: SteadyStateDict, + exogenous: Dict[str, Array], **kwargs) -> ImpulseDict: """Calculate a partial equilibrium, non-linear impulse response to a set of `exogenous` shocks from a steady state `ss`.""" return self.M @ self._impulse_nonlinear(self.M.inv @ ss, self.M.inv @ exogenous, **kwargs) - def impulse_linear(self, ss, exogenous, **kwargs): + def impulse_linear(self, ss: SteadyStateDict, + exogenous: Dict[str, Array], **kwargs) -> ImpulseDict: """Calculate a partial equilibrium, linear impulse response to a set of `exogenous` shocks from a steady state `ss`.""" return self.M @ self._impulse_linear(self.M.inv @ ss, self.M.inv @ exogenous, **kwargs) - def jacobian(self, ss, exogenous, T=None, **kwargs): + def jacobian(self, ss: SteadyStateDict, + exogenous: Dict[str, Array], + T: Optional[int] = None, **kwargs) -> JacobianDict: """Calculate a partial equilibrium Jacobian to a set of `exogenous` shocks at a steady state `ss`.""" return self.M @ self._jacobian(self.M.inv @ ss, self.M.inv @ exogenous, T=T, **kwargs) From bbf89691fc67b6ae68bcf078ab1e58ab92a602c9 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Thu, 24 Jun 2021 14:18:46 -0500 Subject: [PATCH 26/35] Model w 2 remapped CombinedBlocks works well, but should return internals. 
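The test now builds the HANK model from two remapped copies of the single-household CombinedBlock, hh_sm and hh_sw, defined in the diff below. A quick sanity check of the renaming, sketched under the assumption that remap rewrites the inputs and outputs sets as in the primitives.py change from patch 20 (these asserts are illustrative and not part of the test file):

# hh_sm is the remapped block created in tests/dcblock_test.py below.
assert 'beta_sm' in hh_sm.inputs and 'beta' not in hh_sm.inputs
assert 'A_sm' in hh_sm.outputs and 'A' not in hh_sm.outputs
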
--- tests/dcblock_test.py | 68 +++++++++++++++++++++++++++++++------------ 1 file changed, 50 insertions(+), 18 deletions(-) diff --git a/tests/dcblock_test.py b/tests/dcblock_test.py index 9540471..40cf638 100644 --- a/tests/dcblock_test.py +++ b/tests/dcblock_test.py @@ -116,9 +116,8 @@ def household(V_p, Va_p, Pi_z_p, Pi_s_p, choice_set, a_grid, y_grid, z_grid, b_g a_nextgrid = (c_nextgrid + a_grid[np.newaxis, np.newaxis, :] - y_grid[:, :, np.newaxis]) / (1 + rpost) # e. upper envelope - # V, c = upper_envelope(W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, eis, vphi, chi) imin, imax = nonconcave(uc_nextgrid) # bounds of non-concave region - V, c = upper_envelope_fast(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, eis, vphi, chi) + V, c = upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, eis, vphi, chi) # f. update Va uc = c ** (-1 / eis) @@ -208,7 +207,7 @@ def nonconcave(uc_nextgrid, imin, imax): @njit -def upper_envelope_fast(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, *args): +def upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, *args): """ Interpolate consumption and value function to exogenous grid. Brute force but safe. Parameters @@ -341,7 +340,6 @@ def firm(Z, K, L, mc, alpha, delta0, delta1, psi): Y = Z * K(-1) ** alpha * L ** (1 - alpha) w = (1 - alpha) * mc * Y / L u = (alpha / delta0 / delta1 * mc * Y / K(-1)) ** (1 / delta1) - # u = 1.0 delta = delta0 * u ** delta1 Q = 1 + psi * (K / K(-1) - 1) I = K - (1 - delta) * K(-1) + psi / 2 * (K / K(-1) - 1) ** 2 * K(-1) @@ -399,9 +397,25 @@ def mkt_clearing(A, B, Y, C, I, G, L, Ze): return asset_mkt, goods_mkt, labor_mkt +@simple +def dividends(transfer): + transfer_sm = transfer + transfer_sw = transfer + return transfer_sm, transfer_sw + + +@simple +def aggregate(A_sm, C_sm, Ze_sm, Ui_sm, A_sw, C_sw, Ze_sw, Ui_sw): + A = (A_sm + A_sw) / 2 + C = (C_sm + C_sw) / 2 + Ze = (Ze_sm + Ze_sw) / 2 + Ui = (Ui_sm + Ui_sw) / 2 + return A, C, Ze, Ui + + @simple def helper1(A, tax, w, Ze, rpost, Ui): - # after hh + # after hh block B = A G = tax * w * Ze - rpost * B - (1 - tax) * w * Ui return B, G @@ -420,24 +434,42 @@ def helper2(eps, rpost, Y, L, alpha, delta0): '''Try this''' +cali_sm = {'beta': 0.9782, 'vphi': 0.8079, 'chi': 0.5690, 'fU': 0.25, 'fN': 0.1174, 's': 0.0218, + 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, + 'fU_eps': 10.69, 'fN_eps': 5.57, 's_eps': -11.17} + +cali_sw = {'beta': 0.9830, 'vphi': 0.9909, 'chi': 0.4982, 'fU': 0.22, 'fN': 0.1099, 's': 0.0132, + 'mean_z': 0.8, 'rho_z': 0.98, 'sd_z': 0.86*0.82, + 'fU_eps': 8.72, 'fN_eps': 3.55, 's_eps': -6.55} + +cali_sm = {k + '_sm': v for k, v in cali_sm.items()} +cali_sw = {k + '_sw': v for k, v in cali_sw.items()} + +calibration = {**cali_sm, **cali_sw, + 'eis': 1.0, 'uicap': 0.66, 'uirate': 0.5, 'expiry': 1/6, 'eps': 10.0, 'tax': 0.3, + 'amin': 0.0, 'amax': 500, 'nA': 100, 'lamM': 0.01, 'lamB': 0.01, 'lamL': 0.04, 'nZ': 7, + 'kappa': 0.03, 'phi_pi': 1.25, 'rstar': 0.002, 'pi': 0.0, 'alpha': 0.2, 'psi': 30, 'delta0': 0.0083} + +hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, flows, household], name='Single') + + +to_map = ['beta', 'vphi', 'chi', 'fU', 'fN', 's', 'mean_z', 'rho_z', 'sd_z', 'transfer', 'fU_eps', 'fN_eps', 's_eps', + *hh.outputs] +hh_sm = hh.remap({k: k + '_sm' for k in to_map}).rename('SingleMen') +hh_sw = hh.remap({k: k + '_sw' for k in to_map}).rename('SingleWomen') -calibration = {'eis': 1.0, 'vphi': 0.65, 'chi': 0.6, - 'uicap': 0.66, 
'uirate': 0.5, 'expiry': 1/6, 'fU': 0.25, 'fN': 0.1, 's': 0.025, - 'amin': 0.0, 'amax': 500, 'nA': 100, 'lamM': 0.01, 'lamB': 0.01, 'lamL': 0.04, - 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, 'nZ': 7, 'beta': 0.985, - 'kappa': 0.03, 'phi_pi': 1.25, 'rstar': 0.002, 'pi': 0, 'alpha': 0.2, 'psi': 30, 'delta0': 0.0083, - 'eps': 10, 'fU_eps': 10.0, 'fN_eps': 5.0, 's_eps': -8.0, 'tax': 0.3} -hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, household], name='SingleMen') -hank = create_model([hh, firm, monetary, valuation, nkpc, fiscal1, fiscal2, flows, mkt_clearing], name='HANK') +hank = create_model([hh_sm, hh_sw, aggregate, dividends, firm, monetary, valuation, nkpc, fiscal1, fiscal2, + mkt_clearing], name='HANK') -ss = hank.solve_steady_state(calibration, solver='hybr', dissolve=[fiscal1, flows], - unknowns={'Z': 0.63, 'L': 0.86, 'mc': 0.9, 'G': 0.2, 'delta1': 1.2, - 'B': 3.0, 'K': 17.0}, - targets={'Y': 1.0, 'labor_mkt': 0.0, 'nkpc_res': 0.0, 'budget': 0.0, - 'asset_mkt': 0.0, 'val': 0.0, 'u': 1.0}, +ss = hank.solve_steady_state(calibration, solver='brentq', dissolve=[fiscal1, flows], + unknowns={'L': (0.7, 0.72), + 'Z': 0.63, 'mc': 0.9, 'G': 0.2, 'delta1': 1.2, 'B': 3.0, 'K': 17.0}, + targets={'labor_mkt': 0.0, + 'Y': 1.0, 'nkpc_res': 0.0, 'budget': 0.0, 'asset_mkt': 0.0, 'val': 0.0, 'u': 1.0}, helper_blocks=[helper1, helper2], helper_targets=['asset_mkt', 'budget', 'val', 'nkpc_res', 'Y', 'u']) +out = hh_sm.steady_state(ss) From f649f923711212551539b3dadda7ef99bbf95a05 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Thu, 24 Jun 2021 16:50:48 -0500 Subject: [PATCH 27/35] Calibrated ss of full model w couples and singles. --- tests/couple_endo.py | 404 +++++++++++++++++++++++++++++++++++++ tests/dcblock_test.py | 450 +++++++----------------------------------- tests/single_endo.py | 341 ++++++++++++++++++++++++++++++++ 3 files changed, 811 insertions(+), 384 deletions(-) create mode 100644 tests/couple_endo.py create mode 100644 tests/single_endo.py diff --git a/tests/couple_endo.py b/tests/couple_endo.py new file mode 100644 index 0000000..b70df73 --- /dev/null +++ b/tests/couple_endo.py @@ -0,0 +1,404 @@ +"""Couple household model with frictional labor supply.""" + +import numpy as np +from numba import guvectorize, njit + +from sequence_jacobian import simple, agrid, markov_rouwenhorst, create_model, solved +from sequence_jacobian.blocks.discont_block import discont +from sequence_jacobian.utilities.misc import choice_prob, logsum + + +'''Core HA block''' + + +def household_init(a_grid, z_grid_m, b_grid_m, z_grid_f, b_grid_f, + atw, transfer, rpost, eis, vphi_m, chi_m, vphi_f, chi_f): + _, income = labor_income(z_grid_m, b_grid_m, z_grid_f, b_grid_f, atw, transfer, 1, 1, 0, 0, 0, 0, 0, 0) + coh = income[:, :, np.newaxis] + (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] + c_guess = 0.3 * coh + V, Va = np.empty_like(coh), np.empty_like(coh) + for iw in range(16): + V[iw, ...] = util(c_guess[iw, ...], iw, eis, vphi_m, chi_m, vphi_f, chi_f) + V = V / 0.1 + + # get Va by finite difference + Va[:, :, 1:-1] = (V[:, :, 2:] - V[:, :, :-2]) / (a_grid[2:] - a_grid[:-2]) + Va[:, :, 0] = (V[:, :, 1] - V[:, :, 0]) / (a_grid[1] - a_grid[0]) + Va[:, :, -1] = (V[:, :, -1] - V[:, :, -2]) / (a_grid[-1] - a_grid[-2]) + + return Va, V + + +@njit(fastmath=True) +def util(c, iw, eis, vphi_m, chi_m, vphi_f, chi_f): + """Utility function.""" + # 1. utility from consumption. + if eis == 1: + u = np.log(c / 2) + else: + u = (c/2) ** (1 - 1/eis) / (1 - 1/eis) + + # 2. 
disutility from work and search + if iw <= 3: + u = u - vphi_m # E male + if (iw >= 4) & (iw <= 11): + u = u - chi_m # U male + if np.mod(iw, 4) == 0: + u = u - vphi_f # E female + if (np.mod(iw, 4) == 1) | (np.mod(iw, 4) == 2): + u = u - chi_f # U female + return u + + +@discont(exogenous=('Pi_s', 'Pi_z'), policy='a', disc_policy='P', backward=('V', 'Va'), backward_init=household_init) +def household(V_p, Va_p, Pi_z_p, Pi_s_p, choice_set, a_grid, y_grid, lam_grid, + z_all, b_all, z_man, b_man, z_wom, b_wom, eis, beta, rpost, vphi_m, chi_m, vphi_f, chi_f): + """ + Backward step function EGM with upper envelope. + + Dimensions: 0: labor market status, 1: productivity, 2: assets. + Status: 0: EE, 1: EUb, 2: EU, 3: EN, 4: UbE, 5: UbUb, 6: UbU, 7: UbN, 8: UE, 9: UUb, 10: UU, 11: UN, + 12: NE, 13: NUb, 14: NU, 15: NN + State: 0: MM, 1: MB, 2: ML, 3: BM, 4: BB, 5: BL, 6: LM, 7: LB, 8: LL + + Parameters + ---------- + V_p : array(Ns, Nz, Na), status-specific value function tomorrow + Va_p : array(Ns, Nz, Na), partial of status-specific value function tomorrow + Pi_s_p : array(Ns, Nx), Markov matrix for labor market shocks + Pi_z_p : array(Nz, Nz), (non-status-specific Markov) matrix for productivity + choice_set : list(Nz), discrete choices available in each state X + a_grid : array(Na), exogenous asset grid + y_grid : array(Ns, Nz), exogenous labor income grid + lam_grid : array(Nx), scale of taste shocks, specific to interim state + eis : float, EIS + beta : float, discount factor + rpost : float, ex-post interest rate + + Returns + ------- + V : array(Ns, Nz, Na), status-specific value function today + Va : array(Ns, Nz, Na), partial of status-specific value function today + P : array(Nx, Ns, Nz, Na), probability of choosing status s in state x + c : array(Ns, Nz, Na), status-specific consumption policy today + a : array(Ns, Nz, Na), status-specific asset policy today + ze : array(Ns, Nz, Na), effective labor (average productivity if employed) + ui : array(Ns, Nz, Na), UI benefit claims (average productivity if unemployed) + """ + # shapes + Ns, Nz, Na = V_p.shape + Nx = Pi_s_p.shape[1] + + # PART 1: update value and policy functions + # a. discrete choice I expect to make tomorrow + V_p_X = np.empty((Nx, Nz, Na)) + Va_p_X = np.empty((Nx, Nz, Na)) + for ix in range(Nx): + V_p_ix = np.take(V_p, indices=choice_set[ix], axis=0) + Va_p_ix = np.take(Va_p, indices=choice_set[ix], axis=0) + P_p_ix = choice_prob(V_p_ix, lam_grid[ix]) + V_p_X[ix, ...] = logsum(V_p_ix, lam_grid[ix]) + Va_p_X[ix, ...] = np.sum(P_p_ix*Va_p_ix, axis=0) + + # b. compute expectation wrt labor market shock + V_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, V_p_X) + Va_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, Va_p_X) + + # b. compute expectation wrt productivity + V_p2 = np.einsum('ij,kjl->kil', Pi_z_p, V_p1) + Va_p2 = np.einsum('ij,kjl->kil', Pi_z_p, Va_p1) + + # d. consumption today on tomorrow's grid and endogenous asset grid today + W = beta * V_p2 + uc_nextgrid = beta * Va_p2 + c_nextgrid = 2 ** (1 - eis) * uc_nextgrid ** (-eis) + a_nextgrid = (c_nextgrid + a_grid[np.newaxis, np.newaxis, :] - y_grid[:, :, np.newaxis]) / (1 + rpost) + + # e. upper envelope + imin, imax = nonconcave(uc_nextgrid) # bounds of non-concave region + V, c = upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, eis, vphi_m, chi_m, + vphi_f, chi_f) + + # f. update Va + uc = 2 ** (1 / eis - 1) * c ** (-1 / eis) + Va = (1 + rpost) * uc + + # PART 2: things we need for GE + # 2/a. 
asset policy + a = (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] + y_grid[:, :, np.newaxis] - c + + # 2/b. choice probabilities (don't need jacobian) + P = np.zeros((Nx, Ns, Nz, Na)) + for ix in range(Nx): + V_ix = np.take(V, indices=choice_set[ix], axis=0) + P[ix, choice_set[ix], ...] = choice_prob(V_ix, lam_grid[ix]) + + # 2/c. average productivity of employed + ze = np.zeros_like(a) + ze[0, ...] = z_all[:, np.newaxis] + ze[1, ...], ze[2, ...], ze[3, ...] = z_man[:, np.newaxis], z_man[:, np.newaxis], z_man[:, np.newaxis] + ze[4, ...], ze[8, ...], ze[12, ...] = z_wom[:, np.newaxis], z_wom[:, np.newaxis], z_wom[:, np.newaxis] + + # 2/d. UI claims + ui = np.zeros_like(a) + ui[5, ...] = b_all[:, np.newaxis] + ui[4, ...], ui[6, ...], ui[7, ...] = b_man[:, np.newaxis], b_man[:, np.newaxis], b_man[:, np.newaxis] + ui[1, ...], ui[9, ...], ui[13, ...] = b_wom[:, np.newaxis], b_wom[:, np.newaxis], b_wom[:, np.newaxis] + + return V, Va, a, c, P, ze, ui + + +def labor_income(z_grid_m, b_grid_m, z_grid_f, b_grid_f, atw, transfer, + fU_m, fN_m, s_m, fU_f, fN_f, s_f, rho, expiry): + # 1. income + yE_m, yE_f = atw * z_grid_m + transfer, atw * z_grid_f + yU_m, yU_f = atw * b_grid_m + transfer, atw * b_grid_f + yN_m, yN_f = np.zeros_like(yE_m) + transfer, np.zeros_like(yE_f) + y_EE = (yE_m[:, np.newaxis] + yE_f[np.newaxis, :]).ravel() + y_EU = (yE_m[:, np.newaxis] + yU_f[np.newaxis, :]).ravel() + y_EN = (yE_m[:, np.newaxis] + yN_f[np.newaxis, :]).ravel() + y_UE = (yU_m[:, np.newaxis] + yE_f[np.newaxis, :]).ravel() + y_UU = (yU_m[:, np.newaxis] + yU_f[np.newaxis, :]).ravel() + y_UN = (yU_m[:, np.newaxis] + yN_f[np.newaxis, :]).ravel() + y_NE = (yN_m[:, np.newaxis] + yE_f[np.newaxis, :]).ravel() + y_NU = (yN_m[:, np.newaxis] + yU_f[np.newaxis, :]).ravel() + y_NN = (yN_m[:, np.newaxis] + yN_f[np.newaxis, :]).ravel() + y_grid = np.vstack((y_EE, y_EU, y_EN, y_EN, y_UE, y_UU, y_UN, y_UN, y_NE, y_NU, y_NN, y_NN, y_NE, y_NU, y_NN, y_NN)) + + # 2. 
transition matrix for joint labor market status + cov = rho * np.sqrt(s_m * s_f * (1 - s_m) * (1 - s_f)) + Pi_s_m = np.array([[1 - s_m, s_m, 0], [fU_m, (1 - fU_m) * (1 - expiry), (1 - fU_m) * expiry], + [fU_m, 0, 1 - fU_m], [fN_m, 0, 1 - fN_m]]) + Pi_s_f = np.array([[1 - s_f, s_f, 0], [fU_f, (1 - fU_f) * (1 - expiry), (1 - fU_f) * expiry], + [fU_f, 0, 1 - fU_f], [fN_f, 0, 1 - fN_f]]) + Pi_s = np.kron(Pi_s_m, Pi_s_f) + + # adjust for correlated job loss + Pi_s[0, 0] += cov # neither loses their job + Pi_s[0, 4] += cov # both lose their job + Pi_s[0, 1] -= cov # only female loses her job + Pi_s[0, 3] -= cov # only male loses his job + + return Pi_s, y_grid + + +"""Supporting functions for HA block""" + + +@guvectorize(['void(float64[:], uint32[:], uint32[:])'], '(nA) -> (),()', nopython=True) +def nonconcave(uc_nextgrid, imin, imax): + """Obtain bounds for non-concave region.""" + nA = uc_nextgrid.shape[-1] + vmin = np.inf + vmax = -np.inf + # step 1: find vmin & vmax + for ia in range(nA - 1): + if uc_nextgrid[ia + 1] > uc_nextgrid[ia]: + vmin_temp = uc_nextgrid[ia] + vmax_temp = uc_nextgrid[ia + 1] + if vmin_temp < vmin: + vmin = vmin_temp + if vmax_temp > vmax: + vmax = vmax_temp + + # 2/a Find imin (upper bound) + if vmin == np.inf: + imin_ = 0 + else: + ia = 0 + while ia < nA: + if uc_nextgrid[ia] < vmin: + break + ia += 1 + imin_ = ia + + # 2/b Find imax (lower bound) + if vmax == -np.inf: + imax_ = nA + else: + ia = nA + while ia > 0: + if uc_nextgrid[ia] > vmax: + break + ia -= 1 + imax_ = ia + + imin[:] = imin_ + imax[:] = imax_ + + +@njit +def upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, *args): + """ + Interpolate consumption and value function to exogenous grid. Brute force but safe. + Parameters + ---------- + W : array(Ns, Nz, Na), status-specific end-of-period value function (on tomorrow's grid) + a_nextgrid : array(Ns, Nz, Na), endogenous asset grid (today's grid) + c_nextgrid : array(Ns, Nz, Na), consumption on endogenous grid (today's grid) + a_grid : array(Na), exogenous asset grid (tomorrow's grid) + y_grid : array(Ns, Nz), labor income + rpost : float, ex-post interest rate + args : (eis, vphi, chi) arguments for utility function + Returns + ------- + V : array(Ns, Nz, Na), status-specific value function on exogenous grid + c : array(Ns, Nz, Na), consumption on exogenous grid + """ + + # 0. initialize + Ns, Nz, Na = W.shape + c = np.zeros_like(W) + V = -np.inf * np.ones_like(W) + + # outer loop could run in parallel + for iw in range(Ns): + for iz in range(Nz): + ycur = y_grid[iw, iz] + imaxcur = imax[iw, iz] + imincur = imin[iw, iz] + + # 1. unconstrained case: loop through a_grid, find bracketing endogenous gridpoints and interpolate. + # in concave region: exploit monotonicity and don't look for extra solutions + for ia in range(Na): + acur = a_grid[ia] + + # Region 1: below non-concave: exploit monotonicity + if (ia <= imaxcur) | (ia >= imincur): + iap = 0 + ap_low = a_nextgrid[iw, iz, iap] + ap_high = a_nextgrid[iw, iz, iap + 1] + while iap < Na - 2: # can this go up all the way? 
+ if ap_high >= acur: + break + iap += 1 + ap_low = ap_high + ap_high = a_nextgrid[iw, iz, iap + 1] + # found bracket, interpolate value function and consumption + w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] + c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] + w_slope = (w_high - w_low) / (ap_high - ap_low) + c_slope = (c_high - c_low) / (ap_high - ap_low) + c_guess = c_low + c_slope * (acur - ap_low) + w_guess = w_low + w_slope * (acur - ap_low) + V[iw, iz, ia] = util(c_guess, iw, *args) + w_guess + c[iw, iz, ia] = c_guess + + # Region 2: non-concave region + else: + # try out all segments of endogenous grid + for iap in range(Na - 1): + # does this endogenous segment bracket ia? + ap_low, ap_high = a_nextgrid[iw, iz, iap], a_nextgrid[iw, iz, iap + 1] + interp = (ap_low <= acur <= ap_high) or (ap_low >= acur >= ap_high) + + # does it need to be extrapolated above the endogenous grid? + # if needed to be extrapolated below, we would be in constrained case + extrap_above = (iap == Na - 2) and (acur > a_nextgrid[iw, iz, Na - 1]) + + if interp or extrap_above: + # interpolation slopes + w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] + c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] + w_slope = (w_high - w_low) / (ap_high - ap_low) + c_slope = (c_high - c_low) / (ap_high - ap_low) + + # implied guess + c_guess = c_low + c_slope * (acur - ap_low) + w_guess = w_low + w_slope * (acur - ap_low) + + # value + v_guess = util(c_guess, iw, *args) + w_guess + + # select best value for this segment + if v_guess > V[iw, iz, ia]: + V[iw, iz, ia] = v_guess + c[iw, iz, ia] = c_guess + + # 2. constrained case: remember that we have the inverse asset policy a(a') + ia = 0 + while ia < Na and a_grid[ia] <= a_nextgrid[iw, iz, 0]: + c[iw, iz, ia] = (1 + rpost) * a_grid[ia] + ycur + V[iw, iz, ia] = util(c[iw, iz, ia], iw, *args) + W[iw, iz, 0] + ia += 1 + + return V, c + + +'''Simple blocks''' + + +def zgrids_couple(zm, zf): + """Combine individual z_grid and b_grid as needed for couple level.""" + zeroes = np.zeros_like(zm) + + # all combinations + z_all = (zm[:, np.newaxis] + zf[np.newaxis, :]).ravel() + z_men = (zm[:, np.newaxis] + zeroes[np.newaxis, :]).ravel() + z_wom = (zeroes[:, np.newaxis] + zf[np.newaxis, :]).ravel() + return z_all, z_men, z_wom + + +@simple +def income_state_vars(mean_m, rho_m, sd_m, mean_f, rho_f, sd_f, nZ, uirate, uicap): + # income husband + z_grid_m, z_pdf_m, z_markov_m = markov_rouwenhorst(rho=rho_m, sigma=sd_m, N=nZ) + z_grid_m *= mean_m + b_grid_m = uirate * z_grid_m + b_grid_m[b_grid_m > uicap] = uicap + + # income wife + z_grid_f, z_pdf_f, z_markov_f = markov_rouwenhorst(rho=rho_f, sigma=sd_f, N=nZ) + z_grid_f *= mean_f + b_grid_f = uirate * z_grid_f + b_grid_f[b_grid_f > uicap] = uicap + + # household income + z_all, z_man, z_wom = zgrids_couple(z_grid_m, z_grid_f) + b_all, b_man, b_wom = zgrids_couple(b_grid_m, b_grid_f) + Pi_z = np.kron(z_markov_m, z_markov_f) + + return z_grid_m, z_grid_f, b_grid_m, b_grid_f, z_all, z_man, z_wom, b_all, b_man, b_wom, Pi_z + + +@simple +def employment_state_vars(lamM, lamB, lamL): + choice_set = [[0, 3, 12, 15], [1, 3, 13, 15], [2, 3, 14, 15], [4, 7, 12, 15], [5, 7, 13, 15], + [6, 7, 14, 15], [8, 11, 12, 15], [9, 11, 13, 15], [10, 11, 14, 15]] + lam_grid = np.array([np.sqrt(2*lamM**2), np.sqrt(lamM**2 + lamB**2), np.sqrt(lamM**2 + lamL**2), + np.sqrt(lamB**2 + lamM**2), np.sqrt(2*lamB**2), np.sqrt(lamB**2 + lamL**2), + np.sqrt(lamL**2 + lamM**2), np.sqrt(lamL**2 + lamB**2), 
np.sqrt(2*lamL**2)]) + return choice_set, lam_grid + + +@simple +def asset_state_vars(amin, amax, nA): + a_grid = agrid(amin=amin, amax=amax, n=nA) + return a_grid + + +@solved(unknowns={'fU_m': 0.25, 'fN_m': 0.1, 's_m': 0.025}, + targets=['fU_m_res', 'fN_m_res', 's_m_res'], + solver='broyden_custom') +def flows_m(Y, fU_m, fN_m, s_m, fU_eps_m, fN_eps_m, s_eps_m): + fU_m_res = fU_m.ss * (Y / Y.ss) ** fU_eps_m - fU_m + fN_m_res = fN_m.ss * (Y / Y.ss) ** fN_eps_m - fN_m + s_m_res = s_m.ss * (Y / Y.ss) ** s_eps_m - s_m + return fU_m_res, fN_m_res, s_m_res + + +@solved(unknowns={'fU_f': 0.25, 'fN_f': 0.1, 's_f': 0.025}, + targets=['fU_f_res', 'fN_f_res', 's_f_res'], + solver='broyden_custom') +def flows_f(Y, fU_f, fN_f, s_f, fU_eps_f, fN_eps_f, s_eps_f): + fU_f_res = fU_f.ss * (Y / Y.ss) ** fU_eps_f - fU_f + fN_f_res = fN_f.ss * (Y / Y.ss) ** fN_eps_f - fN_f + s_f_res = s_f.ss * (Y / Y.ss) ** s_eps_f - s_f + return fU_f_res, fN_f_res, s_f_res + + +'''Put it together''' + +household.add_hetinput(labor_income, verbose=False) +hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, flows_m, flows_f, household], + name='CoupleHH') diff --git a/tests/dcblock_test.py b/tests/dcblock_test.py index 40cf638..4e59229 100644 --- a/tests/dcblock_test.py +++ b/tests/dcblock_test.py @@ -1,342 +1,17 @@ """This model is to test DisContBlock""" import numpy as np -from numba import guvectorize, njit -import dfols - -from sequence_jacobian import simple, agrid, markov_rouwenhorst, create_model, solved -from sequence_jacobian.blocks.discont_block import discont -from sequence_jacobian.utilities.misc import choice_prob, logsum -from sequence_jacobian.steady_state.classes import SteadyStateDict - - -'''Core HA block''' - - -def household_init(a_grid, z_grid, b_grid, atw, transfer, rpost, eis, vphi, chi): - _, income = labor_income(z_grid, b_grid, atw, transfer, 0, 1, 1, 1) - coh = income[:, :, np.newaxis] + (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] - c_guess = 0.3 * coh - V, Va = np.empty_like(coh), np.empty_like(coh) - for iw in range(4): - V[iw, ...] = util(c_guess[iw, ...], iw, eis, vphi, chi) - V = V / 0.1 - - # get Va by finite difference - Va[:, :, 1:-1] = (V[:, :, 2:] - V[:, :, :-2]) / (a_grid[2:] - a_grid[:-2]) - Va[:, :, 0] = (V[:, :, 1] - V[:, :, 0]) / (a_grid[1] - a_grid[0]) - Va[:, :, -1] = (V[:, :, -1] - V[:, :, -2]) / (a_grid[-1] - a_grid[-2]) - - return Va, V - - -@njit(fastmath=True) -def util(c, iw, eis, vphi, chi): - """Utility function.""" - # 1. utility from consumption. - if eis == 1: - u = np.log(c) - else: - u = c ** (1 - 1 / eis) / (1 - 1 / eis) - - # 2. disutility from work and search - if iw == 0: - u = u - vphi # E - elif (iw == 1) | (iw == 2): - u = u - chi # Ub, U - - return u - - -@discont(exogenous=('Pi_s', 'Pi_z'), policy='a', disc_policy='P', backward=('V', 'Va'), backward_init=household_init) -def household(V_p, Va_p, Pi_z_p, Pi_s_p, choice_set, a_grid, y_grid, z_grid, b_grid, lam_grid, eis, beta, rpost, - vphi, chi): - """ - Backward step function EGM with upper envelope. - - Dimensions: 0: labor market status, 1: productivity, 2: assets. 
- Status: 0: E, 1: Ub, 2: U, 3: O - State: 0: M, 1: B, 2: L - - Parameters - ---------- - V_p : array(Ns, Nz, Na), status-specific value function tomorrow - Va_p : array(Ns, Nz, Na), partial of status-specific value function tomorrow - Pi_s_p : array(Ns, Nx), Markov matrix for labor market shocks - Pi_z_p : array(Nz, Nz), (non-status-specific Markov) matrix for productivity - choice_set : list(Nz), discrete choices available in each state X - a_grid : array(Na), exogenous asset grid - y_grid : array(Ns, Nz), exogenous labor income grid - z_grid : array(Nz), productivity of employed (need for GE) - b_grid : array(Nz), productivity of unemployed (need for GE) - lam_grid : array(Nx), scale of taste shocks, specific to interim state - eis : float, EIS - beta : float, discount factor - rpost : float, ex-post interest rate - vphi : float, disutility of work - chi : float, disutility of search - - Returns - ------- - V : array(Ns, Nz, Na), status-specific value function today - Va : array(Ns, Nz, Na), partial of status-specific value function today - P : array(Nx, Ns, Nz, Na), probability of choosing status s in state x - c : array(Ns, Nz, Na), status-specific consumption policy today - a : array(Ns, Nz, Na), status-specific asset policy today - ze : array(Ns, Nz, Na), effective labor (average productivity if employed) - ui : array(Ns, Nz, Na), UI benefit claims (average productivity if unemployed) - """ - # shapes - Ns, Nz, Na = V_p.shape - Nx = Pi_s_p.shape[1] - - # PART 1: update value and policy functions - # a. discrete choice I expect to make tomorrow - V_p_X = np.empty((Nx, Nz, Na)) - Va_p_X = np.empty((Nx, Nz, Na)) - for ix in range(Nx): - V_p_ix = np.take(V_p, indices=choice_set[ix], axis=0) - Va_p_ix = np.take(Va_p, indices=choice_set[ix], axis=0) - P_p_ix = choice_prob(V_p_ix, lam_grid[ix]) - V_p_X[ix, ...] = logsum(V_p_ix, lam_grid[ix]) - Va_p_X[ix, ...] = np.sum(P_p_ix * Va_p_ix, axis=0) - - # b. compute expectation wrt labor market shock - V_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, V_p_X) - Va_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, Va_p_X) - - # b. compute expectation wrt productivity - V_p2 = np.einsum('ij,kjl->kil', Pi_z_p, V_p1) - Va_p2 = np.einsum('ij,kjl->kil', Pi_z_p, Va_p1) - - # d. consumption today on tomorrow's grid and endogenous asset grid today - W = beta * V_p2 - uc_nextgrid = beta * Va_p2 - c_nextgrid = uc_nextgrid ** (-eis) - a_nextgrid = (c_nextgrid + a_grid[np.newaxis, np.newaxis, :] - y_grid[:, :, np.newaxis]) / (1 + rpost) - - # e. upper envelope - imin, imax = nonconcave(uc_nextgrid) # bounds of non-concave region - V, c = upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, eis, vphi, chi) - - # f. update Va - uc = c ** (-1 / eis) - Va = (1 + rpost) * uc - - # PART 2: things we need for GE - - # 2/a. asset policy - a = (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] + y_grid[:, :, np.newaxis] - c - - # 2/b. choice probabilities (don't need jacobian) - P = np.zeros((Nx, Ns, Nz, Na)) - for ix in range(Nx): - V_ix = np.take(V, indices=choice_set[ix], axis=0) - P[ix, choice_set[ix], ...] = choice_prob(V_ix, lam_grid[ix]) - - # 2/c. average productivity of employed - ze = np.zeros_like(a) - ze[0, ...] = z_grid[:, np.newaxis] - - # 2/d. UI claims - ui = np.zeros_like(a) - ui[1, ...] = b_grid[:, np.newaxis] - - return V, Va, a, c, P, ze, ui - - -def labor_income(z_grid, b_grid, atw, transfer, expiry, fU, fN, s): - # 1. 
income - yE = atw * z_grid + transfer - yUb = atw * b_grid + transfer - yN = np.zeros_like(yE) + transfer - y_grid = np.vstack((yE, yUb, yN, yN)) - - # 2. transition matrix for labor market status - Pi_s = np.array([[1 - s, s, 0], [fU, (1 - fU) * (1 - expiry), (1 - fU) * expiry], [fU, 0, 1 - fU], [fN, 0, 1 - fN]]) - - return Pi_s, y_grid - - -household.add_hetinput(labor_income, verbose=False) - - -"""Supporting functions for HA block""" - - -@guvectorize(['void(float64[:], uint32[:], uint32[:])'], '(nA) -> (),()', nopython=True) -def nonconcave(uc_nextgrid, imin, imax): - """Obtain bounds for non-concave region.""" - nA = uc_nextgrid.shape[-1] - vmin = np.inf - vmax = -np.inf - # step 1: find vmin & vmax - for ia in range(nA - 1): - if uc_nextgrid[ia + 1] > uc_nextgrid[ia]: - vmin_temp = uc_nextgrid[ia] - vmax_temp = uc_nextgrid[ia + 1] - if vmin_temp < vmin: - vmin = vmin_temp - if vmax_temp > vmax: - vmax = vmax_temp - - # 2/a Find imin (upper bound) - if vmin == np.inf: - imin_ = 0 - else: - ia = 0 - while ia < nA: - if uc_nextgrid[ia] < vmin: - break - ia += 1 - imin_ = ia - - # 2/b Find imax (lower bound) - if vmax == -np.inf: - imax_ = nA - else: - ia = nA - while ia > 0: - if uc_nextgrid[ia] > vmax: - break - ia -= 1 - imax_ = ia - - imin[:] = imin_ - imax[:] = imax_ - - -@njit -def upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, *args): - """ - Interpolate consumption and value function to exogenous grid. Brute force but safe. - Parameters - ---------- - W : array(Ns, Nz, Na), status-specific end-of-period value function (on tomorrow's grid) - a_nextgrid : array(Ns, Nz, Na), endogenous asset grid (today's grid) - c_nextgrid : array(Ns, Nz, Na), consumption on endogenous grid (today's grid) - a_grid : array(Na), exogenous asset grid (tomorrow's grid) - y_grid : array(Ns, Nz), labor income - rpost : float, ex-post interest rate - args : (eis, vphi, chi) arguments for utility function - Returns - ------- - V : array(Ns, Nz, Na), status-specific value function on exogenous grid - c : array(Ns, Nz, Na), consumption on exogenous grid - """ - - # 0. initialize - Ns, Nz, Na = W.shape - c = np.zeros_like(W) - V = -np.inf * np.ones_like(W) - - # outer loop could run in parallel - for iw in range(Ns): - for iz in range(Nz): - ycur = y_grid[iw, iz] - imaxcur = imax[iw, iz] - imincur = imin[iw, iz] - - # 1. unconstrained case: loop through a_grid, find bracketing endogenous gridpoints and interpolate. - # in concave region: exploit monotonicity and don't look for extra solutions - for ia in range(Na): - acur = a_grid[ia] - - # Region 1: below non-concave: exploit monotonicity - if (ia <= imaxcur) | (ia >= imincur): - iap = 0 - ap_low = a_nextgrid[iw, iz, iap] - ap_high = a_nextgrid[iw, iz, iap + 1] - while iap < Na - 2: # can this go up all the way? - if ap_high >= acur: - break - iap += 1 - ap_low = ap_high - ap_high = a_nextgrid[iw, iz, iap + 1] - # found bracket, interpolate value function and consumption - w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] - c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] - w_slope = (w_high - w_low) / (ap_high - ap_low) - c_slope = (c_high - c_low) / (ap_high - ap_low) - c_guess = c_low + c_slope * (acur - ap_low) - w_guess = w_low + w_slope * (acur - ap_low) - V[iw, iz, ia] = util(c_guess, iw, *args) + w_guess - c[iw, iz, ia] = c_guess - - # Region 2: non-concave region - else: - # try out all segments of endogenous grid - for iap in range(Na - 1): - # does this endogenous segment bracket ia? 
- ap_low, ap_high = a_nextgrid[iw, iz, iap], a_nextgrid[iw, iz, iap + 1] - interp = (ap_low <= acur <= ap_high) or (ap_low >= acur >= ap_high) - - # does it need to be extrapolated above the endogenous grid? - # if needed to be extrapolated below, we would be in constrained case - extrap_above = (iap == Na - 2) and (acur > a_nextgrid[iw, iz, Na - 1]) - - if interp or extrap_above: - # interpolation slopes - w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] - c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] - w_slope = (w_high - w_low) / (ap_high - ap_low) - c_slope = (c_high - c_low) / (ap_high - ap_low) - - # implied guess - c_guess = c_low + c_slope * (acur - ap_low) - w_guess = w_low + w_slope * (acur - ap_low) - - # value - v_guess = util(c_guess, iw, *args) + w_guess - - # select best value for this segment - if v_guess > V[iw, iz, ia]: - V[iw, iz, ia] = v_guess - c[iw, iz, ia] = c_guess - - # 2. constrained case: remember that we have the inverse asset policy a(a') - ia = 0 - while ia < Na and a_grid[ia] <= a_nextgrid[iw, iz, 0]: - c[iw, iz, ia] = (1 + rpost) * a_grid[ia] + ycur - V[iw, iz, ia] = util(c[iw, iz, ia], iw, *args) + W[iw, iz, 0] - ia += 1 - - return V, c - - -'''Simple blocks''' - -@simple -def income_state_vars(mean_z, rho_z, sd_z, nZ, uirate, uicap): - # productivity - z_grid, pi_z, Pi_z = markov_rouwenhorst(rho=rho_z, sigma=sd_z, N=nZ) - z_grid *= mean_z - - # unemployment benefits - b_grid = uirate * z_grid - b_grid[b_grid > uicap] = uicap - return z_grid, b_grid, pi_z, Pi_z - - -@simple -def employment_state_vars(lamM, lamB, lamL): - choice_set = [[0, 3], [1, 3], [2, 3]] - lam_grid = np.array([lamM, lamB, lamL]) - return choice_set, lam_grid - - -@simple -def asset_state_vars(amin, amax, nA): - a_grid = agrid(amin=amin, amax=amax, n=nA) - return a_grid +from sequence_jacobian import simple, create_model, solved +from single_endo import hh as hh_single +from couple_endo import hh as hh_couple '''Rest of the model''' @simple -def firm(Z, K, L, mc, alpha, delta0, delta1, psi): +def firm(Z, K, L, mc, tax, alpha, delta0, delta1, psi): Y = Z * K(-1) ** alpha * L ** (1 - alpha) w = (1 - alpha) * mc * Y / L u = (alpha / delta0 / delta1 * mc * Y / K(-1)) ** (1 / delta1) @@ -344,7 +19,8 @@ def firm(Z, K, L, mc, alpha, delta0, delta1, psi): Q = 1 + psi * (K / K(-1) - 1) I = K - (1 - delta) * K(-1) + psi / 2 * (K / K(-1) - 1) ** 2 * K(-1) transfer = Y - w * L - I - return Y, w, u, delta, Q, I, transfer + atw = (1 - tax) * w + return Y, w, u, delta, Q, I, transfer, atw @simple @@ -368,27 +44,13 @@ def valuation(rpost, mc, Y, K, Q, delta, psi, alpha): @solved(unknowns={'B': [0.0, 10.0]}, targets=['budget'], solver='brentq') -def fiscal1(B, tax, w, rpost, G, Ze, Ui): +def fiscal(B, tax, w, rpost, G, Ze, Ui): budget = (1 + rpost) * B + G + (1 - tax) * w * Ui - tax * w * Ze - B # tax_rule = tax - tax.ss - phi * (B(-1) - B.ss) / Y.ss tax_rule = B - B.ss return budget, tax_rule -@simple -def fiscal2(w, tax): - atw = (1 - tax) * w - return atw - - -@solved(unknowns={'fU': 0.25, 'fN': 0.1, 's': 0.025}, targets=['fU_res', 'fN_res', 's_res'], solver='broyden_custom') -def flows(Y, fU, fN, s, fU_eps, fN_eps, s_eps): - fU_res = fU.ss * (Y / Y.ss) ** fU_eps - fU - fN_res = fN.ss * (Y / Y.ss) ** fN_eps - fN - s_res = s.ss * (Y / Y.ss) ** s_eps - s - return fU_res, fN_res, s_res - - @simple def mkt_clearing(A, B, Y, C, I, G, L, Ze): asset_mkt = A - B @@ -398,31 +60,27 @@ def mkt_clearing(A, B, Y, C, I, G, L, Ze): @simple -def dividends(transfer): - transfer_sm = transfer - 
transfer_sw = transfer - return transfer_sm, transfer_sw +def dividends(transfer, pop_sm, pop_sw, pop_mc, illiq_sm, illiq_sw, illiq_mc): + transfer_sm = illiq_sm * transfer / (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) + transfer_sw = illiq_sw * transfer / (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) + transfer_mc = illiq_mc * transfer / (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) + return transfer_sm, transfer_sw, transfer_mc @simple -def aggregate(A_sm, C_sm, Ze_sm, Ui_sm, A_sw, C_sw, Ze_sw, Ui_sw): - A = (A_sm + A_sw) / 2 - C = (C_sm + C_sw) / 2 - Ze = (Ze_sm + Ze_sw) / 2 - Ui = (Ui_sm + Ui_sw) / 2 +def aggregate(A_sm, C_sm, Ze_sm, Ui_sm, pop_sm, A_sw, C_sw, Ze_sw, Ui_sw, pop_sw, A_mc, C_mc, Ze_mc, Ui_mc, pop_mc): + A = pop_sm * A_sm + pop_sw * A_sw + pop_mc * A_mc + C = pop_sm * C_sm + pop_sw * C_sw + pop_mc * C_mc + Ze = pop_sm * Ze_sm + pop_sw * Ze_sw + pop_mc * Ze_mc + Ui = pop_sm * Ui_sm + pop_sw * Ui_sw + pop_mc * Ui_mc return A, C, Ze, Ui -@simple -def helper1(A, tax, w, Ze, rpost, Ui): - # after hh block - B = A - G = tax * w * Ze - rpost * B - (1 - tax) * w * Ui - return B, G +'''Steady-state helpers''' @simple -def helper2(eps, rpost, Y, L, alpha, delta0): +def firm_ss(eps, rpost, Y, L, alpha, delta0): # uses u=1 mc = (eps - 1) / eps K = mc * Y * alpha / (rpost + delta0) @@ -432,44 +90,68 @@ def helper2(eps, rpost, Y, L, alpha, delta0): return mc, K, delta1, Z, w -'''Try this''' +@simple +def fiscal_ss(A, tax, w, Ze, rpost, Ui): + # after hh block + B = A + G = tax * w * Ze - rpost * B - (1 - tax) * w * Ui + return B, G + + +'''Calibration''' -cali_sm = {'beta': 0.9782, 'vphi': 0.8079, 'chi': 0.5690, 'fU': 0.25, 'fN': 0.1174, 's': 0.0218, - 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, +cali_sm = {'beta': 0.9833, 'vphi': 0.7625, 'chi': 0.5585, 'fU': 0.2499, 'fN': 0.1148, 's': 0.0203, + 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, 'illiq': 0.25, 'pop': 0.29, 'fU_eps': 10.69, 'fN_eps': 5.57, 's_eps': -11.17} -cali_sw = {'beta': 0.9830, 'vphi': 0.9909, 'chi': 0.4982, 'fU': 0.22, 'fN': 0.1099, 's': 0.0132, - 'mean_z': 0.8, 'rho_z': 0.98, 'sd_z': 0.86*0.82, +cali_sw = {'beta': 0.9830, 'vphi': 0.9940, 'chi': 0.4958, 'fU': 0.2207, 'fN': 0.1098, 's': 0.0132, + 'mean_z': 0.8, 'rho_z': 0.98, 'sd_z': 0.86*0.82, 'illiq': 0.15, 'pop': 0.29, 'fU_eps': 8.72, 'fN_eps': 3.55, 's_eps': -6.55} +cali_mc = {'beta': 0.9882, 'illiq': 0.6, 'pop': 0.42, 'rho': 0.042, + 'vphi_m': 0.1545, 'chi_m': 0.2119, 'fU_m': 0.3013, 'fN_m': 0.1840, 's_m': 0.0107, + 'vphi_f': 0.2605, 'chi_f': 0.2477, 'fU_f': 0.2519, 'fN_f': 0.1691, 's_f': 0.0103, + 'mean_m': 1.0, 'rho_m': 0.98, 'sd_m': 0.943*0.82, + 'mean_f': 0.8, 'rho_f': 0.98, 'sd_f': 0.86*0.82, + 'fU_eps_m': 10.37, 'fN_eps_m': 9.74, 's_eps_m': -13.60, + 'fU_eps_f': 8.40, 'fN_eps_f': 2.30, 's_eps_f': -7.87} + + +'''Remap''' + + +to_map_single = ['beta', 'vphi', 'chi', 'fU', 'fN', 's', 'mean_z', 'rho_z', 'sd_z', 'transfer', + 'fU_eps', 'fN_eps', 's_eps', *hh_single.outputs] +hh_sm = hh_single.remap({k: k + '_sm' for k in to_map_single}).rename('SingleMen') +hh_sw = hh_single.remap({k: k + '_sw' for k in to_map_single}).rename('SingleWomen') + +to_map_couple = ['beta', 'transfer', 'rho', + 'vphi_m', 'chi_m', 'fU_m', 'fN_m', 's_m', 'mean_m', 'rho_m', 'sd_m', 'fU_eps_m', 'fN_eps_m', 's_eps_m', + 'vphi_f', 'chi_f', 'fU_f', 'fN_f', 's_f', 'mean_f', 'rho_f', 'sd_f', 'fU_eps_f', 'fN_eps_f', 's_eps_f', + *hh_couple.outputs] +hh_mc = hh_couple.remap({k: k + '_mc' for k in to_map_couple}).rename('Couples') + cali_sm = 
{k + '_sm': v for k, v in cali_sm.items()} cali_sw = {k + '_sw': v for k, v in cali_sw.items()} +cali_mc = {k + '_mc': v for k, v in cali_mc.items()} -calibration = {**cali_sm, **cali_sw, - 'eis': 1.0, 'uicap': 0.66, 'uirate': 0.5, 'expiry': 1/6, 'eps': 10.0, 'tax': 0.3, - 'amin': 0.0, 'amax': 500, 'nA': 100, 'lamM': 0.01, 'lamB': 0.01, 'lamL': 0.04, 'nZ': 7, - 'kappa': 0.03, 'phi_pi': 1.25, 'rstar': 0.002, 'pi': 0.0, 'alpha': 0.2, 'psi': 30, 'delta0': 0.0083} - -hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, flows, household], name='Single') +'''Solve ss''' -to_map = ['beta', 'vphi', 'chi', 'fU', 'fN', 's', 'mean_z', 'rho_z', 'sd_z', 'transfer', 'fU_eps', 'fN_eps', 's_eps', - *hh.outputs] -hh_sm = hh.remap({k: k + '_sm' for k in to_map}).rename('SingleMen') -hh_sw = hh.remap({k: k + '_sw' for k in to_map}).rename('SingleWomen') +hank = create_model([hh_sm, hh_sw, hh_mc, aggregate, dividends, firm, monetary, valuation, nkpc, fiscal, mkt_clearing], + name='HANK') -hank = create_model([hh_sm, hh_sw, aggregate, dividends, firm, monetary, valuation, nkpc, fiscal1, fiscal2, - mkt_clearing], name='HANK') +calibration = {**cali_sm, **cali_sw, **cali_mc, + 'eis': 1.0, 'uicap': 0.66, 'uirate': 0.5, 'expiry': 1/6, 'eps': 10.0, 'tax': 0.3, + 'amin': 0.0, 'amax': 500, 'nA': 100, 'lamM': 0.01, 'lamB': 0.01, 'lamL': 0.04, 'nZ': 7, + 'kappa': 0.03, 'phi_pi': 1.25, 'rstar': 0.002, 'pi': 0.0, 'alpha': 0.2, 'psi': 30, 'delta0': 0.0083} -ss = hank.solve_steady_state(calibration, solver='brentq', dissolve=[fiscal1, flows], - unknowns={'L': (0.7, 0.72), +ss = hank.solve_steady_state(calibration, solver='toms748', dissolve=[fiscal], ttol=1E-6, + unknowns={'L': (1.06, 1.12), 'Z': 0.63, 'mc': 0.9, 'G': 0.2, 'delta1': 1.2, 'B': 3.0, 'K': 17.0}, targets={'labor_mkt': 0.0, 'Y': 1.0, 'nkpc_res': 0.0, 'budget': 0.0, 'asset_mkt': 0.0, 'val': 0.0, 'u': 1.0}, - helper_blocks=[helper1, helper2], + helper_blocks=[firm_ss, fiscal_ss], helper_targets=['asset_mkt', 'budget', 'val', 'nkpc_res', 'Y', 'u']) -out = hh_sm.steady_state(ss) - - diff --git a/tests/single_endo.py b/tests/single_endo.py new file mode 100644 index 0000000..baa3aab --- /dev/null +++ b/tests/single_endo.py @@ -0,0 +1,341 @@ +"""Single household model with frictional labor supply.""" + +import numpy as np +from numba import guvectorize, njit + +from sequence_jacobian import simple, agrid, markov_rouwenhorst, create_model, solved +from sequence_jacobian.blocks.discont_block import discont +from sequence_jacobian.utilities.misc import choice_prob, logsum + + +'''Core HA block''' + + +def household_init(a_grid, z_grid, b_grid, atw, transfer, rpost, eis, vphi, chi): + _, income = labor_income(z_grid, b_grid, atw, transfer, 0, 1, 1, 1) + coh = income[:, :, np.newaxis] + (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] + c_guess = 0.3 * coh + V, Va = np.empty_like(coh), np.empty_like(coh) + for iw in range(4): + V[iw, ...] = util(c_guess[iw, ...], iw, eis, vphi, chi) + V = V / 0.1 + + # get Va by finite difference + Va[:, :, 1:-1] = (V[:, :, 2:] - V[:, :, :-2]) / (a_grid[2:] - a_grid[:-2]) + Va[:, :, 0] = (V[:, :, 1] - V[:, :, 0]) / (a_grid[1] - a_grid[0]) + Va[:, :, -1] = (V[:, :, -1] - V[:, :, -2]) / (a_grid[-1] - a_grid[-2]) + + return Va, V + + +@njit(fastmath=True) +def util(c, iw, eis, vphi, chi): + """Utility function.""" + # 1. utility from consumption. + if eis == 1: + u = np.log(c) + else: + u = c ** (1 - 1 / eis) / (1 - 1 / eis) + + # 2. 
disutility from work and search + if iw == 0: + u = u - vphi # E + elif (iw == 1) | (iw == 2): + u = u - chi # Ub, U + + return u + + +@discont(exogenous=('Pi_s', 'Pi_z'), policy='a', disc_policy='P', backward=('V', 'Va'), backward_init=household_init) +def household(V_p, Va_p, Pi_z_p, Pi_s_p, choice_set, a_grid, y_grid, z_grid, b_grid, lam_grid, eis, beta, rpost, + vphi, chi): + """ + Backward step function EGM with upper envelope. + + Dimensions: 0: labor market status, 1: productivity, 2: assets. + Status: 0: E, 1: Ub, 2: U, 3: O + State: 0: M, 1: B, 2: L + + Parameters + ---------- + V_p : array(Ns, Nz, Na), status-specific value function tomorrow + Va_p : array(Ns, Nz, Na), partial of status-specific value function tomorrow + Pi_s_p : array(Ns, Nx), Markov matrix for labor market shocks + Pi_z_p : array(Nz, Nz), (non-status-specific Markov) matrix for productivity + choice_set : list(Nz), discrete choices available in each state X + a_grid : array(Na), exogenous asset grid + y_grid : array(Ns, Nz), exogenous labor income grid + z_grid : array(Nz), productivity of employed (need for GE) + b_grid : array(Nz), productivity of unemployed (need for GE) + lam_grid : array(Nx), scale of taste shocks, specific to interim state + eis : float, EIS + beta : float, discount factor + rpost : float, ex-post interest rate + vphi : float, disutility of work + chi : float, disutility of search + + Returns + ------- + V : array(Ns, Nz, Na), status-specific value function today + Va : array(Ns, Nz, Na), partial of status-specific value function today + P : array(Nx, Ns, Nz, Na), probability of choosing status s in state x + c : array(Ns, Nz, Na), status-specific consumption policy today + a : array(Ns, Nz, Na), status-specific asset policy today + ze : array(Ns, Nz, Na), effective labor (average productivity if employed) + ui : array(Ns, Nz, Na), UI benefit claims (average productivity if unemployed) + """ + # shapes + Ns, Nz, Na = V_p.shape + Nx = Pi_s_p.shape[1] + + # PART 1: update value and policy functions + # a. discrete choice I expect to make tomorrow + V_p_X = np.empty((Nx, Nz, Na)) + Va_p_X = np.empty((Nx, Nz, Na)) + for ix in range(Nx): + V_p_ix = np.take(V_p, indices=choice_set[ix], axis=0) + Va_p_ix = np.take(Va_p, indices=choice_set[ix], axis=0) + P_p_ix = choice_prob(V_p_ix, lam_grid[ix]) + V_p_X[ix, ...] = logsum(V_p_ix, lam_grid[ix]) + Va_p_X[ix, ...] = np.sum(P_p_ix * Va_p_ix, axis=0) + + # b. compute expectation wrt labor market shock + V_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, V_p_X) + Va_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, Va_p_X) + + # b. compute expectation wrt productivity + V_p2 = np.einsum('ij,kjl->kil', Pi_z_p, V_p1) + Va_p2 = np.einsum('ij,kjl->kil', Pi_z_p, Va_p1) + + # d. consumption today on tomorrow's grid and endogenous asset grid today + W = beta * V_p2 + uc_nextgrid = beta * Va_p2 + c_nextgrid = uc_nextgrid ** (-eis) + a_nextgrid = (c_nextgrid + a_grid[np.newaxis, np.newaxis, :] - y_grid[:, :, np.newaxis]) / (1 + rpost) + + # e. upper envelope + imin, imax = nonconcave(uc_nextgrid) # bounds of non-concave region + V, c = upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, eis, vphi, chi) + + # f. update Va + uc = c ** (-1 / eis) + Va = (1 + rpost) * uc + + # PART 2: things we need for GE + + # 2/a. asset policy + a = (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] + y_grid[:, :, np.newaxis] - c + + # 2/b. 
choice probabilities (don't need jacobian) + P = np.zeros((Nx, Ns, Nz, Na)) + for ix in range(Nx): + V_ix = np.take(V, indices=choice_set[ix], axis=0) + P[ix, choice_set[ix], ...] = choice_prob(V_ix, lam_grid[ix]) + + # 2/c. average productivity of employed + ze = np.zeros_like(a) + ze[0, ...] = z_grid[:, np.newaxis] + + # 2/d. UI claims + ui = np.zeros_like(a) + ui[1, ...] = b_grid[:, np.newaxis] + + return V, Va, a, c, P, ze, ui + + +def labor_income(z_grid, b_grid, atw, transfer, expiry, fU, fN, s): + # 1. income + yE = atw * z_grid + transfer + yUb = atw * b_grid + transfer + yN = np.zeros_like(yE) + transfer + y_grid = np.vstack((yE, yUb, yN, yN)) + + # 2. transition matrix for labor market status + Pi_s = np.array([[1 - s, s, 0], [fU, (1 - fU) * (1 - expiry), (1 - fU) * expiry], [fU, 0, 1 - fU], [fN, 0, 1 - fN]]) + + return Pi_s, y_grid + + +"""Supporting functions for HA block""" + + +@guvectorize(['void(float64[:], uint32[:], uint32[:])'], '(nA) -> (),()', nopython=True) +def nonconcave(uc_nextgrid, imin, imax): + """Obtain bounds for non-concave region.""" + nA = uc_nextgrid.shape[-1] + vmin = np.inf + vmax = -np.inf + # step 1: find vmin & vmax + for ia in range(nA - 1): + if uc_nextgrid[ia + 1] > uc_nextgrid[ia]: + vmin_temp = uc_nextgrid[ia] + vmax_temp = uc_nextgrid[ia + 1] + if vmin_temp < vmin: + vmin = vmin_temp + if vmax_temp > vmax: + vmax = vmax_temp + + # 2/a Find imin (upper bound) + if vmin == np.inf: + imin_ = 0 + else: + ia = 0 + while ia < nA: + if uc_nextgrid[ia] < vmin: + break + ia += 1 + imin_ = ia + + # 2/b Find imax (lower bound) + if vmax == -np.inf: + imax_ = nA + else: + ia = nA + while ia > 0: + if uc_nextgrid[ia] > vmax: + break + ia -= 1 + imax_ = ia + + imin[:] = imin_ + imax[:] = imax_ + + +@njit +def upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, *args): + """ + Interpolate consumption and value function to exogenous grid. Brute force but safe. + Parameters + ---------- + W : array(Ns, Nz, Na), status-specific end-of-period value function (on tomorrow's grid) + a_nextgrid : array(Ns, Nz, Na), endogenous asset grid (today's grid) + c_nextgrid : array(Ns, Nz, Na), consumption on endogenous grid (today's grid) + a_grid : array(Na), exogenous asset grid (tomorrow's grid) + y_grid : array(Ns, Nz), labor income + rpost : float, ex-post interest rate + args : (eis, vphi, chi) arguments for utility function + Returns + ------- + V : array(Ns, Nz, Na), status-specific value function on exogenous grid + c : array(Ns, Nz, Na), consumption on exogenous grid + """ + + # 0. initialize + Ns, Nz, Na = W.shape + c = np.zeros_like(W) + V = -np.inf * np.ones_like(W) + + # outer loop could run in parallel + for iw in range(Ns): + for iz in range(Nz): + ycur = y_grid[iw, iz] + imaxcur = imax[iw, iz] + imincur = imin[iw, iz] + + # 1. unconstrained case: loop through a_grid, find bracketing endogenous gridpoints and interpolate. + # in concave region: exploit monotonicity and don't look for extra solutions + for ia in range(Na): + acur = a_grid[ia] + + # Region 1: below non-concave: exploit monotonicity + if (ia <= imaxcur) | (ia >= imincur): + iap = 0 + ap_low = a_nextgrid[iw, iz, iap] + ap_high = a_nextgrid[iw, iz, iap + 1] + while iap < Na - 2: # can this go up all the way? 
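+                        # note: iap tops out at Na - 2, i.e. the last bracket (a_nextgrid[..., Na-2], a_nextgrid[..., Na-1]);
+                        # if acur lies above the whole endogenous grid, the slope formula below extrapolates off that top segment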
+ if ap_high >= acur: + break + iap += 1 + ap_low = ap_high + ap_high = a_nextgrid[iw, iz, iap + 1] + # found bracket, interpolate value function and consumption + w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] + c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] + w_slope = (w_high - w_low) / (ap_high - ap_low) + c_slope = (c_high - c_low) / (ap_high - ap_low) + c_guess = c_low + c_slope * (acur - ap_low) + w_guess = w_low + w_slope * (acur - ap_low) + V[iw, iz, ia] = util(c_guess, iw, *args) + w_guess + c[iw, iz, ia] = c_guess + + # Region 2: non-concave region + else: + # try out all segments of endogenous grid + for iap in range(Na - 1): + # does this endogenous segment bracket ia? + ap_low, ap_high = a_nextgrid[iw, iz, iap], a_nextgrid[iw, iz, iap + 1] + interp = (ap_low <= acur <= ap_high) or (ap_low >= acur >= ap_high) + + # does it need to be extrapolated above the endogenous grid? + # if needed to be extrapolated below, we would be in constrained case + extrap_above = (iap == Na - 2) and (acur > a_nextgrid[iw, iz, Na - 1]) + + if interp or extrap_above: + # interpolation slopes + w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] + c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] + w_slope = (w_high - w_low) / (ap_high - ap_low) + c_slope = (c_high - c_low) / (ap_high - ap_low) + + # implied guess + c_guess = c_low + c_slope * (acur - ap_low) + w_guess = w_low + w_slope * (acur - ap_low) + + # value + v_guess = util(c_guess, iw, *args) + w_guess + + # select best value for this segment + if v_guess > V[iw, iz, ia]: + V[iw, iz, ia] = v_guess + c[iw, iz, ia] = c_guess + + # 2. constrained case: remember that we have the inverse asset policy a(a') + ia = 0 + while ia < Na and a_grid[ia] <= a_nextgrid[iw, iz, 0]: + c[iw, iz, ia] = (1 + rpost) * a_grid[ia] + ycur + V[iw, iz, ia] = util(c[iw, iz, ia], iw, *args) + W[iw, iz, 0] + ia += 1 + + return V, c + + +'''Simple blocks''' + + +@simple +def income_state_vars(mean_z, rho_z, sd_z, nZ, uirate, uicap): + # productivity + z_grid, pi_z, Pi_z = markov_rouwenhorst(rho=rho_z, sigma=sd_z, N=nZ) + z_grid *= mean_z + + # unemployment benefits + b_grid = uirate * z_grid + b_grid[b_grid > uicap] = uicap + return z_grid, b_grid, pi_z, Pi_z + + +@simple +def employment_state_vars(lamM, lamB, lamL): + choice_set = [[0, 3], [1, 3], [2, 3]] + lam_grid = np.array([lamM, lamB, lamL]) + return choice_set, lam_grid + + +@simple +def asset_state_vars(amin, amax, nA): + a_grid = agrid(amin=amin, amax=amax, n=nA) + return a_grid + + +@solved(unknowns={'fU': 0.25, 'fN': 0.1, 's': 0.025}, targets=['fU_res', 'fN_res', 's_res'], solver='broyden_custom') +def flows(Y, fU, fN, s, fU_eps, fN_eps, s_eps): + fU_res = fU.ss * (Y / Y.ss) ** fU_eps - fU + fN_res = fN.ss * (Y / Y.ss) ** fN_eps - fN + s_res = s.ss * (Y / Y.ss) ** s_eps - s + return fU_res, fN_res, s_res + + +'''Put it together''' + +household.add_hetinput(labor_income, verbose=False) +hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, flows, household], name='SingleHH') From 737aa3abdc393c29f44c6df59585ea677b149ceb Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Fri, 25 Jun 2021 15:01:27 -0500 Subject: [PATCH 28/35] Complete Bijection's `__matmul__` methods with `NotImplemented`. 
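Background (a minimal generic sketch, not code from this diff): a binary dunder should *return*
the NotImplemented singleton when it does not recognize the other operand; Python then tries the
reflected method on that operand and raises TypeError only if both sides decline.

    def __matmul__(self, x):
        if isinstance(x, Bijection):
            return self.remap_with(x)   # remap_with stands in for the real remapping logic
        return NotImplemented           # defer to x.__rmatmul__(self); TypeError only if that also declines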
--- .../blocks/support/impulse.py | 26 ++++++++++++++----- src/sequence_jacobian/steady_state/classes.py | 2 ++ tests/base/test_public_classes.py | 2 +- 3 files changed, 23 insertions(+), 7 deletions(-) diff --git a/src/sequence_jacobian/blocks/support/impulse.py b/src/sequence_jacobian/blocks/support/impulse.py index c8fe87f..345363f 100644 --- a/src/sequence_jacobian/blocks/support/impulse.py +++ b/src/sequence_jacobian/blocks/support/impulse.py @@ -35,40 +35,52 @@ def __getitem__(self, item): if isinstance(item, str): # Case 1: ImpulseDict['C'] returns array return self.impulse[item] - if isinstance(item, list): + elif isinstance(item, list): # Case 2: ImpulseDict[['C']] or ImpulseDict[['C', 'Y']] return smaller ImpulseDicts return type(self)({k: self.impulse[k] for k in item}) + else: + ValueError("Use ImpulseDict['X'] to return an array or ImpulseDict[['X']] to return a smaller ImpulseDict.") def __add__(self, other): if isinstance(other, (float, int)): return type(self)({k: v + other for k, v in self.impulse.items()}) - if isinstance(other, SteadyStateDict): + elif isinstance(other, SteadyStateDict): return type(self)({k: v + other[k] for k, v in self.impulse.items()}) + else: + NotImplementedError('Only a number or a SteadyStateDict can be added from an ImpulseDict.') def __sub__(self, other): if isinstance(other, (float, int)): return type(self)({k: v - other for k, v in self.impulse.items()}) - if isinstance(other, (SteadyStateDict, ImpulseDict)): + elif isinstance(other, (SteadyStateDict, ImpulseDict)): return type(self)({k: v - other[k] for k, v in self.impulse.items()}) + else: + NotImplementedError('Only a number or a SteadyStateDict can be subtracted from an ImpulseDict.') def __mul__(self, other): if isinstance(other, (float, int)): return type(self)({k: v * other for k, v in self.impulse.items()}) - if isinstance(other, SteadyStateDict): + elif isinstance(other, SteadyStateDict): return type(self)({k: v * other[k] for k, v in self.impulse.items()}) + else: + NotImplementedError('An ImpulseDict can only be multiplied by a number or a SteadyStateDict.') def __rmul__(self, other): if isinstance(other, (float, int)): return type(self)({k: v * other for k, v in self.impulse.items()}) - if isinstance(other, SteadyStateDict): + elif isinstance(other, SteadyStateDict): return type(self)({k: v * other[k] for k, v in self.impulse.items()}) + else: + NotImplementedError('An ImpulseDict can only be multiplied by a number or a SteadyStateDict.') def __truediv__(self, other): if isinstance(other, (float, int)): return type(self)({k: v / other for k, v in self.impulse.items()}) # ImpulseDict[['C, 'Y']] / ss[['C', 'Y']]: matches steady states; don't divide by zero - if isinstance(other, SteadyStateDict): + elif isinstance(other, SteadyStateDict): return type(self)({k: v / other[k] if not np.isclose(other[k], 0) else v for k, v in self.impulse.items()}) + else: + NotImplementedError('An ImpulseDict can only be divided by a number or a SteadyStateDict.') def __matmul__(self, x): # remap keys in toplevel @@ -76,6 +88,8 @@ def __matmul__(self, x): new = deepcopy(self) new.impulse = x @ self.impulse return new + else: + NotImplemented def __rmatmul__(self, x): return self.__matmul__(x) diff --git a/src/sequence_jacobian/steady_state/classes.py b/src/sequence_jacobian/steady_state/classes.py index e4ff000..c281b74 100644 --- a/src/sequence_jacobian/steady_state/classes.py +++ b/src/sequence_jacobian/steady_state/classes.py @@ -39,6 +39,8 @@ def __matmul__(self, x): new = deepcopy(self) 
new.toplevel = x @ self.toplevel return new + else: + NotImplemented def __rmatmul__(self, x): return self.__matmul__(x) diff --git a/tests/base/test_public_classes.py b/tests/base/test_public_classes.py index 1c325e8..ceee3d3 100644 --- a/tests/base/test_public_classes.py +++ b/tests/base/test_public_classes.py @@ -91,7 +91,7 @@ def test_bijection(): mymap2 = Bijection({'a1': 'a2'}) assert (mymap2 @ mymap)['a'] == 'a2' - # composition with SteadyStateDict (only left __matmul__ works as intended) + # composition with SteadyStateDict ss = SteadyStateDict({'a': 2.0, 'b': 1.0}, internal={}) ss_remapped = ss @ mymap assert isinstance(ss_remapped, SteadyStateDict) From 28c2987c5e250326e724695987945be6e71e1d3b Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Fri, 25 Jun 2021 16:41:37 -0500 Subject: [PATCH 29/35] Could .solve_steady_state return internals nested in CombinedBlocks? Can use remap_test.py to play with this. --- tests/dcblock_test.py | 60 +++++++++++++++++++++++++++++-------------- tests/remap_test.py | 16 +++++++----- 2 files changed, 51 insertions(+), 25 deletions(-) diff --git a/tests/dcblock_test.py b/tests/dcblock_test.py index 4e59229..026adb7 100644 --- a/tests/dcblock_test.py +++ b/tests/dcblock_test.py @@ -3,8 +3,8 @@ import numpy as np from sequence_jacobian import simple, create_model, solved -from single_endo import hh as hh_single -from couple_endo import hh as hh_couple +import single_endo as single +import couple_endo as couple '''Rest of the model''' @@ -101,39 +101,48 @@ def fiscal_ss(A, tax, w, Ze, rpost, Ui): '''Calibration''' cali_sm = {'beta': 0.9833, 'vphi': 0.7625, 'chi': 0.5585, 'fU': 0.2499, 'fN': 0.1148, 's': 0.0203, - 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, 'illiq': 0.25, 'pop': 0.29, - 'fU_eps': 10.69, 'fN_eps': 5.57, 's_eps': -11.17} + 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, + 'fU_eps': 10.69, 'fN_eps': 5.57, 's_eps': -11.17, + 'illiq': 0.25, 'pop': 0.29} cali_sw = {'beta': 0.9830, 'vphi': 0.9940, 'chi': 0.4958, 'fU': 0.2207, 'fN': 0.1098, 's': 0.0132, - 'mean_z': 0.8, 'rho_z': 0.98, 'sd_z': 0.86*0.82, 'illiq': 0.15, 'pop': 0.29, - 'fU_eps': 8.72, 'fN_eps': 3.55, 's_eps': -6.55} + 'mean_z': 0.8, 'rho_z': 0.98, 'sd_z': 0.86*0.82, + 'fU_eps': 8.72, 'fN_eps': 3.55, 's_eps': -6.55, + 'illiq': 0.15, 'pop': 0.29} -cali_mc = {'beta': 0.9882, 'illiq': 0.6, 'pop': 0.42, 'rho': 0.042, +cali_mc = {'beta': 0.9882, 'rho': 0.042, 'vphi_m': 0.1545, 'chi_m': 0.2119, 'fU_m': 0.3013, 'fN_m': 0.1840, 's_m': 0.0107, 'vphi_f': 0.2605, 'chi_f': 0.2477, 'fU_f': 0.2519, 'fN_f': 0.1691, 's_f': 0.0103, 'mean_m': 1.0, 'rho_m': 0.98, 'sd_m': 0.943*0.82, 'mean_f': 0.8, 'rho_f': 0.98, 'sd_f': 0.86*0.82, 'fU_eps_m': 10.37, 'fN_eps_m': 9.74, 's_eps_m': -13.60, - 'fU_eps_f': 8.40, 'fN_eps_f': 2.30, 's_eps_f': -7.87} - + 'fU_eps_f': 8.40, 'fN_eps_f': 2.30, 's_eps_f': -7.87, + 'illiq': 0.6, 'pop': 0.42} '''Remap''' - +# variables to remap to_map_single = ['beta', 'vphi', 'chi', 'fU', 'fN', 's', 'mean_z', 'rho_z', 'sd_z', 'transfer', - 'fU_eps', 'fN_eps', 's_eps', *hh_single.outputs] -hh_sm = hh_single.remap({k: k + '_sm' for k in to_map_single}).rename('SingleMen') -hh_sw = hh_single.remap({k: k + '_sw' for k in to_map_single}).rename('SingleWomen') - + 'fU_eps', 'fN_eps', 's_eps', *single.hh.outputs] to_map_couple = ['beta', 'transfer', 'rho', 'vphi_m', 'chi_m', 'fU_m', 'fN_m', 's_m', 'mean_m', 'rho_m', 'sd_m', 'fU_eps_m', 'fN_eps_m', 's_eps_m', 'vphi_f', 'chi_f', 'fU_f', 'fN_f', 's_f', 'mean_f', 'rho_f', 'sd_f', 'fU_eps_f', 'fN_eps_f', 's_eps_f', - *hh_couple.outputs] 
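+# note: the to_map_* lists deliberately include each household block's aggregate outputs (A, C, Ze, Ui, ...),
+# so every type reports suffixed aggregates (A_sm, ..., A_mc, ...) that the `aggregate` and `dividends`
+# blocks combine using the pop_* and illiq_* weights in the calibration above.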
-hh_mc = hh_couple.remap({k: k + '_mc' for k in to_map_couple}).rename('Couples') + *couple.hh.outputs] -cali_sm = {k + '_sm': v for k, v in cali_sm.items()} -cali_sw = {k + '_sw': v for k, v in cali_sw.items()} -cali_mc = {k + '_mc': v for k, v in cali_mc.items()} +# Single men +hh_sm = create_model([single.income_state_vars, single.employment_state_vars, single.asset_state_vars, single.flows, + single.household.rename('single_men')], name='SingleMen') +hh_sm = hh_sm.remap({k: k + '_sm' for k in to_map_single}) + +# Single women +hh_sw = create_model([single.income_state_vars, single.employment_state_vars, single.asset_state_vars, single.flows, + single.household.rename('single_women')], name='SingleWomen') +hh_sw = hh_sw.remap({k: k + '_sw' for k in to_map_single}) + +# Married couples +hh_mc = create_model([couple.income_state_vars, couple.employment_state_vars, couple.asset_state_vars, + couple.flows_m, couple.flows_f, couple.household.rename('couples')], name='Couples') +hh_mc = hh_mc.remap({k: k + '_mc' for k in to_map_couple}) '''Solve ss''' @@ -142,6 +151,11 @@ def fiscal_ss(A, tax, w, Ze, rpost, Ui): hank = create_model([hh_sm, hh_sw, hh_mc, aggregate, dividends, firm, monetary, valuation, nkpc, fiscal, mkt_clearing], name='HANK') +# remap calibration +cali_sm = {k + '_sm': v for k, v in cali_sm.items()} +cali_sw = {k + '_sw': v for k, v in cali_sw.items()} +cali_mc = {k + '_mc': v for k, v in cali_mc.items()} + calibration = {**cali_sm, **cali_sw, **cali_mc, 'eis': 1.0, 'uicap': 0.66, 'uirate': 0.5, 'expiry': 1/6, 'eps': 10.0, 'tax': 0.3, 'amin': 0.0, 'amax': 500, 'nA': 100, 'lamM': 0.01, 'lamB': 0.01, 'lamL': 0.04, 'nZ': 7, @@ -155,3 +169,11 @@ def fiscal_ss(A, tax, w, Ze, rpost, Ui): helper_blocks=[firm_ss, fiscal_ss], helper_targets=['asset_mkt', 'budget', 'val', 'nkpc_res', 'Y', 'u']) + +# jacobians +# J = {} +# J['sm'] = hh_sm.jacobian(ss, exogenous=['atw', 'rpost', 'beta_sm', 'Y', 'transfer_sm'], T=500) + +# td_nonlin = hank.solve_impulse_nonlinear(ss, {'Z': 0.001*0.9**np.arange(300)}, +# unknowns=['K'], targets=['asset_mkt']) + diff --git a/tests/remap_test.py b/tests/remap_test.py index 0419171..221c6d3 100644 --- a/tests/remap_test.py +++ b/tests/remap_test.py @@ -115,9 +115,13 @@ def mpcs(c, a, a_grid, r): # remap method takes a dict and returns new copies of blocks household.add_hetoutput(mpcs, verbose=False) -to_map = ['beta', *household.outputs] -hh_patient = household.remap({k: k + '_patient' for k in to_map}).rename('patient household') -hh_impatient = household.remap({k: k + '_impatient' for k in to_map}).rename('impatient household') +hh_patient = create_model([income_state_vars, household.rename('patient_hh')], name='PatientHH') +to_map = ['beta', *hh_patient.outputs] + +hh_patient = hh_patient.remap({k: k + '_patient' for k in to_map}) + +hh_impatient = create_model([income_state_vars, household.rename('impatient_hh')], name='ImpatientHH') +hh_impatient = hh_impatient.remap({k: k + '_impatient' for k in to_map}) @simple @@ -131,7 +135,7 @@ def aggregate(A_patient, A_impatient, C_patient, C_impatient, Mpc_patient, Mpc_i '''Steady state''' # DAG -blocks = [hh_patient, hh_impatient, firm, mkt_clearing, income_state_vars, asset_state_vars, aggregate] +blocks = [hh_patient, hh_impatient, firm, mkt_clearing, asset_state_vars, aggregate] ks_model = create_model(blocks, name="Krusell-Smith") # Steady State @@ -142,5 +146,5 @@ def aggregate(A_patient, A_impatient, C_patient, C_impatient, Mpc_patient, Mpc_i targets={'asset_mkt': 0.0, 'Y': 1.0, 'r': 0.01}, 
helper_blocks=[firm_ss], helper_targets=['Y', 'r']) -td_nonlin = ks_model.solve_impulse_nonlinear(ss, {'Z': 0.001*0.9**np.arange(300)}, - unknowns=['K'], targets=['asset_mkt']) +# td_nonlin = ks_model.solve_impulse_nonlinear(ss, {'Z': 0.001*0.9**np.arange(300)}, +# unknowns=['K'], targets=['asset_mkt']) From 781f786f1ff8a118eeaddfc7c8a6c8ac0fb234c1 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Thu, 15 Jul 2021 19:18:32 -0400 Subject: [PATCH 30/35] save temporary changes --- src/sequence_jacobian/primitives.py | 2 ++ tests/couple_endo.py | 1 + tests/dcblock_test.py | 48 ++++++++++++++--------------- tests/remap_test.py | 16 ++++------ tests/single_endo.py | 1 + 5 files changed, 33 insertions(+), 35 deletions(-) diff --git a/src/sequence_jacobian/primitives.py b/src/sequence_jacobian/primitives.py index c8201ad..042e1c1 100644 --- a/src/sequence_jacobian/primitives.py +++ b/src/sequence_jacobian/primitives.py @@ -151,6 +151,8 @@ def remap(self, map): other.input_list = other.M @ self.input_list if hasattr(self, 'output_list'): other.output_list = other.M @ self.output_list + if hasattr(self, 'non_back_iter_outputs'): + other.non_back_iter_outputs = other.M @ self.non_back_iter_outputs return other def rename(self, name): diff --git a/tests/couple_endo.py b/tests/couple_endo.py index b70df73..5aee8f4 100644 --- a/tests/couple_endo.py +++ b/tests/couple_endo.py @@ -402,3 +402,4 @@ def flows_f(Y, fU_f, fN_f, s_f, fU_eps_f, fN_eps_f, s_eps_f): household.add_hetinput(labor_income, verbose=False) hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, flows_m, flows_f, household], name='CoupleHH') +blocks = [income_state_vars, employment_state_vars, asset_state_vars, flows_m, flows_f, household] diff --git a/tests/dcblock_test.py b/tests/dcblock_test.py index 026adb7..637dd99 100644 --- a/tests/dcblock_test.py +++ b/tests/dcblock_test.py @@ -32,7 +32,8 @@ def monetary(pi, rstar, phi_pi): @simple def nkpc(pi, mc, eps, Y, rpost, kappa): - nkpc_res = kappa * (mc - (eps - 1) / eps) + Y(+1) / Y * np.log(1 + pi(+1)) / (1 + rpost(+1)) - np.log(1 + pi) + nkpc_res = kappa * (mc - (eps - 1) / eps) + Y(+1) / Y * (1 + pi(+1)).apply(np.log) / (1 + rpost(+1)) \ + - (1 + pi).apply(np.log) return nkpc_res @@ -123,32 +124,25 @@ def fiscal_ss(A, tax, w, Ze, rpost, Ui): # variables to remap to_map_single = ['beta', 'vphi', 'chi', 'fU', 'fN', 's', 'mean_z', 'rho_z', 'sd_z', 'transfer', - 'fU_eps', 'fN_eps', 's_eps', *single.hh.outputs] + 'fU_eps', 'fN_eps', 's_eps'] + list(single.hh.outputs) + to_map_couple = ['beta', 'transfer', 'rho', 'vphi_m', 'chi_m', 'fU_m', 'fN_m', 's_m', 'mean_m', 'rho_m', 'sd_m', 'fU_eps_m', 'fN_eps_m', 's_eps_m', - 'vphi_f', 'chi_f', 'fU_f', 'fN_f', 's_f', 'mean_f', 'rho_f', 'sd_f', 'fU_eps_f', 'fN_eps_f', 's_eps_f', - *couple.hh.outputs] - -# Single men -hh_sm = create_model([single.income_state_vars, single.employment_state_vars, single.asset_state_vars, single.flows, - single.household.rename('single_men')], name='SingleMen') -hh_sm = hh_sm.remap({k: k + '_sm' for k in to_map_single}) + 'vphi_f', 'chi_f', 'fU_f', 'fN_f', 's_f', 'mean_f', 'rho_f', 'sd_f', 'fU_eps_f', 'fN_eps_f', 's_eps_f'] + list(couple.hh.outputs) -# Single women -hh_sw = create_model([single.income_state_vars, single.employment_state_vars, single.asset_state_vars, single.flows, - single.household.rename('single_women')], name='SingleWomen') -hh_sw = hh_sw.remap({k: k + '_sw' for k in to_map_single}) +# Singles +blocks_sm = [b.remap({k: k + '_sm' for k in to_map_single}).rename(b.name + '_sm') for 
b in single.blocks] +blocks_sw = [b.remap({k: k + '_sw' for k in to_map_single}).rename(b.name + '_sw') for b in single.blocks] -# Married couples -hh_mc = create_model([couple.income_state_vars, couple.employment_state_vars, couple.asset_state_vars, - couple.flows_m, couple.flows_f, couple.household.rename('couples')], name='Couples') -hh_mc = hh_mc.remap({k: k + '_mc' for k in to_map_couple}) +# Couples +blocks_mc = [b.remap({k: k + '_mc' for k in to_map_couple}).rename(b.name + '_mc') for b in couple.blocks] '''Solve ss''' -hank = create_model([hh_sm, hh_sw, hh_mc, aggregate, dividends, firm, monetary, valuation, nkpc, fiscal, mkt_clearing], +hank = create_model(blocks_sm + blocks_sw + blocks_mc + + [aggregate, dividends, firm, monetary, valuation, nkpc, fiscal, mkt_clearing], name='HANK') # remap calibration @@ -170,10 +164,14 @@ def fiscal_ss(A, tax, w, Ze, rpost, Ui): helper_targets=['asset_mkt', 'budget', 'val', 'nkpc_res', 'Y', 'u']) -# jacobians -# J = {} -# J['sm'] = hh_sm.jacobian(ss, exogenous=['atw', 'rpost', 'beta_sm', 'Y', 'transfer_sm'], T=500) - -# td_nonlin = hank.solve_impulse_nonlinear(ss, {'Z': 0.001*0.9**np.arange(300)}, -# unknowns=['K'], targets=['asset_mkt']) - +# # jacobians +# J = dict() +# J['household_sm'] = blocks_sm[4].jacobian(ss, exogenous=['transfer', 'atw', 'rpost', 'beta', 'fU', 'fN', 's'], T=500) +# J['household_sw'] = blocks_sw[4].jacobian(ss, exogenous=['transfer', 'atw', 'rpost', 'beta', 'fU', 'fN', 's'], T=500) +# J['household_mc'] = blocks_mc[5].jacobian(ss, exogenous=['transfer', 'atw', 'rpost', 'beta', +# 'fU_m', 'fN_m', 's_m', 'fU_f', 'fN_f', 's_f'], T=500) +# +# td_lin = hank.solve_impulse_linear(ss, {'rstar': 0.001*0.9**np.arange(500)}, +# unknowns=['K', 'L', 'mc', 'pi', 'tax'], +# targets=['val', 'goods_mkt', 'labor_mkt', 'nkpc_res', 'tax_rule'], +# Js=J) diff --git a/tests/remap_test.py b/tests/remap_test.py index 221c6d3..0419171 100644 --- a/tests/remap_test.py +++ b/tests/remap_test.py @@ -115,13 +115,9 @@ def mpcs(c, a, a_grid, r): # remap method takes a dict and returns new copies of blocks household.add_hetoutput(mpcs, verbose=False) -hh_patient = create_model([income_state_vars, household.rename('patient_hh')], name='PatientHH') -to_map = ['beta', *hh_patient.outputs] - -hh_patient = hh_patient.remap({k: k + '_patient' for k in to_map}) - -hh_impatient = create_model([income_state_vars, household.rename('impatient_hh')], name='ImpatientHH') -hh_impatient = hh_impatient.remap({k: k + '_impatient' for k in to_map}) +to_map = ['beta', *household.outputs] +hh_patient = household.remap({k: k + '_patient' for k in to_map}).rename('patient household') +hh_impatient = household.remap({k: k + '_impatient' for k in to_map}).rename('impatient household') @simple @@ -135,7 +131,7 @@ def aggregate(A_patient, A_impatient, C_patient, C_impatient, Mpc_patient, Mpc_i '''Steady state''' # DAG -blocks = [hh_patient, hh_impatient, firm, mkt_clearing, asset_state_vars, aggregate] +blocks = [hh_patient, hh_impatient, firm, mkt_clearing, income_state_vars, asset_state_vars, aggregate] ks_model = create_model(blocks, name="Krusell-Smith") # Steady State @@ -146,5 +142,5 @@ def aggregate(A_patient, A_impatient, C_patient, C_impatient, Mpc_patient, Mpc_i targets={'asset_mkt': 0.0, 'Y': 1.0, 'r': 0.01}, helper_blocks=[firm_ss], helper_targets=['Y', 'r']) -# td_nonlin = ks_model.solve_impulse_nonlinear(ss, {'Z': 0.001*0.9**np.arange(300)}, -# unknowns=['K'], targets=['asset_mkt']) +td_nonlin = ks_model.solve_impulse_nonlinear(ss, {'Z': 
0.001*0.9**np.arange(300)}, + unknowns=['K'], targets=['asset_mkt']) diff --git a/tests/single_endo.py b/tests/single_endo.py index baa3aab..13dc9e2 100644 --- a/tests/single_endo.py +++ b/tests/single_endo.py @@ -339,3 +339,4 @@ def flows(Y, fU, fN, s, fU_eps, fN_eps, s_eps): household.add_hetinput(labor_income, verbose=False) hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, flows, household], name='SingleHH') +blocks = [income_state_vars, employment_state_vars, asset_state_vars, flows, household] From b8af179e2798cb164af4338e7cf54ef9eeafc928 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 21 Jul 2021 13:59:54 -0400 Subject: [PATCH 31/35] cleaned up tests/; having other scripts upset VS code --- .gitignore | 3 +- tests/couple_endo.py => couple_endo.py | 0 tests/dcblock_test.py => dcblock_test.py | 34 +++-- hank_fiscal.py | 172 +++++++++++++++++++++++ tests/remap_test.py => remap_test.py | 0 tests/single_endo.py => single_endo.py | 0 6 files changed, 193 insertions(+), 16 deletions(-) rename tests/couple_endo.py => couple_endo.py (100%) rename tests/dcblock_test.py => dcblock_test.py (85%) create mode 100644 hank_fiscal.py rename tests/remap_test.py => remap_test.py (100%) rename tests/single_endo.py => single_endo.py (100%) diff --git a/.gitignore b/.gitignore index f6ec28d..4526876 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,8 @@ __pycache__/ .ipynb_checkpoints/ .DS_Store -.vscode +.vscode/ +.pytest_cache/ *.zip diff --git a/tests/couple_endo.py b/couple_endo.py similarity index 100% rename from tests/couple_endo.py rename to couple_endo.py diff --git a/tests/dcblock_test.py b/dcblock_test.py similarity index 85% rename from tests/dcblock_test.py rename to dcblock_test.py index 637dd99..cf7df30 100644 --- a/tests/dcblock_test.py +++ b/dcblock_test.py @@ -33,14 +33,14 @@ def monetary(pi, rstar, phi_pi): @simple def nkpc(pi, mc, eps, Y, rpost, kappa): nkpc_res = kappa * (mc - (eps - 1) / eps) + Y(+1) / Y * (1 + pi(+1)).apply(np.log) / (1 + rpost(+1)) \ - - (1 + pi).apply(np.log) + - (1 + pi).apply(np.log) return nkpc_res @simple def valuation(rpost, mc, Y, K, Q, delta, psi, alpha): val = alpha * mc(+1) * Y(+1) / K - (K(+1) / K - (1 - delta) + psi / 2 * (K(+1) / K - 1) ** 2) + \ - K(+1) / K * Q(+1) - (1 + rpost(+1)) * Q + K(+1) / K * Q(+1) - (1 + rpost(+1)) * Q return val @@ -62,9 +62,12 @@ def mkt_clearing(A, B, Y, C, I, G, L, Ze): @simple def dividends(transfer, pop_sm, pop_sw, pop_mc, illiq_sm, illiq_sw, illiq_mc): - transfer_sm = illiq_sm * transfer / (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) - transfer_sw = illiq_sw * transfer / (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) - transfer_mc = illiq_mc * transfer / (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) + transfer_sm = illiq_sm * transfer / \ + (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) + transfer_sw = illiq_sw * transfer / \ + (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) + transfer_mc = illiq_mc * transfer / \ + (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) return transfer_sm, transfer_sw, transfer_mc @@ -124,26 +127,27 @@ def fiscal_ss(A, tax, w, Ze, rpost, Ui): # variables to remap to_map_single = ['beta', 'vphi', 'chi', 'fU', 'fN', 's', 'mean_z', 'rho_z', 'sd_z', 'transfer', - 'fU_eps', 'fN_eps', 's_eps'] + list(single.hh.outputs) + 'fU_eps', 'fN_eps', 's_eps', *single.hh.outputs] to_map_couple = ['beta', 'transfer', 'rho', 'vphi_m', 'chi_m', 'fU_m', 'fN_m', 's_m', 'mean_m', 'rho_m', 'sd_m', 
'fU_eps_m', 'fN_eps_m', 's_eps_m', - 'vphi_f', 'chi_f', 'fU_f', 'fN_f', 's_f', 'mean_f', 'rho_f', 'sd_f', 'fU_eps_f', 'fN_eps_f', 's_eps_f'] + list(couple.hh.outputs) + 'vphi_f', 'chi_f', 'fU_f', 'fN_f', 's_f', 'mean_f', 'rho_f', 'sd_f', 'fU_eps_f', 'fN_eps_f', 's_eps_f', + *couple.hh.outputs] -# Singles -blocks_sm = [b.remap({k: k + '_sm' for k in to_map_single}).rename(b.name + '_sm') for b in single.blocks] -blocks_sw = [b.remap({k: k + '_sw' for k in to_map_single}).rename(b.name + '_sw') for b in single.blocks] - -# Couples -blocks_mc = [b.remap({k: k + '_mc' for k in to_map_couple}).rename(b.name + '_mc') for b in couple.blocks] +# remap blocks one-by-one (replicates combinedblock) +blocks_sm = [b.remap({k: k + '_sm' for k in to_map_single} + ).rename(b.name + '_sm') for b in single.blocks] +blocks_sw = [b.remap({k: k + '_sw' for k in to_map_single} + ).rename(b.name + '_sw') for b in single.blocks] +blocks_mc = [b.remap({k: k + '_mc' for k in to_map_couple} + ).rename(b.name + '_mc') for b in couple.blocks] '''Solve ss''' hank = create_model(blocks_sm + blocks_sw + blocks_mc + - [aggregate, dividends, firm, monetary, valuation, nkpc, fiscal, mkt_clearing], - name='HANK') + [aggregate, dividends, firm, monetary, valuation, nkpc, fiscal, mkt_clearing], name='HANK') # remap calibration cali_sm = {k + '_sm': v for k, v in cali_sm.items()} diff --git a/hank_fiscal.py b/hank_fiscal.py new file mode 100644 index 0000000..7510d55 --- /dev/null +++ b/hank_fiscal.py @@ -0,0 +1,172 @@ +import numpy as np +import sequence_jacobian as sj +from sequence_jacobian.blocks.support.bijection import Bijection +from sequence_jacobian.steady_state.classes import SteadyStateDict + + +'''Part 1: Household block''' + + +def household_init(a_grid, y, rpost, sigma): + c = np.maximum(1e-8, y[:, np.newaxis] + np.maximum(rpost, 0.04) * a_grid[np.newaxis, :]) + Va = (1 + rpost) * (c ** (-sigma)) + return Va + + +@sj.het(exogenous='Pi', policy='a', backward='Va', backward_init=household_init) +def household(Va_p, Pi_p, a_grid, y, rpost, beta, sigma): + """ + Backward step in simple incomplete market model. Assumes CRRA utility. 
+ + Parameters + ---------- + Va_p : array (E, A), marginal value of assets tomorrow (backward iterator) + Pi_p : array (E, E), Markov matrix for skills tomorrow + a_grid : array (A), asset grid + y : array (E), non-financial income + rpost : scalar, ex-post return on assets + beta : scalar, discount factor + sigma : scalar, utility parameter + + Returns + ------- + Va : array (E, A), marginal value of assets today + a : array (E, A), asset policy today + c : array (E, A), consumption policy today + """ + uc_nextgrid = (beta * Pi_p) @ Va_p + c_nextgrid = uc_nextgrid ** (-1 / sigma) + coh = (1 + rpost) * a_grid[np.newaxis, :] + y[:, np.newaxis] + a = sj.utilities.interpolate.interpolate_y(c_nextgrid + a_grid, coh, a_grid) # (x, xq, y) + sj.utilities.optimized_routines.setmin(a, a_grid[0]) + c = coh - a + uc = c ** (-sigma) + Va = (1 + rpost) * uc + + return Va, a, c + + +def get_mpcs(c, a, a_grid, rpost): + """Approximate mpc, with symmetric differences where possible, exactly setting mpc=1 for constrained agents.""" + mpcs_ = np.empty_like(c) + post_return = (1 + rpost) * a_grid + + # symmetric differences away from boundaries + mpcs_[:, 1:-1] = (c[:, 2:] - c[:, 0:-2]) / (post_return[2:] - post_return[:-2]) + + # asymmetric first differences at boundaries + mpcs_[:, 0] = (c[:, 1] - c[:, 0]) / (post_return[1] - post_return[0]) + mpcs_[:, -1] = (c[:, -1] - c[:, -2]) / (post_return[-1] - post_return[-2]) + + # special case of constrained + mpcs_[a == a_grid[0]] = 1 + + return mpcs_ + + +def income(tau, Y, e_grid, e_dist, Gamma, transfer): + """Labor income on the grid.""" + gamma = e_grid ** (Gamma * np.log(Y)) / np.vdot(e_dist, e_grid ** (1 + Gamma * np.log(Y))) + y = (1 - tau) * Y * gamma * e_grid + transfer + return y + + +@sj.simple +def income_state_vars(rho_e, sd_e, nE): + e_grid, e_dist, Pi = sj.utilities.discretize.markov_rouwenhorst(rho=rho_e, sigma=sd_e, N=nE) + return e_grid, e_dist, Pi + + +@sj.simple +def asset_state_vars(amin, amax, nA): + a_grid = sj.utilities.discretize.agrid(amin=amin, amax=amax, n=nA) + return a_grid + + +@sj.hetoutput() +def mpcs(c, a, a_grid, rpost): + """MPC out of lump-sum transfer.""" + mpc = get_mpcs(c, a, a_grid, rpost) + return mpc + + +household.add_hetinput(income, verbose=False) +household.add_hetoutput(mpcs, verbose=False) + + +'''Part 2: rest of the model''' + + +@sj.simple +def interest_rates(r): + rpost = r(-1) # household ex-post return + rb = r(-1) # rate on 1-period real bonds + return rpost, rb + + +@sj.solved(unknowns={'B': (0.0, 10.0)}, targets=['B_rule'], solver='brentq') +def fiscal(B, G, rb, rho_B, Y, transfer): + B_rule = B.ss + rho_B * (B(-1) - B.ss) - B + rev = (1 + rb) * B(-1) + G + transfer - B # revenue to be raised + tau = rev / Y + return B_rule, rev, tau + + +@sj.simple +def fiscal_dis(B, G, rb, rho_B, Y, transfer): + B_rule = B.ss + rho_B * (B(-1) - B.ss) - B + rev = (1 + rb) * B(-1) + G + transfer - B # revenue to be raised + tau = rev / Y + return B_rule, rev, tau + + +@sj.simple +def mkt_clearing(A, B, C, Y, G): + asset_mkt = A - B + goods_mkt = Y - C - G + return asset_mkt, goods_mkt + + +'''Try simple block''' + + +@sj.simple +def alma(a, b): + c = a + b + return c + + +alma2 = alma.remap({k: k + '_out' for k in ['a', 'b', 'c']}) +cali2 = SteadyStateDict({'a_out': 1, 'b_out': 2}, internal={}) +ss2 = alma2.steady_state(cali2) +jac2 = alma2.jacobian(cali2, ['a_out']) + +mymap = Bijection({'a': 'A', 'c': 'C'}) + +cali = SteadyStateDict({'a': 1, 'b': 2}, internal={}) +jac = alma.jacobian(cali, ['a']) + + +'''More serious''' 
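+# the commented-out driver below remaps the household HetBlock (beta -> beta1, Mpc -> Mpc1), nests it in a
+# CombinedBlock with the grid blocks, and re-solves the fiscal HANK steady state targeting Mpc1 directly;
+# it is the same remap pattern as `alma2` above, applied to a HetBlock inside a full DAG.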
+ +# household_remapped = household.remap({'beta': 'beta1', 'Mpc': 'Mpc1'}) +# hh = sj.create_model([household_remapped, income_state_vars, asset_state_vars], name='Household') +# dag = sj.create_model([hh, interest_rates, fiscal_dis, mkt_clearing], name='HANK') +# +# calibration = {'Y': 1.0, 'r': 0.005, 'sigma': 2.0, 'rho_e': 0.91, 'sd_e': 0.92, 'nE': 11, +# 'amin': 0.0, 'amax': 1000, 'nA': 500, 'Gamma': 0.0, 'rho_B': 0.9, 'transfer': 0.143} +# +# ss = dag.solve_steady_state(calibration, +# unknowns={'beta1': .95, 'G': 0.2, 'B': 2.0}, +# targets={'asset_mkt': 0.0, 'tau': 0.334, 'Mpc1': 0.25}, +# solver='hybr') + +# td_lin = dag.solve_impulse_linear(ss, {'r': 0.001*0.9**np.arange(300)}, +# unknowns=['B', 'Y'], targets=['asset_mkt', 'B_rule']) +# +# +# td_nonlin = dag.solve_impulse_nonlinear(ss, {'r': 0.001*0.9**np.arange(300)}, +# unknowns=['B', 'Y'], targets=['asset_mkt', 'B_rule']) + + + diff --git a/tests/remap_test.py b/remap_test.py similarity index 100% rename from tests/remap_test.py rename to remap_test.py diff --git a/tests/single_endo.py b/single_endo.py similarity index 100% rename from tests/single_endo.py rename to single_endo.py From f00c2b52ecf250d1ef1b361313a7d18eba84bd35 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Wed, 21 Jul 2021 17:52:16 -0400 Subject: [PATCH 32/35] Test case for nested CombinedBlocks: remap_test.py --- hank_fiscal.py | 172 ------------------------------------------------- remap_test.py | 25 ++++--- 2 files changed, 17 insertions(+), 180 deletions(-) delete mode 100644 hank_fiscal.py diff --git a/hank_fiscal.py b/hank_fiscal.py deleted file mode 100644 index 7510d55..0000000 --- a/hank_fiscal.py +++ /dev/null @@ -1,172 +0,0 @@ -import numpy as np -import sequence_jacobian as sj -from sequence_jacobian.blocks.support.bijection import Bijection -from sequence_jacobian.steady_state.classes import SteadyStateDict - - -'''Part 1: Household block''' - - -def household_init(a_grid, y, rpost, sigma): - c = np.maximum(1e-8, y[:, np.newaxis] + np.maximum(rpost, 0.04) * a_grid[np.newaxis, :]) - Va = (1 + rpost) * (c ** (-sigma)) - return Va - - -@sj.het(exogenous='Pi', policy='a', backward='Va', backward_init=household_init) -def household(Va_p, Pi_p, a_grid, y, rpost, beta, sigma): - """ - Backward step in simple incomplete market model. Assumes CRRA utility. 
- - Parameters - ---------- - Va_p : array (E, A), marginal value of assets tomorrow (backward iterator) - Pi_p : array (E, E), Markov matrix for skills tomorrow - a_grid : array (A), asset grid - y : array (E), non-financial income - rpost : scalar, ex-post return on assets - beta : scalar, discount factor - sigma : scalar, utility parameter - - Returns - ------- - Va : array (E, A), marginal value of assets today - a : array (E, A), asset policy today - c : array (E, A), consumption policy today - """ - uc_nextgrid = (beta * Pi_p) @ Va_p - c_nextgrid = uc_nextgrid ** (-1 / sigma) - coh = (1 + rpost) * a_grid[np.newaxis, :] + y[:, np.newaxis] - a = sj.utilities.interpolate.interpolate_y(c_nextgrid + a_grid, coh, a_grid) # (x, xq, y) - sj.utilities.optimized_routines.setmin(a, a_grid[0]) - c = coh - a - uc = c ** (-sigma) - Va = (1 + rpost) * uc - - return Va, a, c - - -def get_mpcs(c, a, a_grid, rpost): - """Approximate mpc, with symmetric differences where possible, exactly setting mpc=1 for constrained agents.""" - mpcs_ = np.empty_like(c) - post_return = (1 + rpost) * a_grid - - # symmetric differences away from boundaries - mpcs_[:, 1:-1] = (c[:, 2:] - c[:, 0:-2]) / (post_return[2:] - post_return[:-2]) - - # asymmetric first differences at boundaries - mpcs_[:, 0] = (c[:, 1] - c[:, 0]) / (post_return[1] - post_return[0]) - mpcs_[:, -1] = (c[:, -1] - c[:, -2]) / (post_return[-1] - post_return[-2]) - - # special case of constrained - mpcs_[a == a_grid[0]] = 1 - - return mpcs_ - - -def income(tau, Y, e_grid, e_dist, Gamma, transfer): - """Labor income on the grid.""" - gamma = e_grid ** (Gamma * np.log(Y)) / np.vdot(e_dist, e_grid ** (1 + Gamma * np.log(Y))) - y = (1 - tau) * Y * gamma * e_grid + transfer - return y - - -@sj.simple -def income_state_vars(rho_e, sd_e, nE): - e_grid, e_dist, Pi = sj.utilities.discretize.markov_rouwenhorst(rho=rho_e, sigma=sd_e, N=nE) - return e_grid, e_dist, Pi - - -@sj.simple -def asset_state_vars(amin, amax, nA): - a_grid = sj.utilities.discretize.agrid(amin=amin, amax=amax, n=nA) - return a_grid - - -@sj.hetoutput() -def mpcs(c, a, a_grid, rpost): - """MPC out of lump-sum transfer.""" - mpc = get_mpcs(c, a, a_grid, rpost) - return mpc - - -household.add_hetinput(income, verbose=False) -household.add_hetoutput(mpcs, verbose=False) - - -'''Part 2: rest of the model''' - - -@sj.simple -def interest_rates(r): - rpost = r(-1) # household ex-post return - rb = r(-1) # rate on 1-period real bonds - return rpost, rb - - -@sj.solved(unknowns={'B': (0.0, 10.0)}, targets=['B_rule'], solver='brentq') -def fiscal(B, G, rb, rho_B, Y, transfer): - B_rule = B.ss + rho_B * (B(-1) - B.ss) - B - rev = (1 + rb) * B(-1) + G + transfer - B # revenue to be raised - tau = rev / Y - return B_rule, rev, tau - - -@sj.simple -def fiscal_dis(B, G, rb, rho_B, Y, transfer): - B_rule = B.ss + rho_B * (B(-1) - B.ss) - B - rev = (1 + rb) * B(-1) + G + transfer - B # revenue to be raised - tau = rev / Y - return B_rule, rev, tau - - -@sj.simple -def mkt_clearing(A, B, C, Y, G): - asset_mkt = A - B - goods_mkt = Y - C - G - return asset_mkt, goods_mkt - - -'''Try simple block''' - - -@sj.simple -def alma(a, b): - c = a + b - return c - - -alma2 = alma.remap({k: k + '_out' for k in ['a', 'b', 'c']}) -cali2 = SteadyStateDict({'a_out': 1, 'b_out': 2}, internal={}) -ss2 = alma2.steady_state(cali2) -jac2 = alma2.jacobian(cali2, ['a_out']) - -mymap = Bijection({'a': 'A', 'c': 'C'}) - -cali = SteadyStateDict({'a': 1, 'b': 2}, internal={}) -jac = alma.jacobian(cali, ['a']) - - -'''More serious''' 
- -# household_remapped = household.remap({'beta': 'beta1', 'Mpc': 'Mpc1'}) -# hh = sj.create_model([household_remapped, income_state_vars, asset_state_vars], name='Household') -# dag = sj.create_model([hh, interest_rates, fiscal_dis, mkt_clearing], name='HANK') -# -# calibration = {'Y': 1.0, 'r': 0.005, 'sigma': 2.0, 'rho_e': 0.91, 'sd_e': 0.92, 'nE': 11, -# 'amin': 0.0, 'amax': 1000, 'nA': 500, 'Gamma': 0.0, 'rho_B': 0.9, 'transfer': 0.143} -# -# ss = dag.solve_steady_state(calibration, -# unknowns={'beta1': .95, 'G': 0.2, 'B': 2.0}, -# targets={'asset_mkt': 0.0, 'tau': 0.334, 'Mpc1': 0.25}, -# solver='hybr') - -# td_lin = dag.solve_impulse_linear(ss, {'r': 0.001*0.9**np.arange(300)}, -# unknowns=['B', 'Y'], targets=['asset_mkt', 'B_rule']) -# -# -# td_nonlin = dag.solve_impulse_nonlinear(ss, {'r': 0.001*0.9**np.arange(300)}, -# unknowns=['B', 'Y'], targets=['asset_mkt', 'B_rule']) - - - diff --git a/remap_test.py b/remap_test.py index 0419171..2a69197 100644 --- a/remap_test.py +++ b/remap_test.py @@ -111,13 +111,19 @@ def mpcs(c, a, a_grid, r): return mpc +household.add_hetoutput(mpcs, verbose=False) + + '''Part 3: permanent heterogeneity''' -# remap method takes a dict and returns new copies of blocks -household.add_hetoutput(mpcs, verbose=False) -to_map = ['beta', *household.outputs] -hh_patient = household.remap({k: k + '_patient' for k in to_map}).rename('patient household') -hh_impatient = household.remap({k: k + '_impatient' for k in to_map}).rename('impatient household') +# permanent types given by CombinedBlocks with different names +hh_patient = create_model([income_state_vars, household], name='patient_hh') +hh_impatient = create_model([income_state_vars, household], name='impatient_hh') + +# remap variables +to_map = ['beta', *hh_patient.outputs] +hh_patient = hh_patient.remap({k: k + '_patient' for k in to_map}) +hh_impatient = hh_impatient.remap({k: k + '_impatient' for k in to_map}) @simple @@ -134,13 +140,16 @@ def aggregate(A_patient, A_impatient, C_patient, C_impatient, Mpc_patient, Mpc_i blocks = [hh_patient, hh_impatient, firm, mkt_clearing, income_state_vars, asset_state_vars, aggregate] ks_model = create_model(blocks, name="Krusell-Smith") -# Steady State +# steady state calibration = {'eis': 1, 'delta': 0.025, 'alpha': 0.3, 'rho': 0.966, 'sigma': 0.5, 'L': 1.0, 'nS': 11, 'nA': 500, 'amax': 1000, 'beta_impatient': 0.98, 'mass_patient': 0.5} + +# this should return ss.internals['patient_hh']['household'] and ss.internals['impatient_hh']['household'] ss = ks_model.solve_steady_state(calibration, solver='brentq', unknowns={'beta_patient': (0.97/1.01, 0.999/1.01), 'Z': 0.5, 'K': 8.6}, targets={'asset_mkt': 0.0, 'Y': 1.0, 'r': 0.01}, helper_blocks=[firm_ss], helper_targets=['Y', 'r']) -td_nonlin = ks_model.solve_impulse_nonlinear(ss, {'Z': 0.001*0.9**np.arange(300)}, - unknowns=['K'], targets=['asset_mkt']) +# .jacobian will have to find the internals, .impulse_linear, .impulse_nonlinear should be able to return internals +# td_nonlin = ks_model.solve_impulse_nonlinear(ss, {'Z': 0.001*0.9**np.arange(300)}, +# unknowns=['K'], targets=['asset_mkt']) From 955b90da3a9befa67c7fcd0befac8671dba1de2b Mon Sep 17 00:00:00 2001 From: Michael Cai Date: Wed, 21 Jul 2021 20:09:30 -0500 Subject: [PATCH 33/35] Return nested .internal data in steady_state invocations on DAGs with CombinedBlocks --- src/sequence_jacobian/blocks/combined_block.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/sequence_jacobian/blocks/combined_block.py 
b/src/sequence_jacobian/blocks/combined_block.py index 3c355c9..08af6c2 100644 --- a/src/sequence_jacobian/blocks/combined_block.py +++ b/src/sequence_jacobian/blocks/combined_block.py @@ -66,10 +66,15 @@ def _steady_state(self, calibration, helper_blocks=None, **kwargs): topsorted = utils.graph.block_sort(self.blocks, calibration=calibration, helper_blocks=helper_blocks) blocks_all = self.blocks + helper_blocks - ss_partial_eq = deepcopy(calibration) + ss_partial_eq_toplevel = deepcopy(calibration) + ss_partial_eq_internal = {} for i in topsorted: - ss_partial_eq.update(eval_block_ss(blocks_all[i], ss_partial_eq, **kwargs)) - return SteadyStateDict(ss_partial_eq) + outputs = eval_block_ss(blocks_all[i], ss_partial_eq_toplevel, **kwargs) + ss_partial_eq_toplevel.update(outputs.toplevel) + if outputs.internal: + ss_partial_eq_internal.update(outputs.internal) + ss_partial_eq_internal = {self.name: ss_partial_eq_internal} if ss_partial_eq_internal else {} + return SteadyStateDict(ss_partial_eq_toplevel, internal=ss_partial_eq_internal) def _impulse_nonlinear(self, ss, exogenous, **kwargs): """Calculate a partial equilibrium, non-linear impulse response to a set of `exogenous` shocks from From 54e67dfd302056aa7a7c9fd50412c0f30e570b0f Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Mon, 26 Jul 2021 12:09:18 -0400 Subject: [PATCH 34/35] Caught bug in ss driver that sabotaged dissolve. --- src/sequence_jacobian/steady_state/drivers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/sequence_jacobian/steady_state/drivers.py b/src/sequence_jacobian/steady_state/drivers.py index 3119bfa..c105270 100644 --- a/src/sequence_jacobian/steady_state/drivers.py +++ b/src/sequence_jacobian/steady_state/drivers.py @@ -157,7 +157,7 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k # Bypass the behavior for SolvedBlocks to numerically solve for their unknowns and simply evaluate them # at the provided set of unknowns if included in dissolve. - valid_input_kwargs = misc.input_kwarg_list(block.steady_state) + valid_input_kwargs = misc.input_kwarg_list(block._steady_state) block_unknowns_in_toplevel_unknowns = set(block.unknowns.keys()).issubset(set(toplevel_unknowns)) if hasattr(block, "unknowns") else False input_kwarg_dict = {k: v for k, v in kwargs.items() if k in valid_input_kwargs} if block in dissolve and "solver" in valid_input_kwargs: @@ -170,7 +170,7 @@ def eval_block_ss(block, calibration, toplevel_unknowns=None, dissolve=None, **k f"If the user provides a set of top-level unknowns that subsume block-level unknowns," f" it must be explicitly declared in `dissolve`.") - return block.M @ block.steady_state({k: v for k, v in input_arg_dict.items() if k in block.inputs}, **input_kwarg_dict) + return block.steady_state({k: v for k, v in input_arg_dict.items() if k in block.inputs}, **input_kwarg_dict) def _solve_for_unknowns(residual, unknowns, targets, solver, solver_kwargs, residual_kwargs=None, From a1b8851fe6ba05ebcfdcb91ae8f10f1d67279997 Mon Sep 17 00:00:00 2001 From: bbardoczy Date: Mon, 26 Jul 2021 13:34:14 -0400 Subject: [PATCH 35/35] Simple ss test for perma heterogeneity using remap. 
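The test follows the remap_test.py pattern above (sketch only; the committed version lives in
tests/base/test_steady_state.py, with supporting changes in tests/conftest.py and the
krusell_smith model):

    to_map = ['beta', *household.outputs]
    hh_patient = household.remap({k: k + '_patient' for k in to_map}).rename('patient household')
    hh_impatient = household.remap({k: k + '_impatient' for k in to_map}).rename('impatient household')
    # aggregate the two types with mass_patient weights and check that asset_mkt still clears

The standalone scripts couple_endo.py, dcblock_test.py, remap_test.py, and single_endo.py are
deleted below.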
--- couple_endo.py | 405 ------------------ dcblock_test.py | 181 -------- remap_test.py | 155 ------- single_endo.py | 342 --------------- src/sequence_jacobian/models/krusell_smith.py | 22 +- tests/base/test_steady_state.py | 6 + tests/conftest.py | 27 ++ 7 files changed, 48 insertions(+), 1090 deletions(-) delete mode 100644 couple_endo.py delete mode 100644 dcblock_test.py delete mode 100644 remap_test.py delete mode 100644 single_endo.py diff --git a/couple_endo.py b/couple_endo.py deleted file mode 100644 index 5aee8f4..0000000 --- a/couple_endo.py +++ /dev/null @@ -1,405 +0,0 @@ -"""Couple household model with frictional labor supply.""" - -import numpy as np -from numba import guvectorize, njit - -from sequence_jacobian import simple, agrid, markov_rouwenhorst, create_model, solved -from sequence_jacobian.blocks.discont_block import discont -from sequence_jacobian.utilities.misc import choice_prob, logsum - - -'''Core HA block''' - - -def household_init(a_grid, z_grid_m, b_grid_m, z_grid_f, b_grid_f, - atw, transfer, rpost, eis, vphi_m, chi_m, vphi_f, chi_f): - _, income = labor_income(z_grid_m, b_grid_m, z_grid_f, b_grid_f, atw, transfer, 1, 1, 0, 0, 0, 0, 0, 0) - coh = income[:, :, np.newaxis] + (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] - c_guess = 0.3 * coh - V, Va = np.empty_like(coh), np.empty_like(coh) - for iw in range(16): - V[iw, ...] = util(c_guess[iw, ...], iw, eis, vphi_m, chi_m, vphi_f, chi_f) - V = V / 0.1 - - # get Va by finite difference - Va[:, :, 1:-1] = (V[:, :, 2:] - V[:, :, :-2]) / (a_grid[2:] - a_grid[:-2]) - Va[:, :, 0] = (V[:, :, 1] - V[:, :, 0]) / (a_grid[1] - a_grid[0]) - Va[:, :, -1] = (V[:, :, -1] - V[:, :, -2]) / (a_grid[-1] - a_grid[-2]) - - return Va, V - - -@njit(fastmath=True) -def util(c, iw, eis, vphi_m, chi_m, vphi_f, chi_f): - """Utility function.""" - # 1. utility from consumption. - if eis == 1: - u = np.log(c / 2) - else: - u = (c/2) ** (1 - 1/eis) / (1 - 1/eis) - - # 2. disutility from work and search - if iw <= 3: - u = u - vphi_m # E male - if (iw >= 4) & (iw <= 11): - u = u - chi_m # U male - if np.mod(iw, 4) == 0: - u = u - vphi_f # E female - if (np.mod(iw, 4) == 1) | (np.mod(iw, 4) == 2): - u = u - chi_f # U female - return u - - -@discont(exogenous=('Pi_s', 'Pi_z'), policy='a', disc_policy='P', backward=('V', 'Va'), backward_init=household_init) -def household(V_p, Va_p, Pi_z_p, Pi_s_p, choice_set, a_grid, y_grid, lam_grid, - z_all, b_all, z_man, b_man, z_wom, b_wom, eis, beta, rpost, vphi_m, chi_m, vphi_f, chi_f): - """ - Backward step function EGM with upper envelope. - - Dimensions: 0: labor market status, 1: productivity, 2: assets. 
- Status: 0: EE, 1: EUb, 2: EU, 3: EN, 4: UbE, 5: UbUb, 6: UbU, 7: UbN, 8: UE, 9: UUb, 10: UU, 11: UN, - 12: NE, 13: NUb, 14: NU, 15: NN - State: 0: MM, 1: MB, 2: ML, 3: BM, 4: BB, 5: BL, 6: LM, 7: LB, 8: LL - - Parameters - ---------- - V_p : array(Ns, Nz, Na), status-specific value function tomorrow - Va_p : array(Ns, Nz, Na), partial of status-specific value function tomorrow - Pi_s_p : array(Ns, Nx), Markov matrix for labor market shocks - Pi_z_p : array(Nz, Nz), (non-status-specific Markov) matrix for productivity - choice_set : list(Nz), discrete choices available in each state X - a_grid : array(Na), exogenous asset grid - y_grid : array(Ns, Nz), exogenous labor income grid - lam_grid : array(Nx), scale of taste shocks, specific to interim state - eis : float, EIS - beta : float, discount factor - rpost : float, ex-post interest rate - - Returns - ------- - V : array(Ns, Nz, Na), status-specific value function today - Va : array(Ns, Nz, Na), partial of status-specific value function today - P : array(Nx, Ns, Nz, Na), probability of choosing status s in state x - c : array(Ns, Nz, Na), status-specific consumption policy today - a : array(Ns, Nz, Na), status-specific asset policy today - ze : array(Ns, Nz, Na), effective labor (average productivity if employed) - ui : array(Ns, Nz, Na), UI benefit claims (average productivity if unemployed) - """ - # shapes - Ns, Nz, Na = V_p.shape - Nx = Pi_s_p.shape[1] - - # PART 1: update value and policy functions - # a. discrete choice I expect to make tomorrow - V_p_X = np.empty((Nx, Nz, Na)) - Va_p_X = np.empty((Nx, Nz, Na)) - for ix in range(Nx): - V_p_ix = np.take(V_p, indices=choice_set[ix], axis=0) - Va_p_ix = np.take(Va_p, indices=choice_set[ix], axis=0) - P_p_ix = choice_prob(V_p_ix, lam_grid[ix]) - V_p_X[ix, ...] = logsum(V_p_ix, lam_grid[ix]) - Va_p_X[ix, ...] = np.sum(P_p_ix*Va_p_ix, axis=0) - - # b. compute expectation wrt labor market shock - V_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, V_p_X) - Va_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, Va_p_X) - - # b. compute expectation wrt productivity - V_p2 = np.einsum('ij,kjl->kil', Pi_z_p, V_p1) - Va_p2 = np.einsum('ij,kjl->kil', Pi_z_p, Va_p1) - - # d. consumption today on tomorrow's grid and endogenous asset grid today - W = beta * V_p2 - uc_nextgrid = beta * Va_p2 - c_nextgrid = 2 ** (1 - eis) * uc_nextgrid ** (-eis) - a_nextgrid = (c_nextgrid + a_grid[np.newaxis, np.newaxis, :] - y_grid[:, :, np.newaxis]) / (1 + rpost) - - # e. upper envelope - imin, imax = nonconcave(uc_nextgrid) # bounds of non-concave region - V, c = upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, eis, vphi_m, chi_m, - vphi_f, chi_f) - - # f. update Va - uc = 2 ** (1 / eis - 1) * c ** (-1 / eis) - Va = (1 + rpost) * uc - - # PART 2: things we need for GE - # 2/a. asset policy - a = (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] + y_grid[:, :, np.newaxis] - c - - # 2/b. choice probabilities (don't need jacobian) - P = np.zeros((Nx, Ns, Nz, Na)) - for ix in range(Nx): - V_ix = np.take(V, indices=choice_set[ix], axis=0) - P[ix, choice_set[ix], ...] = choice_prob(V_ix, lam_grid[ix]) - - # 2/c. average productivity of employed - ze = np.zeros_like(a) - ze[0, ...] = z_all[:, np.newaxis] - ze[1, ...], ze[2, ...], ze[3, ...] = z_man[:, np.newaxis], z_man[:, np.newaxis], z_man[:, np.newaxis] - ze[4, ...], ze[8, ...], ze[12, ...] = z_wom[:, np.newaxis], z_wom[:, np.newaxis], z_wom[:, np.newaxis] - - # 2/d. UI claims - ui = np.zeros_like(a) - ui[5, ...] 
= b_all[:, np.newaxis] - ui[4, ...], ui[6, ...], ui[7, ...] = b_man[:, np.newaxis], b_man[:, np.newaxis], b_man[:, np.newaxis] - ui[1, ...], ui[9, ...], ui[13, ...] = b_wom[:, np.newaxis], b_wom[:, np.newaxis], b_wom[:, np.newaxis] - - return V, Va, a, c, P, ze, ui - - -def labor_income(z_grid_m, b_grid_m, z_grid_f, b_grid_f, atw, transfer, - fU_m, fN_m, s_m, fU_f, fN_f, s_f, rho, expiry): - # 1. income - yE_m, yE_f = atw * z_grid_m + transfer, atw * z_grid_f - yU_m, yU_f = atw * b_grid_m + transfer, atw * b_grid_f - yN_m, yN_f = np.zeros_like(yE_m) + transfer, np.zeros_like(yE_f) - y_EE = (yE_m[:, np.newaxis] + yE_f[np.newaxis, :]).ravel() - y_EU = (yE_m[:, np.newaxis] + yU_f[np.newaxis, :]).ravel() - y_EN = (yE_m[:, np.newaxis] + yN_f[np.newaxis, :]).ravel() - y_UE = (yU_m[:, np.newaxis] + yE_f[np.newaxis, :]).ravel() - y_UU = (yU_m[:, np.newaxis] + yU_f[np.newaxis, :]).ravel() - y_UN = (yU_m[:, np.newaxis] + yN_f[np.newaxis, :]).ravel() - y_NE = (yN_m[:, np.newaxis] + yE_f[np.newaxis, :]).ravel() - y_NU = (yN_m[:, np.newaxis] + yU_f[np.newaxis, :]).ravel() - y_NN = (yN_m[:, np.newaxis] + yN_f[np.newaxis, :]).ravel() - y_grid = np.vstack((y_EE, y_EU, y_EN, y_EN, y_UE, y_UU, y_UN, y_UN, y_NE, y_NU, y_NN, y_NN, y_NE, y_NU, y_NN, y_NN)) - - # 2. transition matrix for joint labor market status - cov = rho * np.sqrt(s_m * s_f * (1 - s_m) * (1 - s_f)) - Pi_s_m = np.array([[1 - s_m, s_m, 0], [fU_m, (1 - fU_m) * (1 - expiry), (1 - fU_m) * expiry], - [fU_m, 0, 1 - fU_m], [fN_m, 0, 1 - fN_m]]) - Pi_s_f = np.array([[1 - s_f, s_f, 0], [fU_f, (1 - fU_f) * (1 - expiry), (1 - fU_f) * expiry], - [fU_f, 0, 1 - fU_f], [fN_f, 0, 1 - fN_f]]) - Pi_s = np.kron(Pi_s_m, Pi_s_f) - - # adjust for correlated job loss - Pi_s[0, 0] += cov # neither loses their job - Pi_s[0, 4] += cov # both lose their job - Pi_s[0, 1] -= cov # only female loses her job - Pi_s[0, 3] -= cov # only male loses his job - - return Pi_s, y_grid - - -"""Supporting functions for HA block""" - - -@guvectorize(['void(float64[:], uint32[:], uint32[:])'], '(nA) -> (),()', nopython=True) -def nonconcave(uc_nextgrid, imin, imax): - """Obtain bounds for non-concave region.""" - nA = uc_nextgrid.shape[-1] - vmin = np.inf - vmax = -np.inf - # step 1: find vmin & vmax - for ia in range(nA - 1): - if uc_nextgrid[ia + 1] > uc_nextgrid[ia]: - vmin_temp = uc_nextgrid[ia] - vmax_temp = uc_nextgrid[ia + 1] - if vmin_temp < vmin: - vmin = vmin_temp - if vmax_temp > vmax: - vmax = vmax_temp - - # 2/a Find imin (upper bound) - if vmin == np.inf: - imin_ = 0 - else: - ia = 0 - while ia < nA: - if uc_nextgrid[ia] < vmin: - break - ia += 1 - imin_ = ia - - # 2/b Find imax (lower bound) - if vmax == -np.inf: - imax_ = nA - else: - ia = nA - while ia > 0: - if uc_nextgrid[ia] > vmax: - break - ia -= 1 - imax_ = ia - - imin[:] = imin_ - imax[:] = imax_ - - -@njit -def upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, *args): - """ - Interpolate consumption and value function to exogenous grid. Brute force but safe. 
- Parameters - ---------- - W : array(Ns, Nz, Na), status-specific end-of-period value function (on tomorrow's grid) - a_nextgrid : array(Ns, Nz, Na), endogenous asset grid (today's grid) - c_nextgrid : array(Ns, Nz, Na), consumption on endogenous grid (today's grid) - a_grid : array(Na), exogenous asset grid (tomorrow's grid) - y_grid : array(Ns, Nz), labor income - rpost : float, ex-post interest rate - args : (eis, vphi, chi) arguments for utility function - Returns - ------- - V : array(Ns, Nz, Na), status-specific value function on exogenous grid - c : array(Ns, Nz, Na), consumption on exogenous grid - """ - - # 0. initialize - Ns, Nz, Na = W.shape - c = np.zeros_like(W) - V = -np.inf * np.ones_like(W) - - # outer loop could run in parallel - for iw in range(Ns): - for iz in range(Nz): - ycur = y_grid[iw, iz] - imaxcur = imax[iw, iz] - imincur = imin[iw, iz] - - # 1. unconstrained case: loop through a_grid, find bracketing endogenous gridpoints and interpolate. - # in concave region: exploit monotonicity and don't look for extra solutions - for ia in range(Na): - acur = a_grid[ia] - - # Region 1: below non-concave: exploit monotonicity - if (ia <= imaxcur) | (ia >= imincur): - iap = 0 - ap_low = a_nextgrid[iw, iz, iap] - ap_high = a_nextgrid[iw, iz, iap + 1] - while iap < Na - 2: # can this go up all the way? - if ap_high >= acur: - break - iap += 1 - ap_low = ap_high - ap_high = a_nextgrid[iw, iz, iap + 1] - # found bracket, interpolate value function and consumption - w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] - c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] - w_slope = (w_high - w_low) / (ap_high - ap_low) - c_slope = (c_high - c_low) / (ap_high - ap_low) - c_guess = c_low + c_slope * (acur - ap_low) - w_guess = w_low + w_slope * (acur - ap_low) - V[iw, iz, ia] = util(c_guess, iw, *args) + w_guess - c[iw, iz, ia] = c_guess - - # Region 2: non-concave region - else: - # try out all segments of endogenous grid - for iap in range(Na - 1): - # does this endogenous segment bracket ia? - ap_low, ap_high = a_nextgrid[iw, iz, iap], a_nextgrid[iw, iz, iap + 1] - interp = (ap_low <= acur <= ap_high) or (ap_low >= acur >= ap_high) - - # does it need to be extrapolated above the endogenous grid? - # if needed to be extrapolated below, we would be in constrained case - extrap_above = (iap == Na - 2) and (acur > a_nextgrid[iw, iz, Na - 1]) - - if interp or extrap_above: - # interpolation slopes - w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] - c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] - w_slope = (w_high - w_low) / (ap_high - ap_low) - c_slope = (c_high - c_low) / (ap_high - ap_low) - - # implied guess - c_guess = c_low + c_slope * (acur - ap_low) - w_guess = w_low + w_slope * (acur - ap_low) - - # value - v_guess = util(c_guess, iw, *args) + w_guess - - # select best value for this segment - if v_guess > V[iw, iz, ia]: - V[iw, iz, ia] = v_guess - c[iw, iz, ia] = c_guess - - # 2. 
constrained case: remember that we have the inverse asset policy a(a') - ia = 0 - while ia < Na and a_grid[ia] <= a_nextgrid[iw, iz, 0]: - c[iw, iz, ia] = (1 + rpost) * a_grid[ia] + ycur - V[iw, iz, ia] = util(c[iw, iz, ia], iw, *args) + W[iw, iz, 0] - ia += 1 - - return V, c - - -'''Simple blocks''' - - -def zgrids_couple(zm, zf): - """Combine individual z_grid and b_grid as needed for couple level.""" - zeroes = np.zeros_like(zm) - - # all combinations - z_all = (zm[:, np.newaxis] + zf[np.newaxis, :]).ravel() - z_men = (zm[:, np.newaxis] + zeroes[np.newaxis, :]).ravel() - z_wom = (zeroes[:, np.newaxis] + zf[np.newaxis, :]).ravel() - return z_all, z_men, z_wom - - -@simple -def income_state_vars(mean_m, rho_m, sd_m, mean_f, rho_f, sd_f, nZ, uirate, uicap): - # income husband - z_grid_m, z_pdf_m, z_markov_m = markov_rouwenhorst(rho=rho_m, sigma=sd_m, N=nZ) - z_grid_m *= mean_m - b_grid_m = uirate * z_grid_m - b_grid_m[b_grid_m > uicap] = uicap - - # income wife - z_grid_f, z_pdf_f, z_markov_f = markov_rouwenhorst(rho=rho_f, sigma=sd_f, N=nZ) - z_grid_f *= mean_f - b_grid_f = uirate * z_grid_f - b_grid_f[b_grid_f > uicap] = uicap - - # household income - z_all, z_man, z_wom = zgrids_couple(z_grid_m, z_grid_f) - b_all, b_man, b_wom = zgrids_couple(b_grid_m, b_grid_f) - Pi_z = np.kron(z_markov_m, z_markov_f) - - return z_grid_m, z_grid_f, b_grid_m, b_grid_f, z_all, z_man, z_wom, b_all, b_man, b_wom, Pi_z - - -@simple -def employment_state_vars(lamM, lamB, lamL): - choice_set = [[0, 3, 12, 15], [1, 3, 13, 15], [2, 3, 14, 15], [4, 7, 12, 15], [5, 7, 13, 15], - [6, 7, 14, 15], [8, 11, 12, 15], [9, 11, 13, 15], [10, 11, 14, 15]] - lam_grid = np.array([np.sqrt(2*lamM**2), np.sqrt(lamM**2 + lamB**2), np.sqrt(lamM**2 + lamL**2), - np.sqrt(lamB**2 + lamM**2), np.sqrt(2*lamB**2), np.sqrt(lamB**2 + lamL**2), - np.sqrt(lamL**2 + lamM**2), np.sqrt(lamL**2 + lamB**2), np.sqrt(2*lamL**2)]) - return choice_set, lam_grid - - -@simple -def asset_state_vars(amin, amax, nA): - a_grid = agrid(amin=amin, amax=amax, n=nA) - return a_grid - - -@solved(unknowns={'fU_m': 0.25, 'fN_m': 0.1, 's_m': 0.025}, - targets=['fU_m_res', 'fN_m_res', 's_m_res'], - solver='broyden_custom') -def flows_m(Y, fU_m, fN_m, s_m, fU_eps_m, fN_eps_m, s_eps_m): - fU_m_res = fU_m.ss * (Y / Y.ss) ** fU_eps_m - fU_m - fN_m_res = fN_m.ss * (Y / Y.ss) ** fN_eps_m - fN_m - s_m_res = s_m.ss * (Y / Y.ss) ** s_eps_m - s_m - return fU_m_res, fN_m_res, s_m_res - - -@solved(unknowns={'fU_f': 0.25, 'fN_f': 0.1, 's_f': 0.025}, - targets=['fU_f_res', 'fN_f_res', 's_f_res'], - solver='broyden_custom') -def flows_f(Y, fU_f, fN_f, s_f, fU_eps_f, fN_eps_f, s_eps_f): - fU_f_res = fU_f.ss * (Y / Y.ss) ** fU_eps_f - fU_f - fN_f_res = fN_f.ss * (Y / Y.ss) ** fN_eps_f - fN_f - s_f_res = s_f.ss * (Y / Y.ss) ** s_eps_f - s_f - return fU_f_res, fN_f_res, s_f_res - - -'''Put it together''' - -household.add_hetinput(labor_income, verbose=False) -hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, flows_m, flows_f, household], - name='CoupleHH') -blocks = [income_state_vars, employment_state_vars, asset_state_vars, flows_m, flows_f, household] diff --git a/dcblock_test.py b/dcblock_test.py deleted file mode 100644 index cf7df30..0000000 --- a/dcblock_test.py +++ /dev/null @@ -1,181 +0,0 @@ -"""This model is to test DisContBlock""" - -import numpy as np - -from sequence_jacobian import simple, create_model, solved -import single_endo as single -import couple_endo as couple - - -'''Rest of the model''' - - -@simple -def firm(Z, K, L, mc, tax, 
alpha, delta0, delta1, psi): - Y = Z * K(-1) ** alpha * L ** (1 - alpha) - w = (1 - alpha) * mc * Y / L - u = (alpha / delta0 / delta1 * mc * Y / K(-1)) ** (1 / delta1) - delta = delta0 * u ** delta1 - Q = 1 + psi * (K / K(-1) - 1) - I = K - (1 - delta) * K(-1) + psi / 2 * (K / K(-1) - 1) ** 2 * K(-1) - transfer = Y - w * L - I - atw = (1 - tax) * w - return Y, w, u, delta, Q, I, transfer, atw - - -@simple -def monetary(pi, rstar, phi_pi): - # rpost = (1 + rstar(-1) + phi_pi * pi(-1)) / (1 + pi) - 1 - rpost = rstar - return rpost - - -@simple -def nkpc(pi, mc, eps, Y, rpost, kappa): - nkpc_res = kappa * (mc - (eps - 1) / eps) + Y(+1) / Y * (1 + pi(+1)).apply(np.log) / (1 + rpost(+1)) \ - - (1 + pi).apply(np.log) - return nkpc_res - - -@simple -def valuation(rpost, mc, Y, K, Q, delta, psi, alpha): - val = alpha * mc(+1) * Y(+1) / K - (K(+1) / K - (1 - delta) + psi / 2 * (K(+1) / K - 1) ** 2) + \ - K(+1) / K * Q(+1) - (1 + rpost(+1)) * Q - return val - - -@solved(unknowns={'B': [0.0, 10.0]}, targets=['budget'], solver='brentq') -def fiscal(B, tax, w, rpost, G, Ze, Ui): - budget = (1 + rpost) * B + G + (1 - tax) * w * Ui - tax * w * Ze - B - # tax_rule = tax - tax.ss - phi * (B(-1) - B.ss) / Y.ss - tax_rule = B - B.ss - return budget, tax_rule - - -@simple -def mkt_clearing(A, B, Y, C, I, G, L, Ze): - asset_mkt = A - B - goods_mkt = Y - C - I - G - labor_mkt = L - Ze - return asset_mkt, goods_mkt, labor_mkt - - -@simple -def dividends(transfer, pop_sm, pop_sw, pop_mc, illiq_sm, illiq_sw, illiq_mc): - transfer_sm = illiq_sm * transfer / \ - (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) - transfer_sw = illiq_sw * transfer / \ - (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) - transfer_mc = illiq_mc * transfer / \ - (pop_sm * illiq_sm + pop_sw * illiq_sw + pop_mc * illiq_mc) - return transfer_sm, transfer_sw, transfer_mc - - -@simple -def aggregate(A_sm, C_sm, Ze_sm, Ui_sm, pop_sm, A_sw, C_sw, Ze_sw, Ui_sw, pop_sw, A_mc, C_mc, Ze_mc, Ui_mc, pop_mc): - A = pop_sm * A_sm + pop_sw * A_sw + pop_mc * A_mc - C = pop_sm * C_sm + pop_sw * C_sw + pop_mc * C_mc - Ze = pop_sm * Ze_sm + pop_sw * Ze_sw + pop_mc * Ze_mc - Ui = pop_sm * Ui_sm + pop_sw * Ui_sw + pop_mc * Ui_mc - return A, C, Ze, Ui - - -'''Steady-state helpers''' - - -@simple -def firm_ss(eps, rpost, Y, L, alpha, delta0): - # uses u=1 - mc = (eps - 1) / eps - K = mc * Y * alpha / (rpost + delta0) - delta1 = alpha / delta0 * mc * Y / K - Z = Y / (K ** alpha * L ** (1 - alpha)) - w = (1 - alpha) * mc * Y / L - return mc, K, delta1, Z, w - - -@simple -def fiscal_ss(A, tax, w, Ze, rpost, Ui): - # after hh block - B = A - G = tax * w * Ze - rpost * B - (1 - tax) * w * Ui - return B, G - - -'''Calibration''' - -cali_sm = {'beta': 0.9833, 'vphi': 0.7625, 'chi': 0.5585, 'fU': 0.2499, 'fN': 0.1148, 's': 0.0203, - 'mean_z': 1.0, 'rho_z': 0.98, 'sd_z': 0.943*0.82, - 'fU_eps': 10.69, 'fN_eps': 5.57, 's_eps': -11.17, - 'illiq': 0.25, 'pop': 0.29} - -cali_sw = {'beta': 0.9830, 'vphi': 0.9940, 'chi': 0.4958, 'fU': 0.2207, 'fN': 0.1098, 's': 0.0132, - 'mean_z': 0.8, 'rho_z': 0.98, 'sd_z': 0.86*0.82, - 'fU_eps': 8.72, 'fN_eps': 3.55, 's_eps': -6.55, - 'illiq': 0.15, 'pop': 0.29} - -cali_mc = {'beta': 0.9882, 'rho': 0.042, - 'vphi_m': 0.1545, 'chi_m': 0.2119, 'fU_m': 0.3013, 'fN_m': 0.1840, 's_m': 0.0107, - 'vphi_f': 0.2605, 'chi_f': 0.2477, 'fU_f': 0.2519, 'fN_f': 0.1691, 's_f': 0.0103, - 'mean_m': 1.0, 'rho_m': 0.98, 'sd_m': 0.943*0.82, - 'mean_f': 0.8, 'rho_f': 0.98, 'sd_f': 0.86*0.82, - 'fU_eps_m': 10.37, 'fN_eps_m': 9.74, 
's_eps_m': -13.60, - 'fU_eps_f': 8.40, 'fN_eps_f': 2.30, 's_eps_f': -7.87, - 'illiq': 0.6, 'pop': 0.42} - -'''Remap''' - -# variables to remap -to_map_single = ['beta', 'vphi', 'chi', 'fU', 'fN', 's', 'mean_z', 'rho_z', 'sd_z', 'transfer', - 'fU_eps', 'fN_eps', 's_eps', *single.hh.outputs] - -to_map_couple = ['beta', 'transfer', 'rho', - 'vphi_m', 'chi_m', 'fU_m', 'fN_m', 's_m', 'mean_m', 'rho_m', 'sd_m', 'fU_eps_m', 'fN_eps_m', 's_eps_m', - 'vphi_f', 'chi_f', 'fU_f', 'fN_f', 's_f', 'mean_f', 'rho_f', 'sd_f', 'fU_eps_f', 'fN_eps_f', 's_eps_f', - *couple.hh.outputs] - -# remap blocks one-by-one (replicates combinedblock) -blocks_sm = [b.remap({k: k + '_sm' for k in to_map_single} - ).rename(b.name + '_sm') for b in single.blocks] -blocks_sw = [b.remap({k: k + '_sw' for k in to_map_single} - ).rename(b.name + '_sw') for b in single.blocks] -blocks_mc = [b.remap({k: k + '_mc' for k in to_map_couple} - ).rename(b.name + '_mc') for b in couple.blocks] - - -'''Solve ss''' - - -hank = create_model(blocks_sm + blocks_sw + blocks_mc + - [aggregate, dividends, firm, monetary, valuation, nkpc, fiscal, mkt_clearing], name='HANK') - -# remap calibration -cali_sm = {k + '_sm': v for k, v in cali_sm.items()} -cali_sw = {k + '_sw': v for k, v in cali_sw.items()} -cali_mc = {k + '_mc': v for k, v in cali_mc.items()} - -calibration = {**cali_sm, **cali_sw, **cali_mc, - 'eis': 1.0, 'uicap': 0.66, 'uirate': 0.5, 'expiry': 1/6, 'eps': 10.0, 'tax': 0.3, - 'amin': 0.0, 'amax': 500, 'nA': 100, 'lamM': 0.01, 'lamB': 0.01, 'lamL': 0.04, 'nZ': 7, - 'kappa': 0.03, 'phi_pi': 1.25, 'rstar': 0.002, 'pi': 0.0, 'alpha': 0.2, 'psi': 30, 'delta0': 0.0083} - -ss = hank.solve_steady_state(calibration, solver='toms748', dissolve=[fiscal], ttol=1E-6, - unknowns={'L': (1.06, 1.12), - 'Z': 0.63, 'mc': 0.9, 'G': 0.2, 'delta1': 1.2, 'B': 3.0, 'K': 17.0}, - targets={'labor_mkt': 0.0, - 'Y': 1.0, 'nkpc_res': 0.0, 'budget': 0.0, 'asset_mkt': 0.0, 'val': 0.0, 'u': 1.0}, - helper_blocks=[firm_ss, fiscal_ss], - helper_targets=['asset_mkt', 'budget', 'val', 'nkpc_res', 'Y', 'u']) - - -# # jacobians -# J = dict() -# J['household_sm'] = blocks_sm[4].jacobian(ss, exogenous=['transfer', 'atw', 'rpost', 'beta', 'fU', 'fN', 's'], T=500) -# J['household_sw'] = blocks_sw[4].jacobian(ss, exogenous=['transfer', 'atw', 'rpost', 'beta', 'fU', 'fN', 's'], T=500) -# J['household_mc'] = blocks_mc[5].jacobian(ss, exogenous=['transfer', 'atw', 'rpost', 'beta', -# 'fU_m', 'fN_m', 's_m', 'fU_f', 'fN_f', 's_f'], T=500) -# -# td_lin = hank.solve_impulse_linear(ss, {'rstar': 0.001*0.9**np.arange(500)}, -# unknowns=['K', 'L', 'mc', 'pi', 'tax'], -# targets=['val', 'goods_mkt', 'labor_mkt', 'nkpc_res', 'tax_rule'], -# Js=J) diff --git a/remap_test.py b/remap_test.py deleted file mode 100644 index 2a69197..0000000 --- a/remap_test.py +++ /dev/null @@ -1,155 +0,0 @@ -"""Simple model to test remapping & multiple hetblocks.""" - -import numpy as np -import copy - -from sequence_jacobian import utilities as utils -from sequence_jacobian import create_model, hetoutput, het, simple - - -'''Part 1: HA block''' - - -def household_init(a_grid, e_grid, r, w, eis): - coh = (1 + r) * a_grid[np.newaxis, :] + w * e_grid[:, np.newaxis] - Va = (1 + r) * (0.1 * coh) ** (-1 / eis) - return Va - - -@het(exogenous='Pi', policy='a', backward='Va', backward_init=household_init) -def household(Va_p, Pi_p, a_grid, e_grid, beta, r, w, eis): - """Single backward iteration step using endogenous gridpoint method for households with CRRA utility. 
- - Parameters - ---------- - Va_p : array (S, A), marginal value of assets tomorrow - Pi_p : array (S, S), Markov matrix for skills tomorrow - a_grid : array (A), asset grid - e_grid : array (S*B), skill grid - beta : scalar, discount rate today - r : scalar, ex-post real interest rate - w : scalar, wage - eis : scalar, elasticity of intertemporal substitution - - Returns - ---------- - Va : array (S*B, A), marginal value of assets today - a : array (S*B, A), asset policy today - c : array (S*B, A), consumption policy today - """ - uc_nextgrid = (beta * Pi_p) @ Va_p - c_nextgrid = uc_nextgrid ** (-eis) - coh = (1 + r) * a_grid[np.newaxis, :] + w * e_grid[:, np.newaxis] - a = utils.interpolate.interpolate_y(c_nextgrid + a_grid, coh, a_grid) - utils.optimized_routines.setmin(a, a_grid[0]) - c = coh - a - Va = (1 + r) * c ** (-1 / eis) - return Va, a, c - - -def get_mpcs(c, a, a_grid, rpost): - """Approximate mpc, with symmetric differences where possible, exactly setting mpc=1 for constrained agents.""" - mpcs = np.empty_like(c) - post_return = (1 + rpost) * a_grid - - # symmetric differences away from boundaries - mpcs[:, 1:-1] = (c[:, 2:] - c[:, 0:-2]) / (post_return[2:] - post_return[:-2]) - - # asymmetric first differences at boundaries - mpcs[:, 0] = (c[:, 1] - c[:, 0]) / (post_return[1] - post_return[0]) - mpcs[:, -1] = (c[:, -1] - c[:, -2]) / (post_return[-1] - post_return[-2]) - - # special case of constrained - mpcs[a == a_grid[0]] = 1 - - return mpcs - - -'''Part 2: Simple Blocks''' - - -@simple -def firm(K, L, Z, alpha, delta): - r = alpha * Z * (K(-1) / L) ** (alpha-1) - delta - w = (1 - alpha) * Z * (K(-1) / L) ** alpha - Y = Z * K(-1) ** alpha * L ** (1 - alpha) - I = K - (1 - delta) * K(-1) - return r, w, Y, I - - -@simple -def mkt_clearing(K, A, Y, C, I): - asset_mkt = A - K - goods_mkt = Y - C - I - return asset_mkt, goods_mkt - - -@simple -def income_state_vars(rho, sigma, nS): - e_grid, _, Pi = utils.discretize.markov_rouwenhorst(rho=rho, sigma=sigma, N=nS) - return e_grid, Pi - - -@simple -def asset_state_vars(amax, nA): - a_grid = utils.discretize.agrid(amax=amax, n=nA) - return a_grid - - -@simple -def firm_ss(r, Y, L, delta, alpha): - rk = r + delta - w = (1 - alpha) * Y / L - K = alpha * Y / rk - Z = Y / K ** alpha / L ** (1 - alpha) - return w, K, Z - - -@hetoutput() -def mpcs(c, a, a_grid, r): - mpc = get_mpcs(c, a, a_grid, r) - return mpc - - -household.add_hetoutput(mpcs, verbose=False) - - -'''Part 3: permanent heterogeneity''' - -# permanent types given by CombinedBlocks with different names -hh_patient = create_model([income_state_vars, household], name='patient_hh') -hh_impatient = create_model([income_state_vars, household], name='impatient_hh') - -# remap variables -to_map = ['beta', *hh_patient.outputs] -hh_patient = hh_patient.remap({k: k + '_patient' for k in to_map}) -hh_impatient = hh_impatient.remap({k: k + '_impatient' for k in to_map}) - - -@simple -def aggregate(A_patient, A_impatient, C_patient, C_impatient, Mpc_patient, Mpc_impatient, mass_patient): - C = mass_patient * C_patient + (1 - mass_patient) * C_impatient - A = mass_patient * A_patient + (1 - mass_patient) * A_impatient - Mpc = mass_patient * Mpc_patient + (1 - mass_patient) * Mpc_impatient - return C, A, Mpc - - -'''Steady state''' - -# DAG -blocks = [hh_patient, hh_impatient, firm, mkt_clearing, income_state_vars, asset_state_vars, aggregate] -ks_model = create_model(blocks, name="Krusell-Smith") - -# steady state -calibration = {'eis': 1, 'delta': 0.025, 'alpha': 0.3, 'rho': 0.966, 
'sigma': 0.5, 'L': 1.0, - 'nS': 11, 'nA': 500, 'amax': 1000, 'beta_impatient': 0.98, 'mass_patient': 0.5} - -# this should return ss.internals['patient_hh']['household'] and ss.internals['impatient_hh']['household'] -ss = ks_model.solve_steady_state(calibration, solver='brentq', - unknowns={'beta_patient': (0.97/1.01, 0.999/1.01), 'Z': 0.5, 'K': 8.6}, - targets={'asset_mkt': 0.0, 'Y': 1.0, 'r': 0.01}, - helper_blocks=[firm_ss], helper_targets=['Y', 'r']) - -# .jacobian will have to find the internals, .impulse_linear, .impulse_nonlinear should be able to return internals -# td_nonlin = ks_model.solve_impulse_nonlinear(ss, {'Z': 0.001*0.9**np.arange(300)}, -# unknowns=['K'], targets=['asset_mkt']) diff --git a/single_endo.py b/single_endo.py deleted file mode 100644 index 13dc9e2..0000000 --- a/single_endo.py +++ /dev/null @@ -1,342 +0,0 @@ -"""Single household model with frictional labor supply.""" - -import numpy as np -from numba import guvectorize, njit - -from sequence_jacobian import simple, agrid, markov_rouwenhorst, create_model, solved -from sequence_jacobian.blocks.discont_block import discont -from sequence_jacobian.utilities.misc import choice_prob, logsum - - -'''Core HA block''' - - -def household_init(a_grid, z_grid, b_grid, atw, transfer, rpost, eis, vphi, chi): - _, income = labor_income(z_grid, b_grid, atw, transfer, 0, 1, 1, 1) - coh = income[:, :, np.newaxis] + (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] - c_guess = 0.3 * coh - V, Va = np.empty_like(coh), np.empty_like(coh) - for iw in range(4): - V[iw, ...] = util(c_guess[iw, ...], iw, eis, vphi, chi) - V = V / 0.1 - - # get Va by finite difference - Va[:, :, 1:-1] = (V[:, :, 2:] - V[:, :, :-2]) / (a_grid[2:] - a_grid[:-2]) - Va[:, :, 0] = (V[:, :, 1] - V[:, :, 0]) / (a_grid[1] - a_grid[0]) - Va[:, :, -1] = (V[:, :, -1] - V[:, :, -2]) / (a_grid[-1] - a_grid[-2]) - - return Va, V - - -@njit(fastmath=True) -def util(c, iw, eis, vphi, chi): - """Utility function.""" - # 1. utility from consumption. - if eis == 1: - u = np.log(c) - else: - u = c ** (1 - 1 / eis) / (1 - 1 / eis) - - # 2. disutility from work and search - if iw == 0: - u = u - vphi # E - elif (iw == 1) | (iw == 2): - u = u - chi # Ub, U - - return u - - -@discont(exogenous=('Pi_s', 'Pi_z'), policy='a', disc_policy='P', backward=('V', 'Va'), backward_init=household_init) -def household(V_p, Va_p, Pi_z_p, Pi_s_p, choice_set, a_grid, y_grid, z_grid, b_grid, lam_grid, eis, beta, rpost, - vphi, chi): - """ - Backward step function EGM with upper envelope. - - Dimensions: 0: labor market status, 1: productivity, 2: assets. 
- Status: 0: E, 1: Ub, 2: U, 3: O - State: 0: M, 1: B, 2: L - - Parameters - ---------- - V_p : array(Ns, Nz, Na), status-specific value function tomorrow - Va_p : array(Ns, Nz, Na), partial of status-specific value function tomorrow - Pi_s_p : array(Ns, Nx), Markov matrix for labor market shocks - Pi_z_p : array(Nz, Nz), (non-status-specific Markov) matrix for productivity - choice_set : list(Nz), discrete choices available in each state X - a_grid : array(Na), exogenous asset grid - y_grid : array(Ns, Nz), exogenous labor income grid - z_grid : array(Nz), productivity of employed (need for GE) - b_grid : array(Nz), productivity of unemployed (need for GE) - lam_grid : array(Nx), scale of taste shocks, specific to interim state - eis : float, EIS - beta : float, discount factor - rpost : float, ex-post interest rate - vphi : float, disutility of work - chi : float, disutility of search - - Returns - ------- - V : array(Ns, Nz, Na), status-specific value function today - Va : array(Ns, Nz, Na), partial of status-specific value function today - P : array(Nx, Ns, Nz, Na), probability of choosing status s in state x - c : array(Ns, Nz, Na), status-specific consumption policy today - a : array(Ns, Nz, Na), status-specific asset policy today - ze : array(Ns, Nz, Na), effective labor (average productivity if employed) - ui : array(Ns, Nz, Na), UI benefit claims (average productivity if unemployed) - """ - # shapes - Ns, Nz, Na = V_p.shape - Nx = Pi_s_p.shape[1] - - # PART 1: update value and policy functions - # a. discrete choice I expect to make tomorrow - V_p_X = np.empty((Nx, Nz, Na)) - Va_p_X = np.empty((Nx, Nz, Na)) - for ix in range(Nx): - V_p_ix = np.take(V_p, indices=choice_set[ix], axis=0) - Va_p_ix = np.take(Va_p, indices=choice_set[ix], axis=0) - P_p_ix = choice_prob(V_p_ix, lam_grid[ix]) - V_p_X[ix, ...] = logsum(V_p_ix, lam_grid[ix]) - Va_p_X[ix, ...] = np.sum(P_p_ix * Va_p_ix, axis=0) - - # b. compute expectation wrt labor market shock - V_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, V_p_X) - Va_p1 = np.einsum('ij,jkl->ikl', Pi_s_p, Va_p_X) - - # b. compute expectation wrt productivity - V_p2 = np.einsum('ij,kjl->kil', Pi_z_p, V_p1) - Va_p2 = np.einsum('ij,kjl->kil', Pi_z_p, Va_p1) - - # d. consumption today on tomorrow's grid and endogenous asset grid today - W = beta * V_p2 - uc_nextgrid = beta * Va_p2 - c_nextgrid = uc_nextgrid ** (-eis) - a_nextgrid = (c_nextgrid + a_grid[np.newaxis, np.newaxis, :] - y_grid[:, :, np.newaxis]) / (1 + rpost) - - # e. upper envelope - imin, imax = nonconcave(uc_nextgrid) # bounds of non-concave region - V, c = upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, eis, vphi, chi) - - # f. update Va - uc = c ** (-1 / eis) - Va = (1 + rpost) * uc - - # PART 2: things we need for GE - - # 2/a. asset policy - a = (1 + rpost) * a_grid[np.newaxis, np.newaxis, :] + y_grid[:, :, np.newaxis] - c - - # 2/b. choice probabilities (don't need jacobian) - P = np.zeros((Nx, Ns, Nz, Na)) - for ix in range(Nx): - V_ix = np.take(V, indices=choice_set[ix], axis=0) - P[ix, choice_set[ix], ...] = choice_prob(V_ix, lam_grid[ix]) - - # 2/c. average productivity of employed - ze = np.zeros_like(a) - ze[0, ...] = z_grid[:, np.newaxis] - - # 2/d. UI claims - ui = np.zeros_like(a) - ui[1, ...] = b_grid[:, np.newaxis] - - return V, Va, a, c, P, ze, ui - - -def labor_income(z_grid, b_grid, atw, transfer, expiry, fU, fN, s): - # 1. 
income - yE = atw * z_grid + transfer - yUb = atw * b_grid + transfer - yN = np.zeros_like(yE) + transfer - y_grid = np.vstack((yE, yUb, yN, yN)) - - # 2. transition matrix for labor market status - Pi_s = np.array([[1 - s, s, 0], [fU, (1 - fU) * (1 - expiry), (1 - fU) * expiry], [fU, 0, 1 - fU], [fN, 0, 1 - fN]]) - - return Pi_s, y_grid - - -"""Supporting functions for HA block""" - - -@guvectorize(['void(float64[:], uint32[:], uint32[:])'], '(nA) -> (),()', nopython=True) -def nonconcave(uc_nextgrid, imin, imax): - """Obtain bounds for non-concave region.""" - nA = uc_nextgrid.shape[-1] - vmin = np.inf - vmax = -np.inf - # step 1: find vmin & vmax - for ia in range(nA - 1): - if uc_nextgrid[ia + 1] > uc_nextgrid[ia]: - vmin_temp = uc_nextgrid[ia] - vmax_temp = uc_nextgrid[ia + 1] - if vmin_temp < vmin: - vmin = vmin_temp - if vmax_temp > vmax: - vmax = vmax_temp - - # 2/a Find imin (upper bound) - if vmin == np.inf: - imin_ = 0 - else: - ia = 0 - while ia < nA: - if uc_nextgrid[ia] < vmin: - break - ia += 1 - imin_ = ia - - # 2/b Find imax (lower bound) - if vmax == -np.inf: - imax_ = nA - else: - ia = nA - while ia > 0: - if uc_nextgrid[ia] > vmax: - break - ia -= 1 - imax_ = ia - - imin[:] = imin_ - imax[:] = imax_ - - -@njit -def upper_envelope(imin, imax, W, a_nextgrid, c_nextgrid, a_grid, y_grid, rpost, *args): - """ - Interpolate consumption and value function to exogenous grid. Brute force but safe. - Parameters - ---------- - W : array(Ns, Nz, Na), status-specific end-of-period value function (on tomorrow's grid) - a_nextgrid : array(Ns, Nz, Na), endogenous asset grid (today's grid) - c_nextgrid : array(Ns, Nz, Na), consumption on endogenous grid (today's grid) - a_grid : array(Na), exogenous asset grid (tomorrow's grid) - y_grid : array(Ns, Nz), labor income - rpost : float, ex-post interest rate - args : (eis, vphi, chi) arguments for utility function - Returns - ------- - V : array(Ns, Nz, Na), status-specific value function on exogenous grid - c : array(Ns, Nz, Na), consumption on exogenous grid - """ - - # 0. initialize - Ns, Nz, Na = W.shape - c = np.zeros_like(W) - V = -np.inf * np.ones_like(W) - - # outer loop could run in parallel - for iw in range(Ns): - for iz in range(Nz): - ycur = y_grid[iw, iz] - imaxcur = imax[iw, iz] - imincur = imin[iw, iz] - - # 1. unconstrained case: loop through a_grid, find bracketing endogenous gridpoints and interpolate. - # in concave region: exploit monotonicity and don't look for extra solutions - for ia in range(Na): - acur = a_grid[ia] - - # Region 1: below non-concave: exploit monotonicity - if (ia <= imaxcur) | (ia >= imincur): - iap = 0 - ap_low = a_nextgrid[iw, iz, iap] - ap_high = a_nextgrid[iw, iz, iap + 1] - while iap < Na - 2: # can this go up all the way? - if ap_high >= acur: - break - iap += 1 - ap_low = ap_high - ap_high = a_nextgrid[iw, iz, iap + 1] - # found bracket, interpolate value function and consumption - w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] - c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] - w_slope = (w_high - w_low) / (ap_high - ap_low) - c_slope = (c_high - c_low) / (ap_high - ap_low) - c_guess = c_low + c_slope * (acur - ap_low) - w_guess = w_low + w_slope * (acur - ap_low) - V[iw, iz, ia] = util(c_guess, iw, *args) + w_guess - c[iw, iz, ia] = c_guess - - # Region 2: non-concave region - else: - # try out all segments of endogenous grid - for iap in range(Na - 1): - # does this endogenous segment bracket ia? 
- ap_low, ap_high = a_nextgrid[iw, iz, iap], a_nextgrid[iw, iz, iap + 1] - interp = (ap_low <= acur <= ap_high) or (ap_low >= acur >= ap_high) - - # does it need to be extrapolated above the endogenous grid? - # if needed to be extrapolated below, we would be in constrained case - extrap_above = (iap == Na - 2) and (acur > a_nextgrid[iw, iz, Na - 1]) - - if interp or extrap_above: - # interpolation slopes - w_low, w_high = W[iw, iz, iap], W[iw, iz, iap + 1] - c_low, c_high = c_nextgrid[iw, iz, iap], c_nextgrid[iw, iz, iap + 1] - w_slope = (w_high - w_low) / (ap_high - ap_low) - c_slope = (c_high - c_low) / (ap_high - ap_low) - - # implied guess - c_guess = c_low + c_slope * (acur - ap_low) - w_guess = w_low + w_slope * (acur - ap_low) - - # value - v_guess = util(c_guess, iw, *args) + w_guess - - # select best value for this segment - if v_guess > V[iw, iz, ia]: - V[iw, iz, ia] = v_guess - c[iw, iz, ia] = c_guess - - # 2. constrained case: remember that we have the inverse asset policy a(a') - ia = 0 - while ia < Na and a_grid[ia] <= a_nextgrid[iw, iz, 0]: - c[iw, iz, ia] = (1 + rpost) * a_grid[ia] + ycur - V[iw, iz, ia] = util(c[iw, iz, ia], iw, *args) + W[iw, iz, 0] - ia += 1 - - return V, c - - -'''Simple blocks''' - - -@simple -def income_state_vars(mean_z, rho_z, sd_z, nZ, uirate, uicap): - # productivity - z_grid, pi_z, Pi_z = markov_rouwenhorst(rho=rho_z, sigma=sd_z, N=nZ) - z_grid *= mean_z - - # unemployment benefits - b_grid = uirate * z_grid - b_grid[b_grid > uicap] = uicap - return z_grid, b_grid, pi_z, Pi_z - - -@simple -def employment_state_vars(lamM, lamB, lamL): - choice_set = [[0, 3], [1, 3], [2, 3]] - lam_grid = np.array([lamM, lamB, lamL]) - return choice_set, lam_grid - - -@simple -def asset_state_vars(amin, amax, nA): - a_grid = agrid(amin=amin, amax=amax, n=nA) - return a_grid - - -@solved(unknowns={'fU': 0.25, 'fN': 0.1, 's': 0.025}, targets=['fU_res', 'fN_res', 's_res'], solver='broyden_custom') -def flows(Y, fU, fN, s, fU_eps, fN_eps, s_eps): - fU_res = fU.ss * (Y / Y.ss) ** fU_eps - fU - fN_res = fN.ss * (Y / Y.ss) ** fN_eps - fN - s_res = s.ss * (Y / Y.ss) ** s_eps - s - return fU_res, fN_res, s_res - - -'''Put it together''' - -household.add_hetinput(labor_income, verbose=False) -hh = create_model([income_state_vars, employment_state_vars, asset_state_vars, flows, household], name='SingleHH') -blocks = [income_state_vars, employment_state_vars, asset_state_vars, flows, household] diff --git a/src/sequence_jacobian/models/krusell_smith.py b/src/sequence_jacobian/models/krusell_smith.py index 378664f..fddd2cd 100644 --- a/src/sequence_jacobian/models/krusell_smith.py +++ b/src/sequence_jacobian/models/krusell_smith.py @@ -78,14 +78,12 @@ def asset_state_vars(amax, nA): @simple -def firm_steady_state_solution(r, delta, alpha): +def firm_steady_state_solution(r, Y, L, delta, alpha): rk = r + delta - Z = (rk / alpha) ** alpha # normalize so that Y=1 - K = (alpha * Z / rk) ** (1 / (1 - alpha)) - Y = Z * K ** alpha - w = (1 - alpha) * Z * (alpha * Z / rk) ** (alpha / (1 - alpha)) - - return Z, K, Y, w + w = (1 - alpha) * Y / L + K = alpha * Y / rk + Z = Y / K ** alpha / L ** (1 - alpha) + return w, K, Z '''Part 3: Steady state''' @@ -127,3 +125,13 @@ def res(beta_loc): 'rho': rho, 'nS': nS, 'asset_mkt': ss['A'] - K}) return ss + + +'''Part 4: Permanent beta heterogeneity''' + + +@simple +def aggregate(A_patient, A_impatient, C_patient, C_impatient, mass_patient): + C = mass_patient * C_patient + (1 - mass_patient) * C_impatient + A = mass_patient * A_patient + (1 - 
mass_patient) * A_impatient
+    return C, A
diff --git a/tests/base/test_steady_state.py b/tests/base/test_steady_state.py
index 0a4c372..2a412e1 100644
--- a/tests/base/test_steady_state.py
+++ b/tests/base/test_steady_state.py
@@ -35,3 +35,9 @@ def test_two_asset_steady_state(two_asset_hank_dag):
     assert set(ss.keys()) == set(ss_ref.keys())
     for k in ss.keys():
         assert np.all(np.isclose(ss[k], ss_ref[k]))
+
+
+def test_remap_steady_state(ks_remapped_dag):
+    _, _, _, _, ss = ks_remapped_dag
+    assert ss['beta_impatient'] < ss['beta_patient']
+    assert ss['A_impatient'] < ss['A_patient']
diff --git a/tests/conftest.py b/tests/conftest.py
index ab85a96..274d7d4 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -98,3 +98,30 @@ def two_asset_hank_dag():
     targets = ["asset_mkt", "fisher", "wnkpc"]

     return two_asset_model, exogenous, unknowns, targets, ss
+
+
+@pytest.fixture(scope='session')
+def ks_remapped_dag():
+    # Create 2 versions of the household block using `remap`
+    to_map = ['beta', *krusell_smith.household.outputs]
+    hh_patient = krusell_smith.household.remap({k: k + '_patient' for k in to_map}).rename('hh_patient')
+    hh_impatient = krusell_smith.household.remap({k: k + '_impatient' for k in to_map}).rename('hh_impatient')
+    blocks = [hh_patient, hh_impatient, krusell_smith.firm, krusell_smith.mkt_clearing, krusell_smith.income_state_vars,
+              krusell_smith.asset_state_vars, krusell_smith.aggregate]
+    ks_remapped = create_model(blocks, name='Krusell-Smith')
+
+    # Steady State
+    calibration = {'eis': 1., 'delta': 0.025, 'alpha': 0.3, 'rho': 0.966, 'sigma': 0.5, 'L': 1.0,
+                   'nS': 3, 'nA': 100, 'amax': 1000, 'beta_impatient': 0.985, 'mass_patient': 0.5}
+    unknowns_ss = {'beta_patient': (0.98 / 1.01, 0.999 / 1.01), 'Z': 0.5, 'K': 8.}
+    targets_ss = {'asset_mkt': 0., 'Y': 1., 'r': 0.01}
+    helper_blocks = [krusell_smith.firm_steady_state_solution]
+    ss = ks_remapped.solve_steady_state(calibration, unknowns_ss, targets_ss, solver='brentq',
+                                        helper_blocks=helper_blocks, helper_targets=['Y', 'r'])
+
+    # Transitional Dynamics/Jacobian Calculation
+    exogenous = ["Z"]
+    unknowns = ["K"]
+    targets = ["asset_mkt"]
+
+    return ks_remapped, exogenous, unknowns, targets, ss
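
The backward-step functions in the removed couple_endo.py and single_endo.py lean on `choice_prob` and `logsum` from `sequence_jacobian.utilities.misc` to integrate over extreme-value taste shocks. The sketch below shows what these helpers are assumed to compute under the standard logit formulas; it is illustrative only and may differ from the library's actual implementation (e.g. in axis conventions).

import numpy as np

def choice_prob_sketch(V, lam):
    # probability of each discrete option given conditional values V, options on axis 0;
    # normalized by the max for numerical stability (softmax with scale lam)
    Vnorm = (V - V.max(axis=0)) / lam
    return np.exp(Vnorm) / np.exp(Vnorm).sum(axis=0)

def logsum_sketch(V, lam):
    # expected value over the choice set, inclusive of taste shocks (log-sum-exp with scale lam)
    Vmax = V.max(axis=0)
    return Vmax + lam * np.log(np.exp((V - Vmax) / lam).sum(axis=0))

# quick sanity check on illustrative inputs shaped (choices, productivity, assets)
V = np.random.randn(4, 7, 100)
P = choice_prob_sketch(V, 0.01)
assert np.allclose(P.sum(axis=0), 1.0)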
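
The rewritten `firm_steady_state_solution` helper now takes the targets Y and L as inputs instead of normalizing Y = 1 internally. As a quick consistency check, the sketch below feeds the calibration targeted by the new `ks_remapped_dag` fixture (r = 0.01, Y = 1, L = 1, delta = 0.025, alpha = 0.3) through the helper's formulas and verifies that the implied (w, K, Z) are consistent with the firm block's first-order conditions as written in the removed remap_test.py.

r, Y, L, delta, alpha = 0.01, 1.0, 1.0, 0.025, 0.3

rk = r + delta                             # 0.035
w = (1 - alpha) * Y / L                    # 0.7
K = alpha * Y / rk                         # ~8.57, near the fixture's initial guess K = 8.
Z = Y / K ** alpha / L ** (1 - alpha)      # ~0.52, near the fixture's initial guess Z = 0.5

assert abs(alpha * Z * (K / L) ** (alpha - 1) - delta - r) < 1e-10   # reproduces r
assert abs(Z * K ** alpha * L ** (1 - alpha) - Y) < 1e-10            # reproduces Y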
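
The fixture returns `exogenous`, `unknowns`, and `targets` for transitional dynamics, but no test exercises them yet. A possible follow-up test, patterned on the commented-out impulse examples in the deleted remap_test.py and dcblock_test.py, is sketched below; it is hypothetical and not part of this patch, and it assumes the resulting ImpulseDict reports the remapped household outputs (C_patient, C_impatient) alongside the aggregates.

import numpy as np

def test_ks_remapped_td(ks_remapped_dag):
    ks_remapped, exogenous, unknowns, targets, ss = ks_remapped_dag

    # 0.1% TFP shock decaying at rate 0.9, as in the deleted test scripts
    shock = {exogenous[0]: 0.001 * 0.9 ** np.arange(300)}
    td_lin = ks_remapped.solve_impulse_linear(ss, shock, unknowns=unknowns, targets=targets)

    # per-type responses should aggregate with the same weights as the `aggregate` block
    mass = ss['mass_patient']
    assert np.allclose(td_lin['C'], mass * td_lin['C_patient'] + (1 - mass) * td_lin['C_impatient'])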