diff --git a/Project.toml b/Project.toml index 06e8ec4..ee367d4 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "WildBootTests" uuid = "65c2e505-86ba-4c19-93f1-95506c1443d5" authors = ["droodman "] -version = "0.7.10" +version = "0.7.11" [deps] Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" diff --git a/README.md b/README.md index 39980ec..963e1c7 100644 --- a/README.md +++ b/README.md @@ -41,7 +41,7 @@ test <- WildBootTests$wildboottest(R, r, resp=df$y, predexog=cbind(1, df$x), clu test WildBootTests$teststat(test) WildBootTests$p(test) -WildBootTests$CI(test) +WildBootTests$ci(test) plotpoints <- WildBootTests$plotpoints(test) plot(plotpoints$X[[1]], plotpoints$p, type="l") ``` @@ -61,7 +61,7 @@ clustid = df.firm.values test = wbt.wildboottest(R, r, resp=resp, predexog=predexog, clustid=clustid) wbt.teststat(test) wbt.p(test) -wbt.CI(test) +wbt.ci(test) plotpoints = wbt.plotpoints(test) plt.plot(plotpoints.X[0], plotpoints.p) ``` @@ -81,6 +81,6 @@ clustid = np.asarray(Data.get('firm')) test = wbt.wildboottest(R, r, resp=resp, predexog=predexog, clustid=clustid) wbt.p(test) wbt.teststat(test) -wbt.CI(test) +wbt.ci(test) end ``` diff --git a/docs/build/exported/index.html b/docs/build/exported/index.html index f308873..e666d69 100644 --- a/docs/build/exported/index.html +++ b/docs/build/exported/index.html @@ -1,2 +1,2 @@ -Public functions and types · WildBootTests.jl
WildBootTests.wildboottestFunction

wildboottest([T::DataType=Float64,] R::AbstractMatrix, r::AbstractVector; resp, <optional keyword arguments>) -> WildBootTests.BootTestResult

Function to perform wild-bootstrap-based hypothesis test

Positional arguments

  • T::DataType: data type for inputs, results, and computations: Float32 or Float64 (default)
  • R::AbstractMatrix and r::AbstractVector: required matrix and vector expressing the null Rβ=r; see notes below

Required keyword argument

  • resp::AbstractVector: response/dependent variable (y or y₁ in Roodman et al. (2019))

Optional keyword arguments

  • predexog::AbstractVecOrMat: exogenous predictors, including constant term, if any (X/X₁)
  • predendog::AbstractVecOrMat: endogenous predictors (Y₂)
  • inst::AbstractVecOrMat: instruments (X₂)
  • R1::AbstractMatrix and r1::AbstractVector: model constraints; same format as for R and r
  • clustid::AbstractVecOrMat{<:Integer}: data vector/matrix of error and bootstrapping cluster identifiers; see Notes
  • nbootclustvar::Integer=1: number of bootstrap-clustering variables
  • nerrclustvar::Integer=nbootclustvar: number of error-clustering variables
  • issorted:Bool=false: time-saving flag: data matrices are already sort by column types 2, then 3, then 1 (see notes)
  • hetrobust::Bool=true: true unless errors are treated as iid
  • nfe::Integer=0: number of fixed-effect groups; if 0 yet feid is provided, will be computed
  • feid::AbstractVector{<:Integer}: data vector for one-way fixed effect group identifier
  • fedfadj::Integer=nfe: degrees of freedom that fixed effects (if any) consume
  • obswt::AbstractVector=[]: observation weight vector; default is equal weighting
  • fweights::Bool=false: true for frequency weights
  • maxmatsize::Number: maximum size of auxilliary weight matrix (v), in gigabytes
  • ptype::Symbol=:symmetric: p value type (:symmetric, :equaltail, :lower, :upper)
  • bootstrapc::Bool=false: true to request bootstrap-c instead of bootstrap-t
  • LIML::Bool=false: true for LIML or Fuller LIML
  • Fuller::Number: Fuller LIML factor
  • kappa::Number: fixed κ for k-class estimation
  • ARubin::Bool=false: true for Anderson-Rubin test
  • small::Bool=true: true to multiply test statistics by G/(G-1) × N/(N-k), where G, N, k are number of clusters, observations, and predictors
  • clusteradj::Bool=true: false to drop G/(G-1) factor
  • clustermin::Bool=false`: for multiway clustering, true to base G/(G-1) factor for all clusterings ]on the smallest G across clusterings
  • scorebs::Bool=false: true for score bootstrap instead of wild bootstrap
  • reps::Integer=999: number of bootstrap replications; reps = 0 requests classical Rao (or Wald) test if imposenull = true (or false)
  • imposenull::Bool=true: true to impose null
  • auxwttype::Symbol=:rademacher: auxilliary weight type (:rademacher, :mammen, :webb, :normal, :gamma)
  • rng::AbstractRNG=MersenneTwister(): randon number generator
  • level::Number=.95: significance level (0-1)
  • rtol::Number=1e-6: tolerance for CI bound determination
  • madjtype::Symbol=:none: multiple hypothesis adjustment (none, :bonferroni, :sidak)
  • NH0::Integer=1: number of hypotheses tested, including one being tested now
  • ML::Bool=false: true for (nonlinear) ML estimation
  • scores::AbstractVecOrMat: for ML, pre-computed scores
  • beta::AbstractVector: for ML, parameter estimates
  • A::AbstractMatrix: for ML, covariance estimates
  • gridmin: vector of graph lower bounds; max length 2, missing/NaN entries ask wildboottest() to choose
  • gridmax: vector of graph upper bounds; missing/NaN entries ask wildboottest() to choose
  • gridpoints: vector of number of sampling points; missing/NaN entries ask wildboottest() to choose
  • diststat::Symbole=:none: :t to save bootstrap distribution of t/z/F/χ² statistics; :numer to save numerators thereof
  • getCI::Bool=true: whether to return CI
  • getplot::Bool=getCI: whether to generate plot data
  • getauxweights::Bool=false: whether to save auxilliary weight matrix (v)

Notes

T, ptype, auxwttype, madjtype, and diststat may also be strings. Examples: "Float32" and "webb".

The columns of R in the statement of the null should correspond to those of the matrix [predexog predendog], where predendog is non-empty only in regressions with instruments.

Order the columns of clustid this way:

  1. Variables only used to define bootstrapping clusters, as in the subcluster bootstrap.
  2. Variables used to define both bootstrapping and error clusters.
  3. Variables only used to define error clusters.

nbootclustvar is then the number of columns of type 1 or 2; nerrclustvar is the number of columns of type 2 or 3. Typically clustid is a single column of type 2.

wildboottest() does not handle missing data values: all data and identifier matrices must be restricted to the estimation sample.

WildBootTests.padjFunction

Return p value after multiple-hypothesis adjustment, if any

WildBootTests.repsfeasFunction

Return actual number of replications, subject to enumeration of Rademacher draws

WildBootTests.plotpointsFunction

Return data for confidence plot of test. Return value is a 2-tuple with named entries X and p holding the confidence sampling locations and p values respectively. X is in turn a 1- or 2-tuple of vectors of sampling coordinates for each dimension of the tested hypothesis.

WildBootTests.CIFunction

Return confidence interval matrix from test, one row per disjoint piece

WildBootTests.distFunction

Return bootstrap distribution of statistic or statistic numerator in bootstrap test

+Public functions and types · WildBootTests.jl
WildBootTests.wildboottestFunction

wildboottest([T::DataType=Float64,] R::AbstractMatrix, r::AbstractVector; resp, <optional keyword arguments>) -> WildBootTests.BootTestResult

Function to perform wild-bootstrap-based hypothesis test

Positional arguments

  • T::DataType: data type for inputs, results, and computations: Float32 or Float64 (default)
  • R::AbstractMatrix and r::AbstractVector: required matrix and vector expressing the null Rβ=r; see notes below

Required keyword argument

  • resp::AbstractVector: response/dependent variable (y or y₁ in Roodman et al. (2019))

Optional keyword arguments

  • predexog::AbstractVecOrMat: exogenous predictors, including constant term, if any (X/X₁)
  • predendog::AbstractVecOrMat: endogenous predictors (Y₂)
  • inst::AbstractVecOrMat: instruments (X₂)
  • R1::AbstractMatrix and r1::AbstractVector: model constraints; same format as for R and r
  • clustid::AbstractVecOrMat{<:Integer}: data vector/matrix of error and bootstrapping cluster identifiers; see Notes
  • nbootclustvar::Integer=1: number of bootstrap-clustering variables
  • nerrclustvar::Integer=nbootclustvar: number of error-clustering variables
  • issorted::Bool=false: time-saving flag: data matrices are already sorted by column types 2, then 3, then 1 (see notes)
  • hetrobust::Bool=true: true unless errors are treated as iid
  • nfe::Integer=0: number of fixed-effect groups; if 0 yet feid is provided, will be computed
  • feid::AbstractVector{<:Integer}: data vector for one-way fixed effect group identifier
  • fedfadj::Integer=nfe: degrees of freedom that fixed effects (if any) consume
  • obswt::AbstractVector=[]: observation weight vector; default is equal weighting
  • fweights::Bool=false: true for frequency weights
  • maxmatsize::Number: maximum size of auxiliary weight matrix (v), in gigabytes
  • ptype::Symbol=:symmetric: p value type (:symmetric, :equaltail, :lower, :upper)
  • bootstrapc::Bool=false: true to request bootstrap-c instead of bootstrap-t
  • liml::Bool=false: true for liml or Fuller liml
  • Fuller::Number: Fuller liml factor
  • kappa::Number: fixed κ for k-class estimation
  • arubin::Bool=false: true for Anderson-Rubin test
  • small::Bool=true: true to multiply test statistics by G/(G-1) × N/(N-k), where G, N, k are number of clusters, observations, and predictors
  • clusteradj::Bool=true: false to drop G/(G-1) factor
  • clustermin::Bool=false: for multiway clustering, true to base the G/(G-1) factor for all clusterings on the smallest G across clusterings
  • scorebs::Bool=false: true for score bootstrap instead of wild bootstrap
  • reps::Integer=999: number of bootstrap replications; reps = 0 requests classical Rao (or Wald) test if imposenull = true (or false)
  • imposenull::Bool=true: true to impose null
  • auxwttype::Symbol=:rademacher: auxiliary weight type (:rademacher, :mammen, :webb, :normal, :gamma)
  • rng::AbstractRNG=MersenneTwister(): random number generator
  • level::Number=.95: significance level (0-1)
  • rtol::Number=1e-6: tolerance for ci bound determination
  • madjtype::Symbol=:none: multiple hypothesis adjustment (none, :bonferroni, :sidak)
  • nH0::Integer=1: number of hypotheses tested, including one being tested now
  • ml::Bool=false: true for (nonlinear) ML estimation
  • scores::AbstractVecOrMat: for ML, pre-computed scores
  • beta::AbstractVector: for ML, parameter estimates
  • A::AbstractMatrix: for ML, covariance estimates
  • gridmin: vector of graph lower bounds; max length 2, missing/NaN entries ask wildboottest() to choose
  • gridmax: vector of graph upper bounds; missing/NaN entries ask wildboottest() to choose
  • gridpoints: vector of number of sampling points; missing/NaN entries ask wildboottest() to choose
  • diststat::Symbol=:none: :t to save bootstrap distribution of t/z/F/χ² statistics; :numer to save numerators thereof
  • getci::Bool=true: whether to return confidence interval
  • getplot::Bool=getci: whether to generate plot data
  • getauxweights::Bool=false: whether to save auxiliary weight matrix (v)

Notes

T, ptype, auxwttype, madjtype, and diststat may also be strings. Examples: "Float32" and "webb".

The columns of R in the statement of the null should correspond to those of the matrix [predexog predendog], where predendog is non-empty only in regressions with instruments.

Order the columns of clustid this way:

  1. Variables only used to define bootstrapping clusters, as in the subcluster bootstrap.
  2. Variables used to define both bootstrapping and error clusters.
  3. Variables only used to define error clusters.

nbootclustvar is then the number of columns of type 1 or 2; nerrclustvar is the number of columns of type 2 or 3. Typically clustid is a single column of type 2.

wildboottest() does not handle missing data values: all data and identifier matrices must be restricted to the estimation sample.

WildBootTests.padjFunction

Return p value after multiple-hypothesis adjustment, if any

WildBootTests.repsfeasFunction

Return actual number of replications, subject to enumeration of Rademacher draws

WildBootTests.plotpointsFunction

Return data for confidence plot of test. Return value is a 2-tuple with named entries X and p holding the confidence sampling locations and p values respectively. X is in turn a 1- or 2-tuple of vectors of sampling coordinates for each dimension of the tested hypothesis.

WildBootTests.ciFunction

Return confidence interval matrix from test, one row per disjoint piece

WildBootTests.distFunction

Return bootstrap distribution of statistic or statistic numerator in bootstrap test

diff --git a/docs/build/index.html b/docs/build/index.html index ef2667b..f687d11 100644 --- a/docs/build/index.html +++ b/docs/build/index.html @@ -1,2 +1,2 @@ -Overview · WildBootTests.jl

WildBootTests.jl performs wild bootstrap-based hypothesis tests at extreme speed. It is intended mainly for linear models: ordinary least squares (OLS) and instrumental variables/two-stage least squares (IV/2SLS). For an introduction to the wild bootstrap and the algorithms deployed here, see Roodman et al. (2019).

The package offers and/or supports:

  • The wild bootstrap for OLS (Wu 1986).
  • The Wild Restricted Efficient bootstrap (WRE) for IV/2SLS/LIML (Davidson and MacKinnon 2010).
  • The subcluster bootstrap (MacKinnon and Webb 2018).
  • Non-bootstrapped Wald, Rao, and Anderson-Rubin tests, optionally with multiway clustering.
  • Confidence intervals formed by inverting the test and iteratively searching for bounds.
  • Multiway clustering.
  • Arbitrary and multiple linear hypotheses in the parameters.
  • Maintained linear constraints on the model (restricted OLS, IV/2SLS/LIML).
  • One-way fixed effects.
  • Generation of data for plotting of confidence curves or surfaces after one- or two-dimensional hypothesis tests.

WildBootTests.jl incorporates order-of-magnitude algorithmic speed-ups developed since Roodman et al. (2019) for OLS and IV/2SLS. And it exploits the efficiency of Julia, for example by offering single-precision (Float32) computation.

The interface is low-level: the exported function wildboottest() accepts scalars, vectors, and matrices, not DataFrames or results from estimation functions such as lm(). This design minimizes the package's dependency footprint while making the core functionality available to multiple programming environments, including Julia, R (through JuliaConnectoR), and Python (through PyJulia). A separate package will provide a higher-level Julia interface.

wildboottest() accepts many optional arguments. Most correspond to options of the Stata package boottest, which are documented in Roodman et al. (2019), §7. Julia-specific additions include an optional first argument T, which can be Float32 or Float64 to specify the precision of computation; and rng, which takes a random number generator such as MersenneTwister(2302394).

On latency

The first time you run wildboottest() in a session, Julia's just-in-time compilation will take ~10 seconds. The same will happen the first time you switch between Float32 and Float64 calculations, or between OLS and IV/2SLS estimation.

+Overview · WildBootTests.jl

WildBootTests.jl performs wild bootstrap-based hypothesis tests at extreme speed. It is intended mainly for linear models: ordinary least squares (OLS) and instrumental variables/two-stage least squares (IV/2SLS). For an introduction to the wild bootstrap and the algorithms deployed here, see Roodman et al. (2019).

The package offers and/or supports:

  • The wild bootstrap for OLS (Wu 1986).
  • The Wild Restricted Efficient bootstrap (WRE) for IV/2SLS/LIML (Davidson and MacKinnon 2010).
  • The subcluster bootstrap (MacKinnon and Webb 2018).
  • Non-bootstrapped Wald, Rao, and Anderson-Rubin tests, optionally with multiway clustering.
  • Confidence intervals formed by inverting the test and iteratively searching for bounds.
  • Multiway clustering.
  • Arbitrary and multiple linear hypotheses in the parameters.
  • Maintained linear constraints on the model (restricted OLS, IV/2SLS/LIML).
  • One-way fixed effects.
  • Generation of data for plotting of confidence curves or surfaces after one- or two-dimensional hypothesis tests.

WildBootTests.jl incorporates order-of-magnitude algorithmic speed-ups developed since Roodman et al. (2019) for OLS and IV/2SLS. And it exploits the efficiency of Julia, for example by offering single-precision (Float32) computation.

The interface is low-level: the exported function wildboottest() accepts scalars, vectors, and matrices, not DataFrames or results from estimation functions such as lm(). This design minimizes the package's dependency footprint while making the core functionality available to multiple programming environments, including Julia, R (through JuliaConnectoR), and Python (through PyJulia). A separate package will provide a higher-level Julia interface.

wildboottest() accepts many optional arguments. Most correspond to options of the Stata package boottest, which are documented in Roodman et al. (2019), §7. Julia-specific additions include an optional first argument T, which can be Float32 or Float64 to specify the precision of computation; and rng, which takes a random number generator such as MersenneTwister(2302394).

On latency

The first time you run wildboottest() in a session, Julia's just-in-time compilation will take ~10 seconds. The same will happen the first time you switch between Float32 and Float64 calculations, or between OLS and IV/2SLS estimation.

diff --git a/docs/build/search/index.html b/docs/build/search/index.html index 29057ef..8d7053b 100644 --- a/docs/build/search/index.html +++ b/docs/build/search/index.html @@ -1,2 +1,2 @@ -Search · WildBootTests.jl

Loading search...

    +Search · WildBootTests.jl

    Loading search...

      diff --git a/docs/build/search_index.js b/docs/build/search_index.js index 44301bb..48fb8f7 100644 --- a/docs/build/search_index.js +++ b/docs/build/search_index.js @@ -1,3 +1,3 @@ var documenterSearchIndex = {"docs": -[{"location":"exported/","page":"Public functions and types","title":"Public functions and types","text":"wildboottest\r\nteststat\r\nstattype\r\np\r\npadj\r\nreps\r\nrepsfeas\r\nnbootclust\r\ndof\r\ndof_r\r\nplotpoints\r\npeak\r\nCI\r\ndist\r\nstatnumer\r\nstatvar\r\nauxweights","category":"page"},{"location":"exported/#WildBootTests.wildboottest","page":"Public functions and types","title":"WildBootTests.wildboottest","text":"wildboottest([T::DataType=Float64,] R::AbstractMatrix, r::AbstractVector; resp, ) -> WildBootTests.BootTestResult\n\nFunction to perform wild-bootstrap-based hypothesis test\n\nPositional arguments\n\nT::DataType: data type for inputs, results, and computations: Float32 or Float64 (default)\nR::AbstractMatrix and r::AbstractVector: required matrix and vector expressing the null Rβ=r; see notes below\n\nRequired keyword argument\n\nresp::AbstractVector: response/dependent variable (y or y₁ in Roodman et al. 
(2019))\n\nOptional keyword arguments\n\npredexog::AbstractVecOrMat: exogenous predictors, including constant term, if any (X/X₁)\npredendog::AbstractVecOrMat: endogenous predictors (Y₂)\ninst::AbstractVecOrMat: instruments (X₂)\nR1::AbstractMatrix and r1::AbstractVector: model constraints; same format as for R and r\nclustid::AbstractVecOrMat{<:Integer}: data vector/matrix of error and bootstrapping cluster identifiers; see Notes \nnbootclustvar::Integer=1: number of bootstrap-clustering variables\nnerrclustvar::Integer=nbootclustvar: number of error-clustering variables\nissorted:Bool=false: time-saving flag: data matrices are already sort by column types 2, then 3, then 1 (see notes)\nhetrobust::Bool=true: true unless errors are treated as iid\nnfe::Integer=0: number of fixed-effect groups; if 0 yet feid is provided, will be computed\nfeid::AbstractVector{<:Integer}: data vector for one-way fixed effect group identifier\nfedfadj::Integer=nfe: degrees of freedom that fixed effects (if any) consume\nobswt::AbstractVector=[]: observation weight vector; default is equal weighting\nfweights::Bool=false: true for frequency weights\nmaxmatsize::Number: maximum size of auxilliary weight matrix (v), in gigabytes\nptype::Symbol=:symmetric: p value type (:symmetric, :equaltail, :lower, :upper)\nbootstrapc::Bool=false: true to request bootstrap-c instead of bootstrap-t\nLIML::Bool=false: true for LIML or Fuller LIML\nFuller::Number: Fuller LIML factor\nkappa::Number: fixed κ for k-class estimation\nARubin::Bool=false: true for Anderson-Rubin test\nsmall::Bool=true: true to multiply test statistics by G/(G-1) × N/(N-k), where G, N, k are number of clusters, observations, and predictors\nclusteradj::Bool=true: false to drop G/(G-1) factor\nclustermin::Bool=false`: for multiway clustering, true to base G/(G-1) factor for all clusterings ]on the smallest G across clusterings\nscorebs::Bool=false: true for score bootstrap instead of wild bootstrap\nreps::Integer=999: number of 
bootstrap replications; reps = 0 requests classical Rao (or Wald) test if imposenull = true (or false)\nimposenull::Bool=true: true to impose null\nauxwttype::Symbol=:rademacher: auxilliary weight type (:rademacher, :mammen, :webb, :normal, :gamma)\nrng::AbstractRNG=MersenneTwister(): randon number generator\nlevel::Number=.95: significance level (0-1)\nrtol::Number=1e-6: tolerance for CI bound determination\nmadjtype::Symbol=:none: multiple hypothesis adjustment (none, :bonferroni, :sidak)\nNH0::Integer=1: number of hypotheses tested, including one being tested now\nML::Bool=false: true for (nonlinear) ML estimation\nscores::AbstractVecOrMat: for ML, pre-computed scores\nbeta::AbstractVector: for ML, parameter estimates\nA::AbstractMatrix: for ML, covariance estimates\ngridmin: vector of graph lower bounds; max length 2, missing/NaN entries ask wildboottest() to choose\ngridmax: vector of graph upper bounds; missing/NaN entries ask wildboottest() to choose\ngridpoints: vector of number of sampling points; missing/NaN entries ask wildboottest() to choose\ndiststat::Symbole=:none: :t to save bootstrap distribution of t/z/F/χ² statistics; :numer to save numerators thereof\ngetCI::Bool=true: whether to return CI\ngetplot::Bool=getCI: whether to generate plot data\ngetauxweights::Bool=false: whether to save auxilliary weight matrix (v)\n\nNotes\n\nT, ptype, auxwttype, madjtype, and diststat may also be strings. Examples: \"Float32\" and \"webb\".\n\nThe columns of R in the statement of the null should correspond to those of the matrix [predexog predendog], where predendog is non-empty only in regressions with instruments. 
\n\nOrder the columns of clustid this way:\n\nVariables only used to define bootstrapping clusters, as in the subcluster bootstrap.\nVariables used to define both bootstrapping and error clusters.\nVariables only used to define error clusters.\n\nnbootclustvar is then the number of columns of type 1 or 2; nerrclustvar is the number of columns of type 2 or 3. Typically clustid is a single column of type 2. \n\nwildboottest() does not handle missing data values: all data and identifier matrices must be restricted to the estimation sample.\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.teststat","page":"Public functions and types","title":"WildBootTests.teststat","text":"Return test statistic\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.stattype","page":"Public functions and types","title":"WildBootTests.stattype","text":"Return type of test statistic: \"t\", \"z\", \"F\", or \"χ²\" \n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.p","page":"Public functions and types","title":"WildBootTests.p","text":"Return p value\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.padj","page":"Public functions and types","title":"WildBootTests.padj","text":"Return p value after multiple-hypothesis adjustment, if any\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.reps","page":"Public functions and types","title":"WildBootTests.reps","text":"Return requested number of replications\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.repsfeas","page":"Public functions and types","title":"WildBootTests.repsfeas","text":"Return actual number of replications, subject to enumeration of Rademacher draws\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.nbootclust","page":"Public functions and types","title":"WildBootTests.nbootclust","text":"Return number of bootstrapping clusters in 
test\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.dof","page":"Public functions and types","title":"WildBootTests.dof","text":"Return degrees of freedom of test\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.dof_r","page":"Public functions and types","title":"WildBootTests.dof_r","text":"Return residual degrees of freedom of test\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.plotpoints","page":"Public functions and types","title":"WildBootTests.plotpoints","text":"Return data for confidence plot of test. Return value is a 2-tuple with named entries X and p holding the confidence sampling locations and p values respectively. X is in turn a 1- or 2-tuple of vectors of sampling coordinates for each dimension of the tested hypothesis.\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.peak","page":"Public functions and types","title":"WildBootTests.peak","text":"Return parameter value with peak p value in test\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.CI","page":"Public functions and types","title":"WildBootTests.CI","text":"Return confidence interval matrix from test, one row per disjoint piece\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.dist","page":"Public functions and types","title":"WildBootTests.dist","text":"Return bootstrap distribution of statistic or statistic numerator in bootstrap test\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.statnumer","page":"Public functions and types","title":"WildBootTests.statnumer","text":"Return numerator of test statistic\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.statvar","page":"Public functions and types","title":"WildBootTests.statvar","text":"Return denominator of test statistic\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.auxweights","page":"Public functions and 
types","title":"WildBootTests.auxweights","text":"Return auxilliary weight matrix for wild bootstrap\n\n\n\n\n\n","category":"function"},{"location":"IVexamples/","page":"IV/2SLS examples","title":"IV/2SLS examples","text":"using WildBootTests, CSV, DataFrames, StatsModels, GLM, Plots\r\n\r\n# specify exactly identified model: regress wage on on tenure, instrumented by union,\r\n# controlling for ttl_exp and collgrad\r\nd = download(\"http://www.stata-press.com/data/r8/nlsw88.dta\", tempname() * \".dta\")\r\ndf = DataFrame(load(d))[:, [:wage; :tenure; :ttl_exp; :collgrad; :industry; :union]]\r\ndropmissing!(df)\r\nf = @formula(wage ~ 1 + ttl_exp + collgrad)\r\nf = apply_schema(f, schema(f, df))\r\nresp, predexog = modelcols(f, df)\r\nivf = @formula(tenure ~ union)\r\nivf = apply_schema(ivf, schema(ivf, df))\r\npredendog, inst = modelcols(ivf, df)\r\n\r\n# test that coefficient on tenure = 0, clustering errors by industry\r\nR = [0 0 0 1]; r = [0]\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry)\r\n\r\n# use equal-tailed instead of symmetric p value\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, ptype=:equaltail)\r\n\r\n# perform bootstrap-c instead of bootstrap-t, as advocated by Young (2019)\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, bootstrapc=true)\r\n\r\n# Rao/score test without bootstrap\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, reps=0)\r\n\r\n# Wald test without bootstrap\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, reps=0, imposenull=false)\r\n\r\n# Anderson-Rubin test that hypothesis holds and instrument is valid\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, ARubin=true)\r\n\r\n# modify model to drop controls and make ttl_exp an instrument\r\nf = @formula(wage ~ 1)\r\nf = apply_schema(f, schema(f, df))\r\nresp, predexog = modelcols(f, df)\r\nivf = @formula(tenure ~ collgrad + 
ttl_exp)\r\nivf = apply_schema(ivf, schema(ivf, df))\r\npredendog, inst = modelcols(ivf, df)\r\n\r\n# test same hypothesis in context of LIML regression\r\nR = [0 1]; r = [0]\r\nwildboottest(R, r; resp, predexog, predendog, inst, LIML=true, clustid=df.industry)","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"WildBootTests.jl performs wild bootstrap-based hypothesis tests at extreme speed. It is intended mainly for linear models: ordinary least squares (OLS) and instrumental variables/two-stage least squares (IV/2SLS). For an introduction to the wild bootstrap and the algorithms deployed here, see Roodman et al. (2019).","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"The package offers and/or supports:","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"The wild bootstrap for OLS (Wu 1986).\nThe Wild Restricted Efficient bootstrap (WRE) for IV/2SLS/LIML (Davidson and MacKinnon 2010).\nThe subcluster bootstrap (MacKinnon and Webb 2018).\nNon-bootstrapped Wald, Rao, and Anderson-Rubin tests, optionally with multiway clustering.\nConfidence intervals formed by inverting the test and iteratively searching for bounds.\nMultiway clustering.\nArbitrary and multiple linear hypotheses in the parameters.\nMaintained linear constraints on the model (restricted OLS, IV/2SLS/LIML).\nOne-way fixed effects.\nGeneration of data for plotting of confidence curves or surfaces after one- or two-dimensional hypothesis tests.","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"WildBootTests.jl incorporates order-of-magnitude algorithmic speed-ups developed since Roodman et al. (2019) for OLS and IV/2SLS. 
And it exploits the efficiency of Julia, for example by offering single-precision (Float32) computation.","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"The interface is low-level: the exported function wildboottest() accepts scalars, vectors, and matrices, not DataFrames or results from estimation functions such as lm(). This design minimizes the package's dependency footprint while making the core functionality available to multiple programming environments, including Julia, R (through JuliaConnectoR), and Python (through PyJulia). A separate package will provide a higher-level Julia interface.","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"wildboottest() accepts many optional arguments. Most correspond to options of the Stata package boottest, which are documented in Roodman et al. (2019), §7. Julia-specific additions include an optional first argument T, which can be Float32 or Float64 to specify the precision of computation; and rng, which takes a random number generator such as MersenneTwister(2302394).","category":"page"},{"location":"#On-latency","page":"Overview","title":"On latency","text":"","category":"section"},{"location":"","page":"Overview","title":"Overview","text":"The first time you run wildboottest() in a session, Julia's just-in-time compilation will take ~10 seconds. 
The same will happen the first time you switch between Float32 and Float64 calculations, or between OLS and IV/2SLS estimation.","category":"page"},{"location":"OLSexamples/#Basic-OLS-example","page":"OLS examples","title":"Basic OLS example","text":"","category":"section"},{"location":"OLSexamples/","page":"OLS examples","title":"OLS examples","text":"julia> using WildBootTests, CSV, DataFrames, StatsModels, GLM, Plots\r\n\r\njulia> d = download(\"https://raw.github.com/vincentarelbundock/Rdatasets/master/csv/sandwich/PetersenCL.csv\");\r\n\r\njulia> df = CSV.read(d, DataFrame);\r\n\r\njulia> f = @formula(y ~ 1 + x); # state OLS model\r\n\r\njulia> f = apply_schema(f, schema(f, df)); # link model to data\r\n\r\njulia> lm(f, df) # run OLS for illustration; not needed for following lines\r\nStatsModels.TableRegressionModel{LinearModel{GLM.LmResp{Vector{Float64}}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}}}}, Matrix{Float64}}\r\n\r\ny ~ 1 + x\r\n\r\nCoefficients:\r\n─────────────────────────────────────────────────────────────────────────\r\n Coef. Std. 
Error t Pr(>|t|) Lower 95% Upper 95%\r\n─────────────────────────────────────────────────────────────────────────\r\n(Intercept) 0.0296797 0.0283593 1.05 0.2954 -0.025917 0.0852764\r\nx 1.03483 0.0285833 36.20 <1e-99 0.978798 1.09087\r\n─────────────────────────────────────────────────────────────────────────\r\n\r\njulia> resp, predexog = modelcols(f, df); # extract response & (exogenous) predictor variables\r\n\r\njulia> clustid = df.firm; # extract clustering variable\r\n\r\njulia> R = [0 1]; r = [1]; # put null that coefficient on x = 1 in Rβ̂ = r form, where β̂ is parameter vector\r\n\r\njulia> test = wildboottest(R, r; resp=resp, predexog=predexog, clustid=clustid)\r\nWildBootTests.BootTestResult{Float32}\r\n\r\np = 0.492\r\nCI = Float32[0.93461335 1.1347668]\r\n\r\njulia> test = wildboottest(R, r; resp, predexog, clustid); # same, using Julia syntactic sugar\r\n\r\njulia> p(test) # programmatically extract p value\r\n0.49459493f0\r\n\r\njulia> CI(test) # programmatically extract confidence interval\r\n1×2 Matrix{Float32}:\r\n 0.934961 1.13469\r\n\r\njulia> plot(plotpoints(test)...) 
# plot confidence curve","category":"page"},{"location":"OLSexamples/#Further-examples","page":"OLS examples","title":"Further examples","text":"","category":"section"},{"location":"OLSexamples/","page":"OLS examples","title":"OLS examples","text":"using WildBootTests, CSV, DataFrames, StatsModels, GLM, Plots\r\n\r\n# use Webb instead of Rademacher weights, 99,999 bootstrap replications instead of 999\r\nwildboottest(R, r; resp, predexog, clustid, reps=99999, auxwttype=:webb)\r\n\r\n# bootstrap in double-precision (Float64) instead of single (Float32)\r\n# slow on first use because of recompile\r\nwildboottest(Float64, R, r; resp, predexog, clustid)\r\n\r\n# use guaranteed-stable random number generator for exact replicability\r\nusing StableRNGs\r\nwildboottest(R, r; resp, predexog, clustid, rng=StableRNG(23948572))\r\n\r\n# test that coefficient on intercept = 0 and coefficient on x = 1; plot confidence surface\r\ntest = wildboottest([1 0; 0 1], [0;1]; resp, predexog, clustid, reps=9999)\r\nplot(plotpoints(test).X..., plotpoints(test).p, st=:contourf)\r\n\r\n# multiway-cluster errors by firm and year; bootstrap by firm\r\nwildboottest(R, r; resp, predexog, clustid=Matrix(df[:,[:firm, :year]]), nerrclustvar=2, nbootclustvar=1)\r\n\r\n# same but bootstrap by year\r\nwildboottest(R, r; resp, predexog, clustid=Matrix(df[:,[:year, :firm]]), nerrclustvar=2, nbootclustvar=1)\r\n\r\n# same but bootstrap by year-firm pair\r\nwildboottest(R, r; resp, predexog, clustid=Matrix(df[:,[:year, :firm]]), nerrclustvar=2, nbootclustvar=2)\r\n\r\n# Rao/score test with multiway clustering of errors but no bootstrap\r\nwildboottest(R, r; resp, predexog, predendog, inst, Matrix(df[:,[:year, :firm]]), reps=0)\r\n\r\n# Same but Wald test: i.e., conventional, multiway clustered errors\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=Matrix(df[:,[:year, :firm]]), reps=0, imposenull=false)\r\n\r\n# add year fixed effects to model; cluster by firm\r\nwildboottest(R, r; resp, 
predexog, feid=df.year, clustid=df.firm)\r\n\r\n# test hypotheses, while imposing model constraint that constant term = 0.2\r\nR1 = [1 0]; r1 = [.2]\r\nwildboottest(R, r; R1, r1, resp, predexog, clustid=df.firm)","category":"page"}] +[{"location":"exported/","page":"Public functions and types","title":"Public functions and types","text":"wildboottest\r\nteststat\r\nstattype\r\np\r\npadj\r\nreps\r\nrepsfeas\r\nnbootclust\r\ndof\r\ndof_r\r\nplotpoints\r\npeak\r\nci\r\ndist\r\nstatnumer\r\nstatvar\r\nauxweights","category":"page"},{"location":"exported/#WildBootTests.wildboottest","page":"Public functions and types","title":"WildBootTests.wildboottest","text":"wildboottest([T::DataType=Float64,] R::AbstractMatrix, r::AbstractVector; resp, ) -> WildBootTests.BootTestResult\n\nFunction to perform wild-bootstrap-based hypothesis test\n\nPositional arguments\n\nT::DataType: data type for inputs, results, and computations: Float32 or Float64 (default)\nR::AbstractMatrix and r::AbstractVector: required matrix and vector expressing the null Rβ=r; see notes below\n\nRequired keyword argument\n\nresp::AbstractVector: response/dependent variable (y or y₁ in Roodman et al. 
(2019))\n\nOptional keyword arguments\n\npredexog::AbstractVecOrMat: exogenous predictors, including constant term, if any (X/X₁)\npredendog::AbstractVecOrMat: endogenous predictors (Y₂)\ninst::AbstractVecOrMat: instruments (X₂)\nR1::AbstractMatrix and r1::AbstractVector: model constraints; same format as for R and r\nclustid::AbstractVecOrMat{<:Integer}: data vector/matrix of error and bootstrapping cluster identifiers; see Notes \nnbootclustvar::Integer=1: number of bootstrap-clustering variables\nnerrclustvar::Integer=nbootclustvar: number of error-clustering variables\nissorted::Bool=false: time-saving flag: data matrices are already sorted by column types 2, then 3, then 1 (see notes)\nhetrobust::Bool=true: true unless errors are treated as iid\nnfe::Integer=0: number of fixed-effect groups; if 0 yet feid is provided, will be computed\nfeid::AbstractVector{<:Integer}: data vector for one-way fixed effect group identifier\nfedfadj::Integer=nfe: degrees of freedom that fixed effects (if any) consume\nobswt::AbstractVector=[]: observation weight vector; default is equal weighting\nfweights::Bool=false: true for frequency weights\nmaxmatsize::Number: maximum size of auxiliary weight matrix (v), in gigabytes\nptype::Symbol=:symmetric: p value type (:symmetric, :equaltail, :lower, :upper)\nbootstrapc::Bool=false: true to request bootstrap-c instead of bootstrap-t\nliml::Bool=false: true for liml or Fuller liml\nFuller::Number: Fuller liml factor\nkappa::Number: fixed κ for k-class estimation\narubin::Bool=false: true for Anderson-Rubin test\nsmall::Bool=true: true to multiply test statistics by G/(G-1) × N/(N-k), where G, N, k are number of clusters, observations, and predictors\nclusteradj::Bool=true: false to drop G/(G-1) factor\nclustermin::Bool=false: for multiway clustering, true to base G/(G-1) factor for all clusterings on the smallest G across clusterings\nscorebs::Bool=false: true for score bootstrap instead of wild bootstrap\nreps::Integer=999: number of 
bootstrap replications; reps = 0 requests classical Rao (or Wald) test if imposenull = true (or false)\nimposenull::Bool=true: true to impose null\nauxwttype::Symbol=:rademacher: auxiliary weight type (:rademacher, :mammen, :webb, :normal, :gamma)\nrng::AbstractRNG=MersenneTwister(): random number generator\nlevel::Number=.95: significance level (0-1)\nrtol::Number=1e-6: tolerance for ci bound determination\nmadjtype::Symbol=:none: multiple hypothesis adjustment (:none, :bonferroni, :sidak)\nnH0::Integer=1: number of hypotheses tested, including one being tested now\nml::Bool=false: true for (nonlinear) ML estimation\nscores::AbstractVecOrMat: for ML, pre-computed scores\nbeta::AbstractVector: for ML, parameter estimates\nA::AbstractMatrix: for ML, covariance estimates\ngridmin: vector of graph lower bounds; max length 2, missing/NaN entries ask wildboottest() to choose\ngridmax: vector of graph upper bounds; missing/NaN entries ask wildboottest() to choose\ngridpoints: vector of number of sampling points; missing/NaN entries ask wildboottest() to choose\ndiststat::Symbol=:none: :t to save bootstrap distribution of t/z/F/χ² statistics; :numer to save numerators thereof\ngetci::Bool=true: whether to return confidence interval\ngetplot::Bool=getci: whether to generate plot data\ngetauxweights::Bool=false: whether to save auxiliary weight matrix (v)\n\nNotes\n\nT, ptype, auxwttype, madjtype, and diststat may also be strings. Examples: \"Float32\" and \"webb\".\n\nThe columns of R in the statement of the null should correspond to those of the matrix [predexog predendog], where predendog is non-empty only in regressions with instruments. 
\n\nOrder the columns of clustid this way:\n\nVariables only used to define bootstrapping clusters, as in the subcluster bootstrap.\nVariables used to define both bootstrapping and error clusters.\nVariables only used to define error clusters.\n\nnbootclustvar is then the number of columns of type 1 or 2; nerrclustvar is the number of columns of type 2 or 3. Typically clustid is a single column of type 2. \n\nwildboottest() does not handle missing data values: all data and identifier matrices must be restricted to the estimation sample.\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.teststat","page":"Public functions and types","title":"WildBootTests.teststat","text":"Return test statistic\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.stattype","page":"Public functions and types","title":"WildBootTests.stattype","text":"Return type of test statistic: \"t\", \"z\", \"F\", or \"χ²\" \n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.p","page":"Public functions and types","title":"WildBootTests.p","text":"Return p value\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.padj","page":"Public functions and types","title":"WildBootTests.padj","text":"Return p value after multiple-hypothesis adjustment, if any\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.reps","page":"Public functions and types","title":"WildBootTests.reps","text":"Return requested number of replications\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.repsfeas","page":"Public functions and types","title":"WildBootTests.repsfeas","text":"Return actual number of replications, subject to enumeration of Rademacher draws\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.nbootclust","page":"Public functions and types","title":"WildBootTests.nbootclust","text":"Return number of bootstrapping clusters in 
test\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.dof","page":"Public functions and types","title":"WildBootTests.dof","text":"Return degrees of freedom of test\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.dof_r","page":"Public functions and types","title":"WildBootTests.dof_r","text":"Return residual degrees of freedom of test\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.plotpoints","page":"Public functions and types","title":"WildBootTests.plotpoints","text":"Return data for confidence plot of test. Return value is a 2-tuple with named entries X and p holding the confidence sampling locations and p values respectively. X is in turn a 1- or 2-tuple of vectors of sampling coordinates for each dimension of the tested hypothesis.\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.peak","page":"Public functions and types","title":"WildBootTests.peak","text":"Return parameter value with peak p value in test\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.ci","page":"Public functions and types","title":"WildBootTests.ci","text":"Return confidence interval matrix from test, one row per disjoint piece\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.dist","page":"Public functions and types","title":"WildBootTests.dist","text":"Return bootstrap distribution of statistic or statistic numerator in bootstrap test\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.statnumer","page":"Public functions and types","title":"WildBootTests.statnumer","text":"Return numerator of test statistic\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.statvar","page":"Public functions and types","title":"WildBootTests.statvar","text":"Return denominator of test statistic\n\n\n\n\n\n","category":"function"},{"location":"exported/#WildBootTests.auxweights","page":"Public functions and 
types","title":"WildBootTests.auxweights","text":"Return auxiliary weight matrix for wild bootstrap\n\n\n\n\n\n","category":"function"},{"location":"IVexamples/","page":"IV/2SLS examples","title":"IV/2SLS examples","text":"using WildBootTests, CSV, DataFrames, StatsModels, GLM, Plots\r\n\r\n# specify exactly identified model: regress wage on tenure, instrumented by union,\r\n# controlling for ttl_exp and collgrad\r\nd = download(\"http://www.stata-press.com/data/r8/nlsw88.dta\", tempname() * \".dta\")\r\ndf = DataFrame(load(d))[:, [:wage; :tenure; :ttl_exp; :collgrad; :industry; :union]]\r\ndropmissing!(df)\r\nf = @formula(wage ~ 1 + ttl_exp + collgrad)\r\nf = apply_schema(f, schema(f, df))\r\nresp, predexog = modelcols(f, df)\r\nivf = @formula(tenure ~ union)\r\nivf = apply_schema(ivf, schema(ivf, df))\r\npredendog, inst = modelcols(ivf, df)\r\n\r\n# test that coefficient on tenure = 0, clustering errors by industry\r\nR = [0 0 0 1]; r = [0]\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry)\r\n\r\n# use equal-tailed instead of symmetric p value\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, ptype=:equaltail)\r\n\r\n# perform bootstrap-c instead of bootstrap-t, as advocated by Young (2019)\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, bootstrapc=true)\r\n\r\n# Rao/score test without bootstrap\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, reps=0)\r\n\r\n# Wald test without bootstrap\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, reps=0, imposenull=false)\r\n\r\n# Anderson-Rubin test that hypothesis holds and instrument is valid\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, arubin=true)\r\n\r\n# modify model to drop controls and make ttl_exp an instrument\r\nf = @formula(wage ~ 1)\r\nf = apply_schema(f, schema(f, df))\r\nresp, predexog = modelcols(f, df)\r\nivf = @formula(tenure ~ collgrad + 
ttl_exp)\r\nivf = apply_schema(ivf, schema(ivf, df))\r\npredendog, inst = modelcols(ivf, df)\r\n\r\n# test same hypothesis in context of LIML regression\r\nR = [0 1]; r = [0]\r\nwildboottest(R, r; resp, predexog, predendog, inst, liml=true, clustid=df.industry)","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"WildBootTests.jl performs wild bootstrap-based hypothesis tests at extreme speed. It is intended mainly for linear models: ordinary least squares (OLS) and instrumental variables/two-stage least squares (IV/2SLS). For an introduction to the wild bootstrap and the algorithms deployed here, see Roodman et al. (2019).","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"The package offers and/or supports:","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"The wild bootstrap for OLS (Wu 1986).\nThe Wild Restricted Efficient bootstrap (WRE) for IV/2SLS/LIML (Davidson and MacKinnon 2010).\nThe subcluster bootstrap (MacKinnon and Webb 2018).\nNon-bootstrapped Wald, Rao, and Anderson-Rubin tests, optionally with multiway clustering.\nConfidence intervals formed by inverting the test and iteratively searching for bounds.\nMultiway clustering.\nArbitrary and multiple linear hypotheses in the parameters.\nMaintained linear constraints on the model (restricted OLS, IV/2SLS/LIML).\nOne-way fixed effects.\nGeneration of data for plotting of confidence curves or surfaces after one- or two-dimensional hypothesis tests.","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"WildBootTests.jl incorporates order-of-magnitude algorithmic speed-ups developed since Roodman et al. (2019) for OLS and IV/2SLS. 
And it exploits the efficiency of Julia, for example by offering single-precision (Float32) computation.","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"The interface is low-level: the exported function wildboottest() accepts scalars, vectors, and matrices, not DataFrames or results from estimation functions such as lm(). This design minimizes the package's dependency footprint while making the core functionality available to multiple programming environments, including Julia, R (through JuliaConnectoR), and Python (through PyJulia). A separate package will provide a higher-level Julia interface.","category":"page"},{"location":"","page":"Overview","title":"Overview","text":"wildboottest() accepts many optional arguments. Most correspond to options of the Stata package boottest, which are documented in Roodman et al. (2019), §7. Julia-specific additions include an optional first argument T, which can be Float32 or Float64 to specify the precision of computation; and rng, which takes a random number generator such as MersenneTwister(2302394).","category":"page"},{"location":"#On-latency","page":"Overview","title":"On latency","text":"","category":"section"},{"location":"","page":"Overview","title":"Overview","text":"The first time you run wildboottest() in a session, Julia's just-in-time compilation will take ~10 seconds. 
The same will happen the first time you switch between Float32 and Float64 calculations, or between OLS and IV/2SLS estimation.","category":"page"},{"location":"OLSexamples/#Basic-OLS-example","page":"OLS examples","title":"Basic OLS example","text":"","category":"section"},{"location":"OLSexamples/","page":"OLS examples","title":"OLS examples","text":"julia> using WildBootTests, CSV, DataFrames, StatsModels, GLM, Plots\r\n\r\njulia> d = download(\"https://raw.github.com/vincentarelbundock/Rdatasets/master/csv/sandwich/PetersenCL.csv\");\r\n\r\njulia> df = CSV.read(d, DataFrame);\r\n\r\njulia> f = @formula(y ~ 1 + x); # state OLS model\r\n\r\njulia> f = apply_schema(f, schema(f, df)); # link model to data\r\n\r\njulia> lm(f, df) # run OLS for illustration; not needed for following lines\r\nStatsModels.TableRegressionModel{LinearModel{GLM.LmResp{Vector{Float64}}, GLM.DensePredChol{Float64, LinearAlgebra.CholeskyPivoted{Float64, Matrix{Float64}}}}, Matrix{Float64}}\r\n\r\ny ~ 1 + x\r\n\r\nCoefficients:\r\n─────────────────────────────────────────────────────────────────────────\r\n Coef. Std. 
Error t Pr(>|t|) Lower 95% Upper 95%\r\n─────────────────────────────────────────────────────────────────────────\r\n(Intercept) 0.0296797 0.0283593 1.05 0.2954 -0.025917 0.0852764\r\nx 1.03483 0.0285833 36.20 <1e-99 0.978798 1.09087\r\n─────────────────────────────────────────────────────────────────────────\r\n\r\njulia> resp, predexog = modelcols(f, df); # extract response & (exogenous) predictor variables\r\n\r\njulia> clustid = df.firm; # extract clustering variable\r\n\r\njulia> R = [0 1]; r = [1]; # put null that coefficient on x = 1 in Rβ̂ = r form, where β̂ is parameter vector\r\n\r\njulia> test = wildboottest(R, r; resp=resp, predexog=predexog, clustid=clustid)\r\nWildBootTests.BootTestResult{Float32}\r\n\r\np = 0.492\r\nci = Float32[0.93461335 1.1347668]\r\n\r\njulia> test = wildboottest(R, r; resp, predexog, clustid); # same, using Julia syntactic sugar\r\n\r\njulia> p(test) # programmatically extract p value\r\n0.49459493f0\r\n\r\njulia> ci(test) # programmatically extract confidence interval\r\n1×2 Matrix{Float32}:\r\n 0.934961 1.13469\r\n\r\njulia> plot(plotpoints(test)...) 
# plot confidence curve","category":"page"},{"location":"OLSexamples/#Further-examples","page":"OLS examples","title":"Further examples","text":"","category":"section"},{"location":"OLSexamples/","page":"OLS examples","title":"OLS examples","text":"using WildBootTests, CSV, DataFrames, StatsModels, GLM, Plots\r\n\r\n# use Webb instead of Rademacher weights, 99,999 bootstrap replications instead of 999\r\nwildboottest(R, r; resp, predexog, clustid, reps=99999, auxwttype=:webb)\r\n\r\n# bootstrap in double-precision (Float64) instead of single (Float32)\r\n# slow on first use because of recompile\r\nwildboottest(Float64, R, r; resp, predexog, clustid)\r\n\r\n# use guaranteed-stable random number generator for exact replicability\r\nusing StableRNGs\r\nwildboottest(R, r; resp, predexog, clustid, rng=StableRNG(23948572))\r\n\r\n# test that coefficient on intercept = 0 and coefficient on x = 1; plot confidence surface\r\ntest = wildboottest([1 0; 0 1], [0;1]; resp, predexog, clustid, reps=9999)\r\nplot(plotpoints(test).X..., plotpoints(test).p, st=:contourf)\r\n\r\n# multiway-cluster errors by firm and year; bootstrap by firm\r\nwildboottest(R, r; resp, predexog, clustid=Matrix(df[:,[:firm, :year]]), nerrclustvar=2, nbootclustvar=1)\r\n\r\n# same but bootstrap by year\r\nwildboottest(R, r; resp, predexog, clustid=Matrix(df[:,[:year, :firm]]), nerrclustvar=2, nbootclustvar=1)\r\n\r\n# same but bootstrap by year-firm pair\r\nwildboottest(R, r; resp, predexog, clustid=Matrix(df[:,[:year, :firm]]), nerrclustvar=2, nbootclustvar=2)\r\n\r\n# Rao/score test with multiway clustering of errors but no bootstrap\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=Matrix(df[:,[:year, :firm]]), reps=0)\r\n\r\n# Same but Wald test: i.e., conventional, multiway clustered errors\r\nwildboottest(R, r; resp, predexog, predendog, inst, clustid=Matrix(df[:,[:year, :firm]]), reps=0, imposenull=false)\r\n\r\n# add year fixed effects to model; cluster by firm\r\nwildboottest(R, r; resp, 
predexog, feid=df.year, clustid=df.firm)\r\n\r\n# test hypotheses, while imposing model constraint that constant term = 0.2\r\nR1 = [1 0]; r1 = [.2]\r\nwildboottest(R, r; R1, r1, resp, predexog, clustid=df.firm)","category":"page"}] } diff --git a/docs/src/IVexamples.md b/docs/src/IVexamples.md index e623a1f..ea0c8b0 100644 --- a/docs/src/IVexamples.md +++ b/docs/src/IVexamples.md @@ -30,7 +30,7 @@ wildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, reps=0) wildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, reps=0, imposenull=false) # Anderson-Rubin test that hypothesis holds and instrument is valid -wildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, ARubin=true) +wildboottest(R, r; resp, predexog, predendog, inst, clustid=df.industry, arubin=true) # modify model to drop controls and make ttl_exp an instrument f = @formula(wage ~ 1) @@ -42,5 +42,5 @@ predendog, inst = modelcols(ivf, df) # test same hypothesis in context of LIML regression R = [0 1]; r = [0] -wildboottest(R, r; resp, predexog, predendog, inst, LIML=true, clustid=df.industry) +wildboottest(R, r; resp, predexog, predendog, inst, liml=true, clustid=df.industry) ``` diff --git a/docs/src/OLSexamples.md b/docs/src/OLSexamples.md index 33641b6..ab8de30 100644 --- a/docs/src/OLSexamples.md +++ b/docs/src/OLSexamples.md @@ -33,14 +33,14 @@ julia> test = wildboottest(R, r; resp=resp, predexog=predexog, clustid=clustid) WildBootTests.BootTestResult{Float32} p = 0.492 -CI = Float32[0.93461335 1.1347668] +ci = Float32[0.93461335 1.1347668] julia> test = wildboottest(R, r; resp, predexog, clustid); # same, using Julia syntactic sugar julia> p(test) # programmatically extract p value 0.49459493f0 -julia> CI(test) # programmatically extract confidence interval +julia> ci(test) # programmatically extract confidence interval 1×2 Matrix{Float32}: 0.934961 1.13469 diff --git a/docs/src/exported.md b/docs/src/exported.md index 09b942b..10f21da 100644 --- 
a/docs/src/exported.md +++ b/docs/src/exported.md @@ -11,7 +11,7 @@ dof dof_r plotpoints peak -CI +ci dist statnumer statvar diff --git a/src/StrBootTest.jl b/src/StrBootTest.jl index 6097ecf..65d075f 100644 --- a/src/StrBootTest.jl +++ b/src/StrBootTest.jl @@ -13,7 +13,7 @@ struct StrFE{T<:Real} end mutable struct StrEstimator{T<:AbstractFloat} - isDGP::Bool; LIML::Bool; Fuller::T; κ::T + isDGP::Bool; liml::Bool; fuller::T; κ::T R₁perp::Matrix{T}; Rpar::Matrix{T} kZ::Int64 @@ -43,21 +43,21 @@ mutable struct StrEstimator{T<:AbstractFloat} Xpar₁::Matrix{T} invZperpZperpZperpX₁::Matrix{T}; invZperpZperpZperpX₂::Matrix{T}; invZperpZperpZperpy₁::Vector{T}; invZperpZperpZperpY₂::Matrix{T}; S✻UY₂::Matrix{T}; invZperpZperpZperpZpar::Matrix{T}; invZperpZperpZperpZR₁::Matrix{T} - StrEstimator{T}(isDGP, LIML, Fuller, κ) where T<:AbstractFloat = new(isDGP, LIML, Fuller, κ, Matrix{T}(undef,0,0)) + StrEstimator{T}(isDGP, liml, fuller, κ) where T<:AbstractFloat = new(isDGP, liml, fuller, κ, Matrix{T}(undef,0,0)) end mutable struct StrBootTest{T<:AbstractFloat} R::Matrix{T}; r::Vector{T}; R₁::Matrix{T}; r₁::Vector{T} y₁::Vector{T}; X₁::Matrix{T}; Y₂::Matrix{T}; X₂::Matrix{T} wt::Vector{T}; fweights::Bool - LIML::Bool; Fuller::T; κ::T; ARubin::Bool + liml::Bool; fuller::T; κ::T; arubin::Bool B::Int64; auxtwtype::Symbol; rng::AbstractRNG; maxmatsize::Float16 ptype::Symbol; null::Bool; bootstrapt::Bool ID::Matrix{Int64}; NBootClustVar::Int8; NErrClustVar::Int8; issorted::Bool; small::Bool; clusteradj::Bool; clustermin::Bool FEID::Vector{Int64}; FEdfadj::Int64 level::T; rtol::T madjtype::Symbol; NH₀::Int16 - ML::Bool; β̈::Vector{T}; A::Symmetric{T,Matrix{T}}; sc::Matrix{T} + ml::Bool; β̈::Vector{T}; A::Symmetric{T,Matrix{T}}; sc::Matrix{T} willplot::Bool; gridmin::Vector{T}; gridmax::Vector{T}; gridpoints::Vector{Float32} q::Int16; twotailed::Bool; scorebs::Bool; robust::Bool @@ -67,7 +67,7 @@ mutable struct StrBootTest{T<:AbstractFloat} confpeak::Vector{T} ID✻::Vector{Int64}; 
ID✻_✻⋂::Vector{Int64} anchor::Vector{T}; poles::Vector{T}; numer::Matrix{T} - CI::Matrix{T} + ci::Matrix{T} peak::NamedTuple{(:X, :p), Tuple{Vector{T}, T}} Nobs::Int64; NClustVar::Int8; kX₁::Int64; kX₂::Int64; kY₂::Int64; WREnonARubin::Bool; boottest!::Function @@ -110,18 +110,18 @@ mutable struct StrBootTest{T<:AbstractFloat} S⋂ReplZX::Array{T,3}; S⋂Xy₁::Matrix{T} S✻⋂XU₂::Array{T,3}; S✻⋂XU₂RparY::Array{T,3}; S✻XU₂::Array{T,3}; S✻XU₂RparY::Array{T,3}; S✻ZperpU₂::Array{T,3}; S✻ZperpU₂RparY::Array{T,3}; invZperpZperpS✻ZperpU₂::Array{T,3}; invZperpZperpS✻ZperpU₂RparY::Array{T,3}; invXXS✻XU₂::Array{T,3}; invXXS✻XU₂RparY::Array{T,3} - StrBootTest{T}(R, r, R₁, r₁, y₁, X₁, Y₂, X₂, wt, fweights, LIML, - Fuller, κ, ARubin, B, auxtwtype, rng, maxmatsize, ptype, null, scorebs, bootstrapt, ID, NBootClustVar, NErrClustVar, issorted, robust, small, clusteradj, clustermin, - NFE, FEID, FEdfadj, level, rtol, madjtype, NH₀, ML, + StrBootTest{T}(R, r, R₁, r₁, y₁, X₁, Y₂, X₂, wt, fweights, liml, + fuller, κ, arubin, B, auxtwtype, rng, maxmatsize, ptype, null, scorebs, bootstrapt, ID, NBootClustVar, NErrClustVar, issorted, robust, small, clusteradj, clustermin, + NFE, FEID, FEdfadj, level, rtol, madjtype, NH₀, ml, β̈, A, sc, willplot, gridmin, gridmax, gridpoints) where T<:Real = begin kX₂ = ncols(X₂) - scorebs = scorebs || iszero(B) || ML - WREnonARubin = !(iszero(kX₂) || scorebs) && !ARubin + scorebs = scorebs || iszero(B) || ml + WREnonARubin = !(iszero(kX₂) || scorebs) && !arubin - new(R, r, R₁, r₁, y₁, X₁, Y₂, X₂, wt, fweights, LIML || !iszero(Fuller), - Fuller, κ, ARubin, B, auxtwtype, rng, maxmatsize, ptype, null, bootstrapt, ID, NBootClustVar, NErrClustVar, issorted, small, clusteradj, clustermin, - FEID, FEdfadj, level, rtol, madjtype, NH₀, ML, + new(R, r, R₁, r₁, y₁, X₁, Y₂, X₂, wt, fweights, liml || !iszero(fuller), + fuller, κ, arubin, B, auxtwtype, rng, maxmatsize, ptype, null, bootstrapt, ID, NBootClustVar, NErrClustVar, issorted, small, clusteradj, clustermin, + FEID, 
FEdfadj, level, rtol, madjtype, NH₀, ml, β̈, A, sc, willplot, gridmin, gridmax, gridpoints, nrows(R), ptype == :symmetric || ptype == :equaltail, scorebs, robust || NErrClustVar>0, false, false, NFE, false, false, 0, 0, 0, false, diff --git a/src/WRE.jl b/src/WRE.jl index a582e3f..74202a6 100644 --- a/src/WRE.jl +++ b/src/WRE.jl @@ -4,7 +4,7 @@ function InitWRE!(o::StrBootTest{T}) where T iszero(o.granular) && (o.Repl.Zperp = o.DGP.Zperp = Matrix{T}(undef,0,0)) # drop this potentially large array - o.LIML && o.Repl.kZ==1 && o.Nw==1 && (o.As = o.β̈s = zeros(1, o.B+1)) + o.liml && o.Repl.kZ==1 && o.Nw==1 && (o.As = o.β̈s = zeros(1, o.B+1)) o.S✻ZperpU = [Matrix{T}(undef, o.Repl.kZperp, o.N✻) for _ ∈ 0:o.Repl.kZ] o.invZperpZperpS✻ZperpU = [Matrix{T}(undef, o.Repl.kZperp, o.N✻) for _ ∈ 0:o.Repl.kZ] @@ -35,11 +35,11 @@ function InitWRE!(o::StrBootTest{T}) where T o.S✻UMZperp = [Matrix{T}(undef, o.Nobs, o.N✻) for _ ∈ 0:o.Repl.kZ] o.S✻UPX = [Matrix{T}(undef, o.Nobs, o.N✻) for _ ∈ 0:o.Repl.kZ] end - if o.LIML || !o.robust + if o.liml || !o.robust o.YY✻_b = zeros(o.Repl.kZ+1, o.Repl.kZ+1) o.YPXY✻_b = zeros(o.Repl.kZ+1, o.Repl.kZ+1) end - o.NFE>0 && (o.bootstrapt || !isone(o.κ) || o.LIML) && (o.CTFEU = Vector{Matrix{T}}(undef, o.Repl.kZ+1)) + o.NFE>0 && (o.bootstrapt || !isone(o.κ) || o.liml) && (o.CTFEU = Vector{Matrix{T}}(undef, o.Repl.kZ+1)) end o.S✻⋂XY₂ = o.Repl.S✻⋂XY₂ - o.Repl.S✻⋂XZperp * o.Repl.invZperpZperpZperpY₂ - o.Repl.invZperpZperpZperpX' * (o.Repl.S✻⋂ZperpY₂ - o.Repl.S✻⋂ZperpZperp * o.Repl.invZperpZperpZperpY₂ ) o.S✻⋂XX = o.Repl.S✻⋂XX - o.Repl.S✻⋂XZperp * o.Repl.invZperpZperpZperpX - o.Repl.invZperpZperpZperpX' * (o.Repl.S✻⋂XZperp' - o.Repl.S✻⋂ZperpZperp * o.Repl.invZperpZperpZperpX ) @@ -76,7 +76,7 @@ function InitWRE!(o::StrBootTest{T}) where T o.S✻ZperpDGPZR₁ = @panelsum(o, o.DGP.S✻⋂ZperpZR₁ , o.info✻_✻⋂) - S✻ZperpZperp * o.DGP.invZperpZperpZperpZR₁ end - if o.NFE>0 && (o.LIML || !isone(o.κ) || o.bootstrapt) + if o.NFE>0 && (o.liml || !isone(o.κ) || 
o.bootstrapt) CT✻⋂FEX = [crosstabFE(o, o.Repl.X₁, o.info✻⋂) crosstabFE(o, o.Repl.X₂, o.info✻⋂)] o.CT✻FEX = @panelsum(o, CT✻⋂FEX, o.info✻_✻⋂) o.CT✻FEY₂ = crosstabFE(o, o.DGP.Y₂, o.info✻) @@ -86,7 +86,7 @@ function InitWRE!(o::StrBootTest{T}) where T (o.CT✻FEZR₁ = crosstabFE(o, o.DGP.ZR₁, o.info✻)) end - if ((o.robust && o.bootstrapt) || o.LIML || !o.robust || !isone(o.κ)) + if ((o.robust && o.bootstrapt) || o.liml || !o.robust || !isone(o.κ)) S✻⋂ReplZX = (o.Repl.S✻⋂XZpar - o.Repl.S✻⋂XZperp * o.Repl.invZperpZperpZperpZpar - o.Repl.invZperpZperpZperpX' * (o.Repl.S✻⋂ZperpZpar - o.Repl.S✻⋂ZperpZperp * o.Repl.invZperpZperpZperpZpar))' end @@ -124,7 +124,7 @@ function InitWRE!(o::StrBootTest{T}) where T end end - if o.LIML || !o.robust || !isone(o.κ) # cluster-wise moments after FWL + if o.liml || !o.robust || !isone(o.κ) # cluster-wise moments after FWL o.S✻Y₂Y₂ = o.Repl.S✻Y₂Y₂ - _S✻ZperpY₂' * o.DGP.invZperpZperpZperpY₂ - o.DGP.invZperpZperpZperpY₂' * o.S✻ZperpY₂ o.S✻DGPZDGPZ = o.DGP.S✻ZparZpar - _S✻ZperpDGPZpar' * o.DGP.invZperpZperpZperpZpar - o.DGP.invZperpZperpZperpZpar' * o.S✻ZperpDGPZ o.S✻DGPZY₂ = o.DGP.S✻ZparY₂ - _S✻ZperpDGPZpar' * o.DGP.invZperpZperpZperpY₂ - o.DGP.invZperpZperpZperpZpar' * o.S✻ZperpY₂ @@ -197,7 +197,7 @@ function PrepWRE!(o::StrBootTest{T}) where T o.invXXS✻XU₂ .= o.Repl.invXX * o.S✻XU₂ o.invXXS✻XU₂RparY .= o.invXXS✻XU₂ * o.Repl.RparY - if o.LIML || !o.robust || !isone(o.κ) + if o.liml || !o.robust || !isone(o.κ) S✻U₂y₁ = o.S✻Y₂y₁ - o.DGP.Π̂' * o.S✻Xy₁ S✻U₂RparYy₁ = o.Repl.RparY' * S✻U₂y₁ S✻ZU₂ = o.S✻ReplZY₂ - o.S✻ReplZX * o.DGP.Π̂ @@ -219,7 +219,7 @@ function PrepWRE!(o::StrBootTest{T}) where T end end - if (o.LIML || o.bootstrapt || !isone(o.κ)) && o.NFE>0 + if (o.liml || o.bootstrapt || !isone(o.κ)) && o.NFE>0 CT✻FEU = o.CT✻FEY₂ - o.CT✻FEX * o.DGP.Π̂ CT✻FEURparY = CT✻FEU * o.Repl.RparY end @@ -238,7 +238,7 @@ function PrepWRE!(o::StrBootTest{T}) where T o.invXXS✻XU[i+1] .= view(o.invXXS✻XU₂RparY,:,:,i) end - if o.LIML || !isone(o.κ) || 
o.bootstrapt + if o.liml || !isone(o.κ) || o.bootstrapt if iszero(i) o.S✻ZperpU[1] .= o.S✻Zperpy₁ - o.S✻ZperpDGPZ * o.DGP.β̈ + o.S✻ZperpU₂ * o.DGP.γ̈ o.invZperpZperpS✻ZperpU[1] .= o.invZperpZperpS✻Zperpy₁ - o.invZperpZperpS✻ZperpDGPZ * o.DGP.β̈ + o.invZperpZperpS✻ZperpU₂ * o.DGP.γ̈ @@ -262,7 +262,7 @@ function PrepWRE!(o::StrBootTest{T}) where T end end - if o.LIML || !isone(o.κ) || !o.robust + if o.liml || !isone(o.κ) || !o.robust if iszero(i) # panelsum2(o, o.Repl.y₁par, o.Repl.Z, uwt, o.info✻) o.S✻YU[1,1] .= o.S✻y₁y₁ - o.S✻DGPZy₁'o.DGP.β̈ + S✻U₂y₁'o.DGP.γ̈ o.DGP.restricted && @@ -514,7 +514,7 @@ end function MakeWREStats!(o::StrBootTest{T}, w::Integer) where T if isone(o.Repl.kZ) # optimized code for 1 coefficient in bootstrap regression - if o.LIML + if o.liml YY₁₁ = HessianFixedkappa(o, [0], 0, zero(T), w) # κ=0 => Y*MZperp*Y YY₁₂ = HessianFixedkappa(o, [0], 1, zero(T), w) YY₂₂ = HessianFixedkappa(o, [1], 1, zero(T), w) @@ -527,7 +527,7 @@ function MakeWREStats!(o::StrBootTest{T}, w::Integer) where T x₂₁ = YY₁₁ .* YPXY₁₂ .- YY₁₂ .* YPXY₁₁ x₂₂ = YY₁₁ .* YPXY₂₂ .- YY₁₂YPXY₁₂ κs = (x₁₁ .+ x₂₂)./2; κs .= 1 ./ (1 .- (κs .- sqrtNaN.(κs.^2 .- x₁₁ .* x₂₂ .+ x₁₂ .* x₂₁)) ./ (YY₁₁ .* YY₂₂ .- YY₁₂ .* YY₁₂)) # solve quadratic equation for smaller eignenvalue; last term is det(YY✻) - !iszero(o.Fuller) && (κs .-= o.Fuller / (o._Nobs - o.kX)) + !iszero(o.fuller) && (κs .-= o.fuller / (o._Nobs - o.kX)) o.As = κs .* (YPXY₂₂ .- YY₂₂) .+ YY₂₂ o.β̈s = (κs .* (YPXY₁₂ .- YY₁₂) .+ YY₁₂) ./ o.As else @@ -566,7 +566,7 @@ function MakeWREStats!(o::StrBootTest{T}, w::Integer) where T β̈s = zeros(T, o.Repl.kZ, ncols(o.v)) A = Vector{Matrix{T}}(undef, ncols(o.v)) - if o.LIML + if o.liml YY✻ = [HessianFixedkappa(o, collect(0:i), i, zero(T), w) for i ∈ 0:o.Repl.kZ] # κ=0 => Y*MZperp*Y YPXY✻ = [HessianFixedkappa(o, collect(0:i), i, one(T), w) for i ∈ 0:o.Repl.kZ] # κ=1 => Y*PXpar*Y @@ -576,7 +576,7 @@ function MakeWREStats!(o::StrBootTest{T}, w::Integer) where T o.YPXY✻_b[1:i+1,i+1] = 
YPXY✻[i+1][:,b] end o.κ = 1/(1 - real(eigvals(invsym(o.YY✻_b) * Symmetric(o.YPXY✻_b))[1])) - !iszero(o.Fuller) && (o.κ -= o.Fuller / (o._Nobs - o.kX)) + !iszero(o.fuller) && (o.κ -= o.fuller / (o._Nobs - o.kX)) β̈s[:,b] = (A[b] = invsym(o.κ*o.YPXY✻_b[2:end,2:end] + (1-o.κ)*o.YY✻_b[2:end,2:end])) * (o.κ*o.YPXY✻_b[1,2:end] + (1-o.κ)*o.YY✻_b[1,2:end]) end else diff --git a/src/WildBootTests.jl b/src/WildBootTests.jl index d88558c..34b78aa 100644 --- a/src/WildBootTests.jl +++ b/src/WildBootTests.jl @@ -1,5 +1,5 @@ module WildBootTests -export BootTestResult, wildboottest, teststat, stattype, p, padj, reps, repsfeas, nbootclust, dof, dof_r, plotpoints, peak, CI, dist, statnumer, statvar, auxweights +export BootTestResult, wildboottest, teststat, stattype, p, padj, reps, repsfeas, nbootclust, dof, dof_r, plotpoints, peak, ci, dist, statnumer, statvar, auxweights using LinearAlgebra, Random, Distributions, SortingAlgorithms, Printf @@ -12,7 +12,7 @@ include("nonWRE.jl") include("plot-CI.jl") include("interface.jl") -# top-level computation routine for OLS/ARubin (and score BS on IV/2SLS); split off to reduce latency when just doing WRE +# top-level computation routine for OLS/arubin (and score BS on IV/2SLS); split off to reduce latency when just doing WRE function boottestOLSARubin!(o::StrBootTest{T}) where T if !o.initialized Init!(o) @@ -40,7 +40,7 @@ function boottestOLSARubin!(o::StrBootTest{T}) where T nothing end -# top-level computation routine for non-ARubin WRE; split off to reduce latency when just doing other tests +# top-level computation routine for non-arubin WRE; split off to reduce latency when just doing other tests function boottestWRE!(o::StrBootTest{T}) where T if !o.initialized Init!(o) @@ -72,11 +72,11 @@ end function NoNullUpdate!(o::StrBootTest{T} where T) if o.WREnonARubin o.numer[:,1] = o.R * o.DGP.Rpar * o.β̈s[1] - o.r - elseif o.ARubin + elseif o.arubin EstimateARubin!(o.DGP, o, o.r) - o.numer[:,1] = o.v_sd * @view o.DGP.β̈[o.kX₁+1:end,:] # 
coefficients on excluded instruments in ARubin OLS + o.numer[:,1] = o.v_sd * @view o.DGP.β̈[o.kX₁+1:end,:] # coefficients on excluded instruments in arubin OLS else - o.numer[:,1] = o.v_sd * (o.R * (o.ML ? o.β̈ : iszero(o.κ) ? o.M.β̈ : o.M.Rpar * o.M.β̈) - o.r) # Analytical Wald numerator; if imposing null then numer[:,1] already equals this. If not, then it's 0 before this + o.numer[:,1] = o.v_sd * (o.R * (o.ml ? o.β̈ : iszero(o.κ) ? o.M.β̈ : o.M.Rpar * o.M.β̈) - o.r) # Analytical Wald numerator; if imposing null then numer[:,1] already equals this. If not, then it's 0 before this end o.dist[1] = isone(o.dof) ? o.numer[1] / sqrtNaN(o.statDenom[1]) : o.numer[:,1]'invsym(o.statDenom)*o.numer[:,1] nothing diff --git a/src/estimators.jl b/src/estimators.jl index 6bf5f24..1abe020 100644 --- a/src/estimators.jl +++ b/src/estimators.jl @@ -125,7 +125,7 @@ function InitVarsIV!(o::StrEstimator{T}, parent::StrBootTest{T}, Rperp::Abstract o.invZperpZperpZperpX₂ = o.invZperpZperp * ZperpX₂ o.invZperpZperpZperpX = [o.invZperpZperpZperpX₁ o.invZperpZperpZperpX₂] - if parent.NFE>0 && (parent.LIML || !isone(parent.κ) || parent.bootstrapt) || parent.granular && parent.robust && parent.bootstrapt || !o.LIML && !isempty(Rperp) + if parent.NFE>0 && (parent.liml || !isone(parent.κ) || parent.bootstrapt) || parent.granular && parent.robust && parent.bootstrapt || !o.liml && !isempty(Rperp) o.X₁ = o.Xpar₁ - o.Zperp * o.invZperpZperpZperpX₁ # shrink and FWL-process X₁; do it as an O(N) operation because it can be so size-reducing o.X₂ = o.Zperp * o.invZperpZperpZperpX₂; o.X₂ .= parent.X₂ .- o.X₂ # FWL-process X₂ end @@ -144,12 +144,12 @@ function InitVarsIV!(o::StrEstimator{T}, parent::StrBootTest{T}, Rperp::Abstract o.S✻⋂ZperpY₂ = panelcross(o.Zperp, parent.Y₂, parent.info✻⋂) ZperpY₂ = sumpanelcross(o.S✻⋂ZperpY₂) o.invZperpZperpZperpY₂ = o.invZperpZperp * ZperpY₂ - ((parent.NFE>0 && (parent.LIML || !isone(parent.κ) || parent.bootstrapt)) || (parent.robust && parent.bootstrapt && 
parent.granular)) && + ((parent.NFE>0 && (parent.liml || !isone(parent.κ) || parent.bootstrapt)) || (parent.robust && parent.bootstrapt && parent.granular)) && (o.Y₂ = parent.Y₂ - o.Zperp * o.invZperpZperpZperpY₂) o.S✻⋂Zperpy₁ = panelcross(o.Zperp, parent.y₁, parent.info✻⋂) Zperpy₁ = sumpanelcross(o.S✻⋂Zperpy₁) o.invZperpZperpZperpy₁ = o.invZperpZperp * Zperpy₁ - ((parent.NFE>0 && (parent.LIML || !isone(parent.κ) || parent.bootstrapt)) || (parent.scorebs || parent.robust && parent.bootstrapt && parent.granular)) && + ((parent.NFE>0 && (parent.liml || !isone(parent.κ) || parent.bootstrapt)) || (parent.scorebs || parent.robust && parent.bootstrapt && parent.granular)) && (o.y₁ = parent.y₁ - o.Zperp * o.invZperpZperpZperpy₁) o.S✻⋂X₁Y₂ = panelcross(o.Xpar₁, parent.Y₂, parent.info✻⋂) @@ -237,13 +237,13 @@ function InitVarsIV!(o::StrEstimator{T}, parent::StrBootTest{T}, Rperp::Abstract o.Z .-= o.Zperp * o.invZperpZperpZperpZpar - o.V = o.invXX * o.XZ # in 2SLS case, estimator is (V' XZ)^-1 * (V'Xy₁). Also used in k-class and LIML robust VCV by Stata convention + o.V = o.invXX * o.XZ # in 2SLS case, estimator is (V' XZ)^-1 * (V'Xy₁). 
Also used in k-class and liml robust VCV by Stata convention if o.isDGP parent.WREnonARubin && (o.ZY₂ = sumpanelcross(o.S✻ZparY₂) - ZperpZpar'o.invZperpZperpZperpY₂) o.H_2SLS = Symmetric(o.V'o.XZ) # Hessian - (o.LIML || !isone(o.κ)) && (o.H_2SLSmZZ = o.H_2SLS - o.ZZ) - !o.LIML && MakeH!(o, parent, !isempty(Rperp)) # DGP is LIML except possibly when getting confidence peak for A-R plot; but LIML=0 when exactly id'd, for then κ=1 always and Hessian doesn't depend on r₁ and can be computed now + (o.liml || !isone(o.κ)) && (o.H_2SLSmZZ = o.H_2SLS - o.ZZ) + !o.liml && MakeH!(o, parent, !isempty(Rperp)) # DGP is liml except possibly when getting confidence peak for A-R plot; but liml=0 when exactly id'd, for then κ=1 always and Hessian doesn't depend on r₁ and can be computed now else o.kZ = ncols(o.Rpar) o.Yendog = [true colsum(o.RparY .!= zero(T)).!=0] # columns of Y = [y₁par Zpar] that are endogenous (normally all) @@ -252,7 +252,7 @@ function InitVarsIV!(o::StrEstimator{T}, parent::StrBootTest{T}, Rperp::Abstract end -# do most of estimation; for LIML r₁ must be passed now in order to solve eigenvalue problem involving it +# do most of estimation; for liml r₁ must be passed now in order to solve eigenvalue problem involving it # inconsistency: for replication regression of Anderson-Rubin, r₁ refers to the *null*, not the maintained constraints, because that's what affects the endogenous variables # For OLS, compute β̈₀ (β̈ when r=0) and ∂β̈∂r without knowing r₁, for efficiency # For WRE, should only be called once for the replication regressions, since for them r₁ is the unchanging model constraints @@ -296,9 +296,9 @@ function EstimateIV!(o::StrEstimator{T}, parent::StrBootTest{T}, r₁::AbstractV o.YPXY = Symmetric([[o.invXXXy₁par'o.Xy₁par] o.ZXinvXXXy₁par' ; o.ZXinvXXXy₁par o.ZXinvXXXZ]) if o.isDGP - if o.LIML + if o.liml o.κ = 1/(1 - real(eigvals(invsym(o.YY) * o.YPXY)[1])) # like Fast & Wild (81), but more stable, at least in Mata - !iszero(o.Fuller) && (o.κ -= 
o.Fuller / (parent._Nobs - parent.kX)) + !iszero(o.fuller) && (o.κ -= o.fuller / (parent._Nobs - parent.kX)) MakeH!(o, parent) end @@ -338,7 +338,7 @@ end # non-WRE stuff that only depends on r in A-R case, for test stat denominators in replication regressions # since the non-AR OLS code never creates an object for replication regresssions, in that case this is called on the DGP regression object -# depends on results of Estimate() only when doing OLS-style bootstrap on an overidentified IV/GMM regression--score bootstrap or A-R. Then κ from DGP LIML affects Hessian, H. +# depends on results of Estimate() only when doing OLS-style bootstrap on an overidentified IV/GMM regression--score bootstrap or A-R. Then κ from DGP liml affects Hessian, H. function InitTestDenoms!(o::StrEstimator{T}, parent::StrBootTest{T}) where T if parent.bootstrapt && (parent.scorebs || parent.robust) (parent.granular || parent.purerobust) && (o.WXAR = o.XAR) # XXX simplify diff --git a/src/init.jl b/src/init.jl index 7dea331..3f2a287 100644 --- a/src/init.jl +++ b/src/init.jl @@ -5,16 +5,16 @@ function Init!(o::StrBootTest{T}) where T # for efficiency when varying r repea o.kX₂==0 && (o.X₂ = zeros(T,o.Nobs,0)) iszero(o.kY₂) && (o.Y₂ = zeros(T,o.Nobs,0)) o.kZ = o.kX₁ + o.kY₂ - if o.LIML && o.kX₂==o.kY₂ # exactly identified LIML = 2SLS + if o.liml && o.kX₂==o.kY₂ # exactly identified liml = 2SLS o.κ = one(T) - o.LIML = false + o.liml = false end if !(o.REst = length(o.R₁)>0) # base model contains no restrictions? o.R₁ = zeros(T,0,o.kZ) o.r₁ = zeros(T,0) end isnan(o.κ) && (o.κ = o.kX₂>0 ? 
one(T) : zero(T)) # if κ in κ-class estimation not specified, it's 0 or 1 for OLS or 2SLS - o.WRE = !(iszero(o.κ) || o.scorebs) || o.ARubin + o.WRE = !(iszero(o.κ) || o.scorebs) || o.arubin iszero(o.B) && (o.scorebs = true) @@ -184,26 +184,26 @@ function Init!(o::StrBootTest{T}) where T # for efficiency when varying r repea o.WeightGrp[end] = first(o.WeightGrp[end]):o.B+1 end - if o.ML + if o.ml o.dof = nrows(o.R) else - if o.ARubin + if o.arubin o.R = hcat(zeros(o.kX₂,o.kX₁), Matrix(I(o.kX₂))) # attack surface is all endog vars o.R₁ = o.kX₁>0 && nrows(o.R₁)>0 ? hcat(o.R₁[:,1:o.kX₁], zeros(nrows(o.R₁),o.kX₂)) : zeros(0, o.kX) # and convert model constraints from referring to X₁, Y₂ to X₁, X₂ end o.dof = nrows(o.R) if !o.WRE && iszero(o.κ) # regular OLS - o.DGP = StrEstimator{T}(true, o.LIML, o.Fuller, o.κ) + o.DGP = StrEstimator{T}(true, o.liml, o.fuller, o.κ) o.Repl = StrEstimator{T}(true, false, zero(T), zero(T)) # XXX isDGP=1 for Repl? doesn't matter? setR!(o.DGP, o, o.null ? [o.R₁ ; o.R] : o.R₁) # DGP constraints: model constraints + null if imposed setR!(o.Repl, o, o.R₁) # model constraints only InitVarsOLS!(o.DGP, o, o.Repl.R₁perp) InitTestDenoms!(o.DGP, o) o.M = o.DGP # StrEstimator object from which to get A, AR, XAR - elseif o.ARubin + elseif o.arubin if o.willplot # for plotting/CI purposes get original point estimate since not normally generated - o.DGP = StrEstimator{T}(true, o.LIML, o.Fuller, o.κ) + o.DGP = StrEstimator{T}(true, o.liml, o.fuller, o.κ) setR!(o.DGP, o, o.R₁, zeros(T,0,o.kZ)) # no-null model InitVarsIV!(o.DGP, o) EstimateIV!(o.DGP, o, o.r₁) @@ -218,7 +218,7 @@ function Init!(o::StrBootTest{T}) where T # for efficiency when varying r repea o.kZ = o.kX elseif o.WREnonARubin - o.Repl = StrEstimator{T}(false, o.LIML, o.Fuller, o.κ) + o.Repl = StrEstimator{T}(false, o.liml, o.fuller, o.κ) setR!(o.Repl, o, o.R₁, o.R) InitVarsIV!(o.Repl, o) EstimateIV!(o.Repl, o, o.r₁) @@ -235,10 +235,10 @@ function Init!(o::StrBootTest{T}) where T # for 
efficiency when varying r repea else # the score bootstrap for IV/GMM uses a IV/GMM DGP but then masquerades as an OLS test because most factors are fixed during the bootstrap. To conform, need DGP and Repl objects with different R, R₁, one with FWL, one not - o.DGP = StrEstimator{T}(true, o.LIML, o.Fuller, o.κ) + o.DGP = StrEstimator{T}(true, o.liml, o.fuller, o.κ) setR!(o.DGP, o, o.null ? [o.R₁ ; o.R] : o.R₁, zeros(T,0,o.kZ)) # DGP constraints: model constraints + null if imposed InitVarsIV!(o.DGP, o) - o.Repl = StrEstimator{T}(true, o.LIML, o.Fuller, o.κ) + o.Repl = StrEstimator{T}(true, o.liml, o.fuller, o.κ) setR!(o.Repl, o, o.R₁, I) # process replication restraints = model constraints only InitVarsIV!(o.Repl, o, o.Repl.R₁perp) EstimateIV!(o.Repl, o, o.r₁) # bit inefficient to estimate in both objects, but maintains the conformity @@ -280,7 +280,7 @@ function Init!(o::StrBootTest{T}) where T # for efficiency when varying r repea end o.clustermin && (o.smallsample *= (minN - 1) / minN) # ivreg2-style adjustment when multiway clustering o.multiplier = o.small ? 
o.smallsample / o.dof : o.smallsample # divide by # of constraints because F stat is so defined - !(o.robust || o.ML) && (o.multiplier *= o._Nobs) # will turn sum of squared errors in denom of t/z into mean + !(o.robust || o.ml) && (o.multiplier *= o._Nobs) # will turn sum of squared errors in denom of t/z into mean o.sqrt && (o.multiplier = √o.multiplier) o.dist = fill(T(NaN), 1, o.B+1) @@ -288,8 +288,8 @@ function Init!(o::StrBootTest{T}) where T # for efficiency when varying r repea if !o.WREnonARubin o.poles = o.anchor = zeros(T,0) - o.interpolable = o.bootstrapt && o.null && o.Nw==1 && (iszero(o.κ) || o.ARubin) - o.interpolate_u = !(o.robust || o.ML) + o.interpolable = o.bootstrapt && o.null && o.Nw==1 && (iszero(o.κ) || o.arubin) + o.interpolate_u = !(o.robust || o.ml) if o.interpolable o.∂numer∂r = Vector{Matrix{T}}(undef, o.q) o.interpolate_u && (o.∂u∂r = Vector{Matrix{T}}(undef, o.q)) diff --git a/src/interface.jl b/src/interface.jl index dd2f2db..aa99e53 100644 --- a/src/interface.jl +++ b/src/interface.jl @@ -9,7 +9,7 @@ struct BootTestResult{T} dof::Int64; dof_r::T plot::Union{Nothing, NamedTuple{(:X, :p), Tuple{Tuple{Vararg{Vector{T}, N} where N},Vector{T}}}} peak::Union{Nothing, NamedTuple{(:X, :p), Tuple{Vector{T}, T}}} - CI::Union{Nothing, Matrix{T}} + ci::Union{Nothing, Matrix{T}} dist::Matrix{T} b::Vector{T} V::Matrix{T} @@ -63,7 +63,7 @@ plotpoints(o::BootTestResult) = o.plot peak(o::BootTestResult) = o.peak "Return confidence interval matrix from test, one row per disjoint piece" -CI(o::BootTestResult) = o.CI +ci(o::BootTestResult) = o.ci "Return bootstrap distribution of statistic or statistic numerator in bootstrap test" dist(o::BootTestResult) = o.dist @@ -77,7 +77,7 @@ function Base.show(io::IO, o::BootTestResult{T}) where T isone(dof(o)) ? 
"(" * strint(dof_r(o)) * ")" : "(" * strint(dof(o)) * ", " * strint(dof_r(o)) * ")" ) # t, F Printf.@printf(io, "%s = %6.4f\n", s, teststat(o)) Printf.@printf(io, "p%s = %6.4f\n", repeat(" ", length(s)-1), p(o)) - isdefined(o, :CI) && !isnothing(o.CI) && length(o.CI)>0 && print(io, "CI" * repeat(" ", length(s)-2) * " = $(round.(CI(o); sigdigits=4))\n") + isdefined(o, :ci) && !isnothing(o.ci) && length(o.ci)>0 && print(io, "CI" * repeat(" ", length(s)-2) * " = $(round.(ci(o); sigdigits=4))\n") end # single entry point with arguments already converted to standardized types, to allow a smaller set of precompile() calls(?) @@ -103,10 +103,10 @@ function __wildboottest( maxmatsize::Float16, ptype::Symbol, bootstrapc::Bool, - LIML::Bool, - Fuller::T, + liml::Bool, + fuller::T, kappa::T, - ARubin::Bool, + arubin::Bool, small::Bool, clusteradj::Bool, clustermin::Bool, @@ -118,8 +118,8 @@ function __wildboottest( level::T, rtol::T, madjtype::Symbol, - NH0::Int16, - ML::Bool, + nH0::Int16, + ml::Bool, scores::Matrix{T}, beta::Vector{T}, A::Symmetric{T,Matrix{T}}, @@ -127,29 +127,29 @@ function __wildboottest( gridmax::VecOrMat{T}, gridpoints::VecOrMat{T}, diststat::Symbol, - getCI::Bool, + getci::Bool, getplot::Bool, getauxweights::Bool) where T - M = StrBootTest{T}(R, r, R1, r1, resp, predexog, predendog, inst, obswt, fweights, LIML, Fuller, kappa, ARubin, + M = StrBootTest{T}(R, r, R1, r1, resp, predexog, predendog, inst, obswt, fweights, liml, fuller, kappa, arubin, reps, auxwttype, rng, maxmatsize, ptype, imposenull, scorebs, !bootstrapc, clustid, nbootclustvar, nerrclustvar, issorted, hetrobust, small, clusteradj, clustermin, - nfe, feid, fedfadj, level, rtol, madjtype, NH0, ML, beta, A, scores, getplot, + nfe, feid, fedfadj, level, rtol, madjtype, nH0, ml, beta, A, scores, getplot, gridmin, gridmax, gridpoints) - if getplot || (level<1 && getCI) + if getplot || (level<1 && getci) plot!(M) plot = getplot & isdefined(M, :plotX) ? 
(X=Tuple(M.plotX), p=M.plotY) : nothing peak = M.peak - CI = level<1 & getCI ? M.CI : nothing + ci = level<1 & getci ? M.ci : nothing else - CI = plot = peak = nothing + ci = plot = peak = nothing end padj = getp(M) # trigger main (re)computation BootTestResult{T}(getstat(M), isone(nrows(R)) ? (small ? "t" : "z") : (small ? "F" : "χ²"), - M.p, padj, M.B, M.BFeas, M.N✻, M.dof, M.dof_r, plot, peak, CI, + M.p, padj, M.B, M.BFeas, M.N✻, M.dof, M.dof_r, plot, peak, ci, getdist(M, diststat), getb(M), getV(M), getauxweights && reps>0 ? getv(M) : nothing #=, M=#) @@ -180,10 +180,10 @@ function _wildboottest(T::DataType, maxmatsize::Number=0, ptype::Symbol=:symmetric, bootstrapc::Bool=false, - LIML::Bool=false, - Fuller::Number=0, + liml::Bool=false, + fuller::Number=0, kappa::Number=NaN, - ARubin::Bool=false, + arubin::Bool=false, small::Bool=true, clusteradj::Bool=small, clustermin::Bool=false, @@ -195,8 +195,8 @@ function _wildboottest(T::DataType, level::Number=.95, rtol::Number=1e-6, madjtype::Symbol=:none, - NH0::Integer=1, - ML::Bool=false, + nH0::Integer=1, + ml::Bool=false, scores::AbstractVecOrMat=Matrix{Float32}(undef,0,0), beta::AbstractVecOrMat=T[], A::AbstractMatrix=zeros(T,0,0), @@ -204,17 +204,17 @@ function _wildboottest(T::DataType, gridmax::Union{VecOrMat{S},VecOrMat{Union{S,Missing}}} where S<:Number = T[], gridpoints::Union{VecOrMat{S},VecOrMat{Union{S,Missing}}} where S<:Number = Int64[], diststat::Symbol=:none, - getCI::Bool=true, - getplot::Bool=getCI, + getci::Bool=true, + getplot::Bool=getci, getauxweights::Bool=false) - nrows(R)>2 && (getplot = getCI = false) + nrows(R)>2 && (getplot = getci = false) @assert any(auxwttype .== (:rademacher, :mammen, :webb, :gamma, :normal)) "auxwttype shoud be :rademacher, :mammen, :webb, :gamma, or :normal" @assert any(ptype .==(:symmetric, :equaltail, :lower, :upper)) "ptype should be :symmetric, :equaltail, :lower, or :upper" @assert any(madjtype .== (:none, :bonferroni, :sidak)) "madjtype should be :none, 
:bonferroni, or :sidak" @assert any(diststat .== (:none, :t, :numer)) - @assert ML || ncols(resp)==1 "resp should have one column" + @assert ml || ncols(resp)==1 "resp should have one column" @assert (length(predexog)==0 || nrows(predexog)==nrows(resp)) && (length(predendog)==0 || nrows(predendog)==nrows(resp)) && (length(inst)==0 || nrows(inst)==nrows(resp)) "All data vectors/matrices must have same height" @@ -225,7 +225,7 @@ function _wildboottest(T::DataType, @assert nrows(obswt)==0 || nrows(obswt)==nrows(resp) "obswt must have same height as data matrices" @assert ncols(obswt)≤1 "obswt must have one column" @assert nrows(R)==nrows(r) "R and r must have same height" - @assert (ncols(R) == (ML ? nrows(beta) : ncols(predexog)+ncols(predendog)) && isone(ncols(r))) "Wrong number of columns in null specification" + @assert (ncols(R) == (ml ? nrows(beta) : ncols(predexog)+ncols(predendog)) && isone(ncols(r))) "Wrong number of columns in null specification" @assert nrows(R1)==nrows(r1) "R₁ and r₁ must have same height" @assert length(R1)==0 || ncols(R1)==ncols(predexog)+ncols(predendog) "Wrong number of columns in model constraint specification" @assert ncols(r)==1 "r should have one column" @@ -235,13 +235,13 @@ function _wildboottest(T::DataType, @assert reps ≥ 0 "reps < 0" @assert level ≥ 0. && level≤1. "level must be in the range [0,1]" @assert rtol > 0. 
"rtol ≤ 0" - @assert NH0 > 0 "NH0 ≤ 0" - @assert !LIML || (ncols(predendog)>0 && ncols(inst)>0) "For LIML, non-empty predendog and inst arguments are needed" - @assert Fuller==0 || (ncols(predendog)>0 && ncols(inst)>0) "For Fuller LIML, non-empty predendog and inst arguments are needed" + @assert nH0 > 0 "nH0 ≤ 0" + @assert !liml || (ncols(predendog)>0 && ncols(inst)>0) "For liml, non-empty predendog and inst arguments are needed" + @assert fuller==0 || (ncols(predendog)>0 && ncols(inst)>0) "For Fuller liml, non-empty predendog and inst arguments are needed" @assert iszero(ncols(predendog)) || ncols(inst)>0 "predendog provided without inst" - @assert !ARubin || ncols(predendog)>0 "Anderson-Rubin test requested but predendog not provided" + @assert !arubin || ncols(predendog)>0 "Anderson-Rubin test requested but predendog not provided" - if getplot || getCI + if getplot || getci @assert iszero(length(gridmin )) || length(gridmin )==nrows(R) "Length of gridmin doesn't match number of hypotheses being jointly tested" @assert iszero(length(gridmax )) || length(gridmax )==nrows(R) "Length of gridmax doesn't match number of hypotheses being jointly tested" @assert iszero(length(gridpoints)) || length(gridpoints)==nrows(R) "Length of gridpoints doesn't match number of hypotheses being jointly tested" @@ -285,10 +285,10 @@ function _wildboottest(T::DataType, maxmatsize=Float16(maxmatsize), ptype, bootstrapc, - LIML, - Fuller=T(Fuller), + liml, + fuller=T(fuller), kappa=T(kappa), - ARubin, + arubin, small, clusteradj, clustermin, @@ -300,8 +300,8 @@ function _wildboottest(T::DataType, level=T(level), rtol=T(rtol), madjtype, - NH0=Int16(NH0), - ML, + nH0=Int16(nH0), + ml, scores=matconvert(T,scores), beta=vecconvert(T,beta), A=Symmetric(matconvert(T,A)), @@ -309,7 +309,7 @@ function _wildboottest(T::DataType, gridmax=_gridmax, gridpoints=_gridpoints, diststat, - getCI, + getci, getplot, getauxweights) end @@ -350,10 +350,10 @@ Function to perform wild-bootstrap-based 
hypothesis test * `maxmatsize::Number`: maximum size of auxilliary weight matrix (v), in gigabytes * `ptype::Symbol=:symmetric`: p value type (`:symmetric`, `:equaltail`, `:lower`, `:upper`) * `bootstrapc::Bool=false`: true to request bootstrap-c instead of bootstrap-t -* `LIML::Bool=false`: true for LIML or Fuller LIML -* `Fuller::Number`: Fuller LIML factor +* `liml::Bool=false`: true for LIML or Fuller LIML +* `fuller::Number`: Fuller LIML factor * `kappa::Number`: fixed κ for _k_-class estimation -* `ARubin::Bool=false`: true for Anderson-Rubin test +* `arubin::Bool=false`: true for Anderson-Rubin test * `small::Bool=true`: true to multiply test statistics by G/(G-1) × N/(N-k), where G, N, k are number of clusters, observations, and predictors * `clusteradj::Bool=true`: false to drop G/(G-1) factor * `clustermin::Bool=false``: for multiway clustering, true to base G/(G-1) factor for all clusterings ]on the smallest G across clusterings @@ -363,10 +363,10 @@ Function to perform wild-bootstrap-based hypothesis test * `auxwttype::Symbol=:rademacher`: auxilliary weight type (`:rademacher`, `:mammen`, `:webb`, `:normal`, `:gamma`) * `rng::AbstractRNG=MersenneTwister()`: randon number generator * `level::Number=.95`: significance level (0-1) -* `rtol::Number=1e-6`: tolerance for CI bound determination +* `rtol::Number=1e-6`: tolerance for ci bound determination * `madjtype::Symbol=:none`: multiple hypothesis adjustment (`none`, `:bonferroni`, `:sidak`) -* `NH0::Integer=1`: number of hypotheses tested, including one being tested now -* `ML::Bool=false`: true for (nonlinear) ML estimation +* `nH0::Integer=1`: number of hypotheses tested, including one being tested now +* `ml::Bool=false`: true for (nonlinear) ML estimation * `scores::AbstractVecOrMat`: for ML, pre-computed scores * `beta::AbstractVector`: for ML, parameter estimates * `A::AbstractMatrix`: for ML, covariance estimates @@ -374,8 +374,8 @@ Function to perform wild-bootstrap-based hypothesis test * 
`gridmax`: vector of graph upper bounds; `missing`/`NaN` entries ask wildboottest() to choose * `gridpoints`: vector of number of sampling points; `missing`/`NaN` entries ask wildboottest() to choose * `diststat::Symbole=:none`: `:t` to save bootstrap distribution of t/z/F/χ² statistics; `:numer` to save numerators thereof -* `getCI::Bool=true`: whether to return CI -* `getplot::Bool=getCI`: whether to generate plot data +* `getci::Bool=true`: whether to return confidence interval +* `getplot::Bool=getci`: whether to generate plot data * `getauxweights::Bool=false`: whether to save auxilliary weight matrix (v) # Notes diff --git a/src/nonWRE.jl b/src/nonWRE.jl index c093169..312553b 100644 --- a/src/nonWRE.jl +++ b/src/nonWRE.jl @@ -29,7 +29,7 @@ function MakeInterpolables!(o::StrBootTest{T}) where T o.∂numer∂r[h₁] = (o.numer .- o.numer₀) ./ o.poles[h₁] o.interpolate_u && (o.∂u∂r[h₁] = (o.u✻ .- o.u✻₀) ./ o.poles[h₁]) - if o.robust && !o.purerobust # dof > 1 for an ARubin test with >1 instruments. + if o.robust && !o.purerobust # dof > 1 for an arubin test with >1 instruments. for d₁ ∈ 1:o.dof for c ∈ 1:o.NErrClustCombs o.∂Jcd∂r[h₁,c,d₁] = (o.Jcd[c,d₁] .- o.Jcd₀[c,d₁]) ./ o.poles[h₁] @@ -97,10 +97,10 @@ end # Construct stuff that depends linearly or quadratically on r and doesn't depend on v. No interpolation. 
function _MakeInterpolables!(o::StrBootTest{T}, thisr::AbstractVector) where T - if o.ML + if o.ml o.uXAR = o.sc * (o.AR = o.A * o.R') else - if o.ARubin + if o.arubin EstimateARubin!(o.DGP, o, thisr) MakeResidualsOLSARubin!(o.DGP, o) elseif iszero(o.κ) # regular OLS @@ -191,10 +191,10 @@ function MakeNumerAndJ!(o::StrBootTest{T}, w::Integer, r::AbstractVector=Vector{ end if isone(w) - if o.ARubin - o.numerw[:,1] = o.v_sd * o.DGP.β̈[o.kX₁+1:end] # coefficients on excluded instruments in ARubin OLS + if o.arubin + o.numerw[:,1] = o.v_sd * o.DGP.β̈[o.kX₁+1:end] # coefficients on excluded instruments in Anderson-Rubin OLS elseif !o.null # Analytical Wald numerator; if imposing null then numer[:,1] already equals this. If not, then it's 0 before this. - o.numerw[:,1] = o.v_sd * (o.R * (o.ML ? o.β̈ : iszero(o.κ) ? o.M.β̈ : o.M.Rpar * o.M.β̈) - r) # κ≂̸0 score bootstrap of IV ⇒ using FWL and must factor in R∥ + o.numerw[:,1] = o.v_sd * (o.R * (o.ml ? o.β̈ : iszero(o.κ) ? o.M.β̈ : o.M.Rpar * o.M.β̈) - r) # κ≂̸0 score bootstrap of IV ⇒ using FWL and must factor in R∥ end end @@ -272,16 +272,16 @@ function MakeNonWREStats!(o::StrBootTest{T}, w::Integer) where T isone(w) && (o.statDenom = tmp) # original-sample denominator end else # non-robust - AR = o.ML ? o.AR : o.M.AR + AR = o.ml ? 
o.AR : o.M.AR if isone(o.dof) # optimize for one null constraint o.denom[1,1] = o.R * AR - !o.ML && (o.denom[1,1] = o.denom[1,1] .* coldot(o,o.u✻)) + !o.ml && (o.denom[1,1] = o.denom[1,1] .* coldot(o,o.u✻)) @storeWtGrpResults!(o.dist, o.numerw ./ sqrtNaN.(o.denom[1,1])) isone(w) && (o.statDenom = o.denom[1,1]) # original-sample denominator else o.denom[1,1] = o.R * AR - if o.ML + if o.ml for k ∈ 1:ncols(o.v) numer_k = view(o.numerw,:,k) o.dist[k+first(o.WeightGrp[w])-1] = numer_k'invsym(o.denom[1,1])*numer_k diff --git a/src/plot-CI.jl b/src/plot-CI.jl index 8cc722c..4ffbcb3 100644 --- a/src/plot-CI.jl +++ b/src/plot-CI.jl @@ -48,7 +48,7 @@ function plot!(o::StrBootTest{T}) where T o.boottest!(o) Phi = quantile(Normal{T}(zero(T),one(T)), α/2) - if o.ARubin + if o.arubin p = o.dist[1] * o.multiplier p = ccdf(Chisq{T}(T(o.dof)), o.sqrt ? p^2 : p) halfwidth = abs.(o.confpeak) * quantile(Normal{T}(zero(T),one(T)), p/2) / Phi @@ -59,7 +59,7 @@ function plot!(o::StrBootTest{T}) where T end if isone(o.q) # 1D plot - α≤0 && (α = T(.05)) # if level=100, no CI constructed, but we need a reasonable α to choose graphing bounds + α≤0 && (α = T(.05)) # if level=100, no confidence interval constructed, but we need a reasonable α to choose graphing bounds p_lo = p_hi = T(NaN) if isnan(o.gridmin[1]) || isnan(o.gridmax[1]) if o.B>0 # initial guess based on classical distribution @@ -70,7 +70,7 @@ function plot!(o::StrBootTest{T}) where T lo = isnan(o.gridmin[1]) ? o.confpeak - tmp : o.gridmin hi = isnan(o.gridmax[1]) ? 
o.confpeak + tmp : o.gridmax if o.scorebs && !o.null && !o.willplot # if doing simple Wald test with no graph, we're done - o.CI = [lo hi] + o.ci = [lo hi] return end end @@ -155,8 +155,8 @@ function plot!(o::StrBootTest{T}) where T end if any(isnan.(o.plotY)) - o.CI = [T(-Inf) T(Inf)] - elseif isone(o.q) && o.level<100 # find CI bounds + o.ci = [T(-Inf) T(Inf)] + elseif isone(o.q) && o.level<100 # find confidence-interval bounds _CI = Vector{T}(undef, nrows(o.plotY)) for i in eachindex(_CI) # map() version hampers type inference in Julia 1.6.2 _CI[i] = isnan(o.plotY[i]) ? o.plotY[i] : T(o.plotY[i] > α) @@ -165,7 +165,7 @@ function plot!(o::StrBootTest{T}) where T lo = T.(findall(x->x== 1, _CI)) hi = T.(findall(x->x==-1, _CI)) if iszero(length(lo)) && iszero(length(hi)) - o.CI = [T(-Inf) T(Inf)] + o.ci = [T(-Inf) T(Inf)] else if iszero(length(lo)) lo = [T(-Inf)] @@ -175,12 +175,12 @@ function plot!(o::StrBootTest{T}) where T lo[1 ] > hi[1 ] && (lo = [T(-Inf) ; lo ]) # non-rejection ranges that are not within grid range lo[end] > hi[end] && (hi = [hi ; T(Inf)]) end - o.CI = [lo hi] + o.ci = [lo hi] for i ∈ 1:length(lo), j ∈ 1:2 - if !isinf(o.CI[i,j]) - t = Int(o.CI[i,j]) - o.CI[i,j] = search(o, α, o.plotY[t], o.plotX[1][t], o.plotY[t+1], o.plotX[1][t+1]) + if !isinf(o.ci[i,j]) + t = Int(o.ci[i,j]) + o.ci[i,j] = search(o, α, o.plotY[t], o.plotX[1][t], o.plotY[t+1], o.plotX[1][t+1]) end end end diff --git a/src/precompile_WildBootTests.jl b/src/precompile_WildBootTests.jl index e5285e3..a3809d7 100644 --- a/src/precompile_WildBootTests.jl +++ b/src/precompile_WildBootTests.jl @@ -51,104 +51,99 @@ function _precompile_() ccall(:jl_generating_output, Cint, ()) == 1 || return nothing - Base.precompile(Tuple{typeof(boottestOLSARubin!),StrBootTest{Float32}}) # time: 8.548078 - Base.precompile(Tuple{typeof(boottestOLSARubin!),StrBootTest{Float64}}) # time: 7.01583 - Base.precompile(Tuple{typeof(boottestWRE!),StrBootTest{Float64}}) # time: 3.9323723 - 
Base.precompile(Tuple{typeof(boottestWRE!),StrBootTest{Float32}}) # time: 3.7068539 - Base.precompile(Tuple{Core.kwftype(typeof(wildboottest)),NamedTuple{(:resp, :predexog, :clustid, :auxwttype), Tuple{Vector{Float32}, Matrix{Float64}, Vector{Int32}, Symbol}},typeof(wildboottest),Type,Matrix{Int64},Vector{Float64}}) # time: 1.5354811 - Base.precompile(Tuple{Core.kwftype(typeof(__wildboottest)),NamedTuple{(:resp, :predexog, :predendog, :inst, :R1, :r1, :clustid, :nbootclustvar, :nerrclustvar, :issorted, :hetrobust, :nfe, :feid, :fedfadj, :obswt, :fweights, :maxmatsize, :ptype, :bootstrapc, :LIML, :Fuller, :kappa, :ARubin, :small, :clusteradj, :clustermin, :scorebs, :reps, :imposenull, :auxwttype, :rng, :level, :rtol, :madjtype, :NH0, :ML, :scores, :beta, :A, :gridmin, :gridmax, :gridpoints, :diststat, :getCI, :getplot, :getauxweights), Tuple{Vector{Float32}, Matrix{Float32}, Matrix{Float32}, Matrix{Float32}, Matrix{Float32}, Vector{Float32}, Matrix{Int64}, Int64, Int64, Bool, Bool, Int64, Vector{Int64}, Int64, Vector{Float32}, Bool, Float16, Symbol, Bool, Bool, Float32, Float32, Bool, Bool, Bool, Bool, Bool, Int64, Bool, Symbol, MersenneTwister, Float32, Float32, Symbol, Int16, Bool, Matrix{Float32}, Vector{Float32}, Symmetric{Float32, Matrix{Float32}}, Vector{Float32}, Vector{Float32}, Vector{Float32}, Symbol, Bool, Bool, Bool}},typeof(__wildboottest),Matrix{Float32},Vector{Float32}}) # time: 0.9318629 - Base.precompile(Tuple{Core.kwftype(typeof(__wildboottest)),NamedTuple{(:resp, :predexog, :predendog, :inst, :R1, :r1, :clustid, :nbootclustvar, :nerrclustvar, :issorted, :hetrobust, :nfe, :feid, :fedfadj, :obswt, :fweights, :maxmatsize, :ptype, :bootstrapc, :LIML, :Fuller, :kappa, :ARubin, :small, :clusteradj, :clustermin, :scorebs, :reps, :imposenull, :auxwttype, :rng, :level, :rtol, :madjtype, :NH0, :ML, :scores, :beta, :A, :gridmin, :gridmax, :gridpoints, :diststat, :getCI, :getplot, :getauxweights), Tuple{Vector{Float64}, Matrix{Float64}, Matrix{Float64}, 
Matrix{Float64}, Matrix{Float64}, Vector{Float64}, Matrix{Int64}, Int64, Int64, Bool, Bool, Int64, Vector{Int64}, Int64, Vector{Float64}, Bool, Float16, Symbol, Bool, Bool, Float64, Float64, Bool, Bool, Bool, Bool, Bool, Int64, Bool, Symbol, MersenneTwister, Float64, Float64, Symbol, Int16, Bool, Matrix{Float64}, Vector{Float64}, Symmetric{Float64, Matrix{Float64}}, Vector{Float64}, Vector{Float64}, Vector{Float64}, Symbol, Bool, Bool, Bool}},typeof(__wildboottest),Matrix{Float64},Vector{Float64}}) # time: 0.8815 - Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple{(:resp, :clustid, :auxwttype, :predexog), Tuple{Vector{Float32}, Vector{Int32}, Symbol, Matrix{Float64}}},typeof(_wildboottest),DataType,Matrix{Int64},Vector{Float64}}) # time: 0.3031049 - Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple,typeof(_wildboottest),DataType,Matrix{Int64},Vector{Int64}}) # time: 0.0915435 - Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple,typeof(_wildboottest),DataType,Matrix{Float64},Vector{Int64}}) # time: 0.0544844 - Base.precompile(Tuple{typeof(matconvert),Type{Int64},Matrix{Int16}}) # time: 0.0478876 - Base.precompile(Tuple{Core.kwftype(typeof(wildboottest)),NamedTuple{(:resp, :predexog, :hetrobust, :scorebs), Tuple{Vector{Float32}, Matrix{Float64}, Bool, Bool}},typeof(wildboottest),Type,Matrix{Int64},Vector{Float64}}) # time: 0.0475819 - Base.precompile(Tuple{Core.kwftype(typeof(wildboottest)),NamedTuple{(:resp, :predexog, :hetrobust, :scorebs, :reps), Tuple{Vector{Float32}, Matrix{Float64}, Bool, Bool, Int64}},typeof(wildboottest),Type,Matrix{Int64},Vector{Float64}}) # time: 0.0454296 - Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple{(:resp, :reps, :scorebs, :predexog, :hetrobust), Tuple{Vector{Float32}, Int64, Bool, Matrix{Float64}, Bool}},typeof(_wildboottest),DataType,Matrix{Int64},Vector{Float64}}) # time: 0.0448079 - 
Base.precompile(Tuple{Core.kwftype(typeof(wildboottest)),NamedTuple{(:resp, :predexog, :predendog, :inst, :clustid, :small, :reps), Tuple{Vector{Float32}, Matrix{Float64}, Vector{Float32}, Matrix{Int8}, Vector{Int8}, Bool, Int64}},typeof(wildboottest),Type,Matrix{Int64},Vector{Int64}}) # time: 0.0446246 - Base.precompile(Tuple{Core.kwftype(typeof(wildboottest)),NamedTuple{(:resp, :predexog, :predendog, :inst, :clustid, :small, :reps, :imposenull, :getplot, :ptype), Tuple{Vector{Float32}, Matrix{Float64}, Vector{Float32}, Matrix{Int8}, Vector{Int8}, Bool, Int64, Bool, Bool, Symbol}},typeof(wildboottest),Type,Matrix{Int64},Vector{Int64}}) # time: 0.0425788 - Base.precompile(Tuple{typeof(matconvert),Type{Int64},Matrix{Int8}}) # time: 0.0392885 - Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple{(:resp, :inst, :clustid, :reps, :small, :predendog, :predexog), Tuple{Vector{Float32}, Matrix{Int8}, Vector{Int8}, Int64, Bool, Vector{Float32}, Matrix{Float64}}},typeof(_wildboottest),DataType,Matrix{Int64},Vector{Int64}}) # time: 0.0354483 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float64},Matrix{Float64},SubArray{Float64, 1, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.0328798 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float32},Matrix{Float32},SubArray{Float32, 1, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.0296697 + Base.precompile(Tuple{typeof(boottestOLSARubin!),StrBootTest{Float32}}) # time: 8.2192955 + Base.precompile(Tuple{typeof(boottestOLSARubin!),StrBootTest{Float64}}) # time: 6.797035 + Base.precompile(Tuple{typeof(boottestWRE!),StrBootTest{Float64}}) # time: 4.739786 + Base.precompile(Tuple{typeof(boottestWRE!),StrBootTest{Float32}}) # time: 4.028054 + Base.precompile(Tuple{Core.kwftype(typeof(wildboottest)),NamedTuple{(:resp, :predexog, :clustid, :auxwttype), Tuple{Vector{Float32}, 
Matrix{Float64}, Vector{Int32}, Symbol}},typeof(wildboottest),Type,Matrix{Int64},Vector{Float64}}) # time: 1.4766428 + Base.precompile(Tuple{Core.kwftype(typeof(__wildboottest)),NamedTuple{(:resp, :predexog, :predendog, :inst, :R1, :r1, :clustid, :nbootclustvar, :nerrclustvar, :issorted, :hetrobust, :nfe, :feid, :fedfadj, :obswt, :fweights, :maxmatsize, :ptype, :bootstrapc, :liml, :fuller, :kappa, :arubin, :small, :clusteradj, :clustermin, :scorebs, :reps, :imposenull, :auxwttype, :rng, :level, :rtol, :madjtype, :nH0, :ml, :scores, :beta, :A, :gridmin, :gridmax, :gridpoints, :diststat, :getci, :getplot, :getauxweights), Tuple{Vector{Float32}, Matrix{Float32}, Matrix{Float32}, Matrix{Float32}, Matrix{Float32}, Vector{Float32}, Matrix{Int64}, Int64, Int64, Bool, Bool, Int64, Vector{Int64}, Int64, Vector{Float32}, Bool, Float16, Symbol, Bool, Bool, Float32, Float32, Bool, Bool, Bool, Bool, Bool, Int64, Bool, Symbol, MersenneTwister, Float32, Float32, Symbol, Int16, Bool, Matrix{Float32}, Vector{Float32}, Symmetric{Float32, Matrix{Float32}}, Vector{Float32}, Vector{Float32}, Vector{Float32}, Symbol, Bool, Bool, Bool}},typeof(__wildboottest),Matrix{Float32},Vector{Float32}}) # time: 0.9131017 + Base.precompile(Tuple{Core.kwftype(typeof(__wildboottest)),NamedTuple{(:resp, :predexog, :predendog, :inst, :R1, :r1, :clustid, :nbootclustvar, :nerrclustvar, :issorted, :hetrobust, :nfe, :feid, :fedfadj, :obswt, :fweights, :maxmatsize, :ptype, :bootstrapc, :liml, :fuller, :kappa, :arubin, :small, :clusteradj, :clustermin, :scorebs, :reps, :imposenull, :auxwttype, :rng, :level, :rtol, :madjtype, :nH0, :ml, :scores, :beta, :A, :gridmin, :gridmax, :gridpoints, :diststat, :getci, :getplot, :getauxweights), Tuple{Vector{Float64}, Matrix{Float64}, Matrix{Float64}, Matrix{Float64}, Matrix{Float64}, Vector{Float64}, Matrix{Int64}, Int64, Int64, Bool, Bool, Int64, Vector{Int64}, Int64, Vector{Float64}, Bool, Float16, Symbol, Bool, Bool, Float64, Float64, Bool, Bool, Bool, Bool, Bool, 
Int64, Bool, Symbol, MersenneTwister, Float64, Float64, Symbol, Int16, Bool, Matrix{Float64}, Vector{Float64}, Symmetric{Float64, Matrix{Float64}}, Vector{Float64}, Vector{Float64}, Vector{Float64}, Symbol, Bool, Bool, Bool}},typeof(__wildboottest),Matrix{Float64},Vector{Float64}}) # time: 0.7967473 + Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple{(:resp, :clustid, :auxwttype, :predexog), Tuple{Vector{Float32}, Vector{Int32}, Symbol, Matrix{Float64}}},typeof(_wildboottest),DataType,Matrix{Int64},Vector{Float64}}) # time: 0.2241874 + Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple,typeof(_wildboottest),DataType,Matrix{Int64},Vector{Int64}}) # time: 0.0757203 + Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple,typeof(_wildboottest),DataType,Matrix{Float64},Vector{Int64}}) # time: 0.0512666 + Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple{(:resp, :reps, :scorebs, :predexog, :hetrobust), Tuple{Vector{Float32}, Int64, Bool, Matrix{Float64}, Bool}},typeof(_wildboottest),DataType,Matrix{Int64},Vector{Float64}}) # time: 0.0399314 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Vector{Float32},Vector{Float32},SubArray{Float32, 1, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.0370134 + Base.precompile(Tuple{Core.kwftype(typeof(wildboottest)),NamedTuple{(:resp, :predexog, :hetrobust, :scorebs), Tuple{Vector{Float32}, Matrix{Float64}, Bool, Bool}},typeof(wildboottest),Type,Matrix{Int64},Vector{Float64}}) # time: 0.036235 + Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple{(:resp, :inst, :clustid, :reps, :small, :predendog, :predexog), Tuple{Vector{Float32}, Matrix{Int8}, Vector{Int8}, Int64, Bool, Vector{Float32}, Matrix{Float64}}},typeof(_wildboottest),DataType,Matrix{Int64},Vector{Int64}}) # time: 0.0359046 + Base.precompile(Tuple{Core.kwftype(typeof(wildboottest)),NamedTuple{(:resp, :predexog, :hetrobust, 
:scorebs, :reps), Tuple{Vector{Float32}, Matrix{Float64}, Bool, Bool, Int64}},typeof(wildboottest),Type,Matrix{Int64},Vector{Float64}}) # time: 0.0341064 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float64},Matrix{Float64},SubArray{Float64, 1, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.0334703 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float32},Matrix{Float32},SubArray{Float32, 1, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.0327042 + Base.precompile(Tuple{Core.kwftype(typeof(wildboottest)),NamedTuple{(:resp, :predexog, :predendog, :inst, :clustid, :small, :reps), Tuple{Vector{Float32}, Matrix{Float64}, Vector{Float32}, Matrix{Int8}, Vector{Int8}, Bool, Int64}},typeof(wildboottest),Type,Matrix{Int64},Vector{Int64}}) # time: 0.0324644 + Base.precompile(Tuple{Core.kwftype(typeof(wildboottest)),NamedTuple{(:resp, :predexog, :predendog, :inst, :clustid, :small, :reps, :imposenull, :getplot, :ptype), Tuple{Vector{Float32}, Matrix{Float64}, Vector{Float32}, Matrix{Int8}, Vector{Int8}, Bool, Int64, Bool, Bool, Symbol}},typeof(wildboottest),Type,Matrix{Int64},Vector{Int64}}) # time: 0.0324147 let fbody = try __lookup_kwbody__(which(_wildboottest, (DataType,Matrix{Int64},Vector{Float64},))) catch missing end if !ismissing(fbody) precompile(fbody, (Vector{Float32},Matrix{Float64},Matrix{Float32},Matrix{Float32},Matrix{Float32},Vector{Float32},Bool,Matrix{Int64},Int64,Int64,Bool,Int64,Vector{Int8},Int64,Vector{Float32},Bool,Int64,Symbol,Bool,Bool,Int64,Float64,Bool,Bool,Bool,Bool,Bool,Int64,Bool,Symbol,MersenneTwister,Float64,Float64,Symbol,Int64,Bool,Matrix{Float32},Vector{Float32},Matrix{Float32},Vector{Float32},Vector{Float32},Vector{Int64},Symbol,Bool,Bool,Bool,typeof(_wildboottest),DataType,Matrix{Int64},Vector{Float64},)) end -end # time: 0.0293988 - let fbody = try __lookup_kwbody__(which(_wildboottest, 
(DataType,Matrix{Int64},Vector{Float64},))) catch missing end +end # time: 0.0299475 + let fbody = try __lookup_kwbody__(which(_wildboottest, (DataType,Matrix{Int64},Vector{Int64},))) catch missing end if !ismissing(fbody) - precompile(fbody, (Vector{Float32},Matrix{Float64},Matrix{Float64},Matrix{Float64},Matrix{Float64},Vector{Float64},Bool,Matrix{Int64},Int64,Int64,Bool,Int64,Vector{Int8},Int64,Vector{Float64},Bool,Int64,Symbol,Bool,Bool,Int64,Float64,Bool,Bool,Bool,Bool,Bool,Int64,Bool,Symbol,MersenneTwister,Float64,Float64,Symbol,Int64,Bool,Matrix{Float32},Vector{Float64},Matrix{Float64},Vector{Float64},Vector{Float64},Vector{Int64},Symbol,Bool,Bool,Bool,typeof(_wildboottest),DataType,Matrix{Int64},Vector{Float64},)) + precompile(fbody, (Vector{Float32},Matrix{Float64},Vector{Float32},Matrix{Int8},Matrix{Float64},Vector{Float64},Bool,Vector{Int8},Int64,Int64,Bool,Int64,Vector{Int8},Int64,Vector{Float64},Bool,Int64,Symbol,Bool,Bool,Int64,Float64,Bool,Bool,Bool,Bool,Bool,Int64,Bool,Symbol,MersenneTwister,Float64,Float64,Symbol,Int64,Bool,Matrix{Float32},Vector{Float64},Matrix{Float64},Vector{Float64},Vector{Float64},Vector{Int64},Symbol,Bool,Bool,Bool,typeof(_wildboottest),DataType,Matrix{Int64},Vector{Int64},)) end -end # time: 0.0285995 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),SubArray{Float64, 2, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true},Matrix{Float64},Vector{Float64},Vector{UnitRange{Int64}}}) # time: 0.0285214 +end # time: 0.026748 let fbody = try __lookup_kwbody__(which(_wildboottest, (DataType,Matrix{Int64},Vector{Int64},))) catch missing end if !ismissing(fbody) - precompile(fbody, 
(Vector{Float32},Matrix{Float64},Vector{Float32},Matrix{Int8},Matrix{Float64},Vector{Float64},Bool,Vector{Int8},Int64,Int64,Bool,Int64,Vector{Int8},Int64,Vector{Float64},Bool,Int64,Symbol,Bool,Bool,Int64,Float64,Bool,Bool,Bool,Bool,Bool,Int64,Bool,Symbol,MersenneTwister,Float64,Float64,Symbol,Int64,Bool,Matrix{Float32},Vector{Float64},Matrix{Float64},Vector{Float64},Vector{Float64},Vector{Int64},Symbol,Bool,Bool,Bool,typeof(_wildboottest),DataType,Matrix{Int64},Vector{Int64},)) + precompile(fbody, (Vector{Float32},Matrix{Float64},Vector{Float32},Matrix{Int8},Matrix{Float32},Vector{Float32},Bool,Vector{Int8},Int64,Int64,Bool,Int64,Vector{Int8},Int64,Vector{Float32},Bool,Int64,Symbol,Bool,Bool,Int64,Float64,Bool,Bool,Bool,Bool,Bool,Int64,Bool,Symbol,MersenneTwister,Float64,Float64,Symbol,Int64,Bool,Matrix{Float32},Vector{Float32},Matrix{Float32},Vector{Float32},Vector{Float32},Vector{Int64},Symbol,Bool,Bool,Bool,typeof(_wildboottest),DataType,Matrix{Int64},Vector{Int64},)) end -end # time: 0.0276205 +end # time: 0.0262361 let fbody = try __lookup_kwbody__(which(_wildboottest, (DataType,Matrix{Int64},Vector{Float64},))) catch missing end if !ismissing(fbody) precompile(fbody, (Vector{Float32},Matrix{Float64},Matrix{Float64},Matrix{Float64},Matrix{Float64},Vector{Float64},Bool,Vector{Int32},Int64,Int64,Bool,Int64,Vector{Int8},Int64,Vector{Float64},Bool,Int64,Symbol,Bool,Bool,Int64,Float64,Bool,Bool,Bool,Bool,Bool,Int64,Bool,Symbol,MersenneTwister,Float64,Float64,Symbol,Int64,Bool,Matrix{Float32},Vector{Float64},Matrix{Float64},Vector{Float64},Vector{Float64},Vector{Int64},Symbol,Bool,Bool,Bool,typeof(_wildboottest),DataType,Matrix{Int64},Vector{Float64},)) end -end # time: 0.0271597 - let fbody = try __lookup_kwbody__(which(_wildboottest, (DataType,Matrix{Int64},Vector{Int64},))) catch missing end - if !ismissing(fbody) - precompile(fbody, 
(Vector{Float32},Matrix{Float64},Vector{Float32},Matrix{Int8},Matrix{Float32},Vector{Float32},Bool,Vector{Int8},Int64,Int64,Bool,Int64,Vector{Int8},Int64,Vector{Float32},Bool,Int64,Symbol,Bool,Bool,Int64,Float64,Bool,Bool,Bool,Bool,Bool,Int64,Bool,Symbol,MersenneTwister,Float64,Float64,Symbol,Int64,Bool,Matrix{Float32},Vector{Float32},Matrix{Float32},Vector{Float32},Vector{Float32},Vector{Int64},Symbol,Bool,Bool,Bool,typeof(_wildboottest),DataType,Matrix{Int64},Vector{Int64},)) - end -end # time: 0.0258724 +end # time: 0.0259131 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),SubArray{Float64, 2, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true},Matrix{Float64},Vector{Float64},Vector{UnitRange{Int64}}}) # time: 0.0253607 let fbody = try __lookup_kwbody__(which(_wildboottest, (DataType,Matrix{Int64},Vector{Float64},))) catch missing end if !ismissing(fbody) precompile(fbody, (Vector{Float32},Matrix{Float64},Matrix{Float32},Matrix{Float32},Matrix{Float32},Vector{Float32},Bool,Vector{Int32},Int64,Int64,Bool,Int64,Vector{Int8},Int64,Vector{Float32},Bool,Int64,Symbol,Bool,Bool,Int64,Float64,Bool,Bool,Bool,Bool,Bool,Int64,Bool,Symbol,MersenneTwister,Float64,Float64,Symbol,Int64,Bool,Matrix{Float32},Vector{Float32},Matrix{Float32},Vector{Float32},Vector{Float32},Vector{Int64},Symbol,Bool,Bool,Bool,typeof(_wildboottest),DataType,Matrix{Int64},Vector{Float64},)) end -end # time: 0.0252568 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Vector{Float64},Vector{Float64},SubArray{Float64, 1, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.024167 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),SubArray{Float64, 2, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true},Matrix{Float64},SubArray{Float64, 1, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.023559 - 
Base.precompile(Tuple{typeof(vecconvert),DataType,Vector{Int16}}) # time: 0.0226892 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),SubArray{Float32, 2, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true},Matrix{Float32},Vector{Float32},Vector{UnitRange{Int64}}}) # time: 0.0225655 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Vector{Float32},Vector{Float32},SubArray{Float32, 1, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.0224888 - Base.precompile(Tuple{typeof(vecconvert),DataType,BitVector}) # time: 0.0224703 - Base.precompile(Tuple{typeof(matconvert),DataType,Matrix{Int8}}) # time: 0.0223958 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),SubArray{Float32, 2, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true},Matrix{Float32},SubArray{Float32, 1, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.0222066 - Base.precompile(Tuple{typeof(matconvert),DataType,Matrix{Int16}}) # time: 0.0207188 - Base.precompile(Tuple{typeof(vecconvert),DataType,Vector{Int8}}) # time: 0.01979 - Base.precompile(Tuple{typeof(matconvert),DataType,Vector{Int32}}) # time: 0.0196539 - Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Matrix{Float32},Int64,Matrix{Float32},Symmetric{Float32, Matrix{Float32}},Matrix{Float32}}) # time: 0.0179556 - Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Matrix{Float64},Int64,Matrix{Float64},Symmetric{Float64, Matrix{Float64}},Matrix{Float64}}) # time: 0.0175957 - Base.precompile(Tuple{typeof(vecconvert),Type{Int64},Vector{Int16}}) # time: 0.0165086 - Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Adjoint{Float64, Vector{Float64}},Int64,Matrix{Float64},Matrix{Float64},Matrix{Float64}}) # time: 0.0103308 - Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),SubArray{Float64, 2, Array{Float64, 3}, Tuple{Base.Slice{Base.OneTo{Int64}}, 
Base.Slice{Base.OneTo{Int64}}, Int64}, true},Int64,Matrix{Float64},Matrix{Float64},Matrix{Float64}}) # time: 0.0094425 - Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Adjoint{Float32, Vector{Float32}},Int64,Matrix{Float32},Matrix{Float32},Matrix{Float32}}) # time: 0.00942 - Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple{(:resp, :scorebs, :predexog, :hetrobust), Tuple{Vector{Float32}, Bool, Matrix{Float64}, Bool}},typeof(_wildboottest),DataType,Matrix{Int64},Vector{Float64}}) # time: 0.0092454 - Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),SubArray{Float32, 2, Array{Float32, 3}, Tuple{Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}}, Int64}, true},Int64,Matrix{Float32},Matrix{Float32},Matrix{Float32}}) # time: 0.0089779 - Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Matrix{Float64},Int64,Matrix{Float64},Matrix{Float64},Matrix{Float64}}) # time: 0.0087563 - Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Matrix{Float32},Int64,Matrix{Float32},Matrix{Float32},Matrix{Float32}}) # time: 0.0075981 - Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple{(:resp, :inst, :clustid, :reps, :imposenull, :getplot, :small, :predendog, :ptype, :predexog), Tuple{Vector{Float32}, Matrix{Int8}, Vector{Int8}, Int64, Bool, Bool, Bool, Vector{Float32}, Symbol, Matrix{Float64}}},typeof(_wildboottest),DataType,Matrix{Int64},Vector{Int64}}) # time: 0.0070773 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float32},Matrix{Float32},Vector{Float32},Vector{UnitRange{Int64}}}) # time: 0.0061267 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float64},Matrix{Float64},Vector{Float64},Vector{UnitRange{Int64}}}) # time: 0.0047216 - Base.precompile(Tuple{typeof(coldotplus_nonturbo!),SubArray{Float32, 2, Matrix{Float32}, Tuple{UnitRange{Int64}, Base.Slice{Base.OneTo{Int64}}}, false},Matrix{Float32},Matrix{Float32}}) # time: 0.0041153 - 
Base.precompile(Tuple{Type{BootTestResult{Float64}},Float64,String,Float64,Float64,Int64,Int64,Int64,Int64,Float64,NamedTuple{(:X, :p), Tuple{Tuple{Vector{Float64}}, Vector{Float64}}},NamedTuple{(:X, :p), Tuple{Vector{Float64}, Float64}},Matrix{Float64},Matrix{Float64},Vector{Float64},Matrix{Float64},Nothing}) # time: 0.0040239 - Base.precompile(Tuple{typeof(matmulplus_nonturbo!),Vector{Float64},Matrix{Float64},Vector{Float64}}) # time: 0.0040137 - Base.precompile(Tuple{typeof(matmulplus_nonturbo!),Vector{Float32},Matrix{Float32},Vector{Float32}}) # time: 0.0038687 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float64},Matrix{Float64},Vector{UnitRange{Int64}}}) # time: 0.0036602 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float32},Matrix{Float32},Vector{UnitRange{Int64}}}) # time: 0.0036449 - Base.precompile(Tuple{typeof(coldotplus_nonturbo!),SubArray{Float64, 2, Matrix{Float64}, Tuple{UnitRange{Int64}, Base.Slice{Base.OneTo{Int64}}}, false},Matrix{Float64},Matrix{Float64}}) # time: 0.0034332 - Base.precompile(Tuple{Type{BootTestResult{Float64}},Float64,String,Float64,Float64,Int64,Int64,Int64,Int64,Float64,NamedTuple{(:X, :p), Tuple{Tuple{Vector{Float64}, Vector{Float64}}, Vector{Float64}}},NamedTuple{(:X, :p), Tuple{Vector{Float64}, Float64}},Matrix{Float64},Matrix{Float64},Vector{Float64},Matrix{Float64},Nothing}) # time: 0.0030278 - Base.precompile(Tuple{Type{BootTestResult{Float32}},Float32,String,Float32,Float32,Int64,Int64,Int64,Int64,Float32,NamedTuple{(:X, :p), Tuple{Tuple{Vector{Float32}}, Vector{Float32}}},NamedTuple{(:X, :p), Tuple{Vector{Float32}, Float32}},Matrix{Float32},Matrix{Float32},Vector{Float32},Matrix{Float32},Nothing}) # time: 0.0029808 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Array{Float32, 3},Array{Float32, 3},Vector{UnitRange{Int64}}}) # time: 0.0029532 - Base.precompile(Tuple{Type{BootTestResult{Float32}},Float32,String,Float32,Float32,Int64,Int64,Int64,Int64,Float32,NamedTuple{(:X, :p), 
Tuple{Tuple{Vector{Float32}, Vector{Float32}}, Vector{Float32}}},NamedTuple{(:X, :p), Tuple{Vector{Float32}, Float32}},Matrix{Float32},Matrix{Float32},Vector{Float32},Matrix{Float32},Nothing}) # time: 0.0028374 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Array{Float64, 3},Array{Float64, 3},Vector{UnitRange{Int64}}}) # time: 0.002758 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Vector{Float32},Vector{Float32},Vector{UnitRange{Int64}}}) # time: 0.0024467 - Base.precompile(Tuple{typeof(panelsum_nonturbo!),Vector{Float64},Vector{Float64},Vector{UnitRange{Int64}}}) # time: 0.0024202 - Base.precompile(Tuple{typeof(matconvert),Type{Int64},Matrix{Int64}}) # time: 0.0019062 - Base.precompile(Tuple{typeof(coldotplus_nonturbo!),Matrix{Float32},Matrix{Float32},Matrix{Float32}}) # time: 0.001591 - Base.precompile(Tuple{typeof(coldotplus_nonturbo!),Matrix{Float64},Matrix{Float64},Matrix{Float64}}) # time: 0.0015697 - Base.precompile(Tuple{typeof(matconvert),DataType,Vector{Int8}}) # time: 0.0014787 - Base.precompile(Tuple{Type{BootTestResult{Float64}},Float64,String,Float64,Float64,Int64,Int64,Int64,Int64,Float64,Nothing,Nothing,Nothing,Matrix{Float64},Vector{Float64},Matrix{Float64},Nothing}) # time: 0.0013633 - Base.precompile(Tuple{typeof(matconvert),DataType,Vector{Float32}}) # time: 0.0013216 - Base.precompile(Tuple{Type{BootTestResult{Float64}},Float64,String,Float64,Float64,Int64,Int64,Int64,Int64,Float64,Nothing,Nothing,Nothing,Matrix{Float64},Vector{Float64},Matrix{Float64},SubArray{Float64, 2, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true}}) # time: 0.0012732 - Base.precompile(Tuple{Type{BootTestResult{Float32}},Float32,String,Float32,Float32,Int64,Int64,Int64,Int64,Float32,Nothing,Nothing,Nothing,Matrix{Float32},Vector{Float32},Matrix{Float32},Nothing}) # time: 0.0012667 - 
Base.precompile(Tuple{Type{BootTestResult{Float32}},Float32,String,Float32,Float32,Int64,Int64,Int64,Int64,Float32,Nothing,Nothing,Nothing,Matrix{Float32},Vector{Float32},Matrix{Float32},SubArray{Float32, 2, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true}}) # time: 0.0011392 - Base.precompile(Tuple{Type{BootTestResult{Float64}},Float64,String,Float64,Float64,Int64,Int64,Int64,Int64,Float64,Nothing,NamedTuple{(:X, :p), Tuple{Vector{Float64}, Float64}},Matrix{Float64},Matrix{Float64},Vector{Float64},Matrix{Float64},Nothing}) # time: 0.001121 - Base.precompile(Tuple{Type{BootTestResult{Float32}},Float32,String,Float32,Float32,Int64,Int64,Int64,Int64,Float32,Nothing,NamedTuple{(:X, :p), Tuple{Vector{Float32}, Float32}},Matrix{Float32},Matrix{Float32},Vector{Float32},Matrix{Float32},Nothing}) # time: 0.0010394 +end # time: 0.0253141 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Vector{Float64},Vector{Float64},SubArray{Float64, 1, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.0238532 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),SubArray{Float32, 2, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true},Matrix{Float32},Vector{Float32},Vector{UnitRange{Int64}}}) # time: 0.022691 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),SubArray{Float64, 2, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true},Matrix{Float64},SubArray{Float64, 1, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.0226257 + Base.precompile(Tuple{typeof(vecconvert),DataType,BitVector}) # time: 0.0223217 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),SubArray{Float32, 2, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true},Matrix{Float32},SubArray{Float32, 1, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, Int64}, true},Vector{UnitRange{Int64}}}) # time: 0.0221149 
+ Base.precompile(Tuple{typeof(matconvert),DataType,Matrix{Int8}}) # time: 0.0216603 + Base.precompile(Tuple{typeof(matconvert),Type{Int64},Matrix{Int16}}) # time: 0.0211393 + Base.precompile(Tuple{typeof(matconvert),DataType,Vector{Int32}}) # time: 0.0209911 + Base.precompile(Tuple{typeof(vecconvert),DataType,Vector{Int16}}) # time: 0.0208953 + Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Matrix{Float64},Int64,Matrix{Float64},Symmetric{Float64, Matrix{Float64}},Matrix{Float64}}) # time: 0.0207441 + Base.precompile(Tuple{typeof(vecconvert),DataType,Vector{Int8}}) # time: 0.020406 + Base.precompile(Tuple{typeof(matconvert),DataType,Matrix{Int16}}) # time: 0.0191304 + Base.precompile(Tuple{typeof(matconvert),Type{Int64},Matrix{Int8}}) # time: 0.0187837 + Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Matrix{Float32},Int64,Matrix{Float32},Symmetric{Float32, Matrix{Float32}},Matrix{Float32}}) # time: 0.0172341 + Base.precompile(Tuple{typeof(vecconvert),Type{Int64},Vector{Int16}}) # time: 0.0142607 + Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),SubArray{Float64, 2, Array{Float64, 3}, Tuple{Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}}, Int64}, true},Int64,Matrix{Float64},Matrix{Float64},Matrix{Float64}}) # time: 0.0126324 + Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Adjoint{Float64, Vector{Float64}},Int64,Matrix{Float64},Matrix{Float64},Matrix{Float64}}) # time: 0.0094184 + Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Adjoint{Float32, Vector{Float32}},Int64,Matrix{Float32},Matrix{Float32},Matrix{Float32}}) # time: 0.0093277 + Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),SubArray{Float32, 2, Array{Float32, 3}, Tuple{Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}}, Int64}, true},Int64,Matrix{Float32},Matrix{Float32},Matrix{Float32}}) # time: 0.0087927 + Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple{(:resp, :scorebs, :predexog, :hetrobust), 
Tuple{Vector{Float32}, Bool, Matrix{Float64}, Bool}},typeof(_wildboottest),DataType,Matrix{Int64},Vector{Float64}}) # time: 0.0080437 + Base.precompile(Tuple{Core.kwftype(typeof(_wildboottest)),NamedTuple{(:resp, :inst, :clustid, :reps, :imposenull, :getplot, :small, :predendog, :ptype, :predexog), Tuple{Vector{Float32}, Matrix{Int8}, Vector{Int8}, Int64, Bool, Bool, Bool, Vector{Float32}, Symbol, Matrix{Float64}}},typeof(_wildboottest),DataType,Matrix{Int64},Vector{Int64}}) # time: 0.0079362 + Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Matrix{Float64},Int64,Matrix{Float64},Matrix{Float64},Matrix{Float64}}) # time: 0.0074214 + Base.precompile(Tuple{typeof(colquadformminus_nonturbo!),Matrix{Float32},Int64,Matrix{Float32},Matrix{Float32},Matrix{Float32}}) # time: 0.0071387 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float32},Matrix{Float32},Vector{Float32},Vector{UnitRange{Int64}}}) # time: 0.0066268 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float64},Matrix{Float64},Vector{Float64},Vector{UnitRange{Int64}}}) # time: 0.0055284 + Base.precompile(Tuple{typeof(matmulplus_nonturbo!),Vector{Float64},Matrix{Float64},Vector{Float64}}) # time: 0.0041138 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float32},Matrix{Float32},Vector{UnitRange{Int64}}}) # time: 0.0040009 + Base.precompile(Tuple{typeof(matmulplus_nonturbo!),Vector{Float32},Matrix{Float32},Vector{Float32}}) # time: 0.0038271 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Matrix{Float64},Matrix{Float64},Vector{UnitRange{Int64}}}) # time: 0.0037496 + Base.precompile(Tuple{Type{BootTestResult{Float64}},Float64,String,Float64,Float64,Int64,Int64,Int64,Int64,Float64,NamedTuple{(:X, :p), Tuple{Tuple{Vector{Float64}, Vector{Float64}}, Vector{Float64}}},NamedTuple{(:X, :p), Tuple{Vector{Float64}, Float64}},Matrix{Float64},Matrix{Float64},Vector{Float64},Matrix{Float64},Nothing}) # time: 0.0034816 + 
Base.precompile(Tuple{Type{BootTestResult{Float64}},Float64,String,Float64,Float64,Int64,Int64,Int64,Int64,Float64,NamedTuple{(:X, :p), Tuple{Tuple{Vector{Float64}}, Vector{Float64}}},NamedTuple{(:X, :p), Tuple{Vector{Float64}, Float64}},Matrix{Float64},Matrix{Float64},Vector{Float64},Matrix{Float64},Nothing}) # time: 0.003184 + Base.precompile(Tuple{typeof(coldotplus_nonturbo!),SubArray{Float64, 2, Matrix{Float64}, Tuple{UnitRange{Int64}, Base.Slice{Base.OneTo{Int64}}}, false},Matrix{Float64},Matrix{Float64}}) # time: 0.0031641 + Base.precompile(Tuple{typeof(coldotplus_nonturbo!),SubArray{Float32, 2, Matrix{Float32}, Tuple{UnitRange{Int64}, Base.Slice{Base.OneTo{Int64}}}, false},Matrix{Float32},Matrix{Float32}}) # time: 0.0028257 + Base.precompile(Tuple{Type{BootTestResult{Float32}},Float32,String,Float32,Float32,Int64,Int64,Int64,Int64,Float32,NamedTuple{(:X, :p), Tuple{Tuple{Vector{Float32}}, Vector{Float32}}},NamedTuple{(:X, :p), Tuple{Vector{Float32}, Float32}},Matrix{Float32},Matrix{Float32},Vector{Float32},Matrix{Float32},Nothing}) # time: 0.0028149 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Array{Float64, 3},Array{Float64, 3},Vector{UnitRange{Int64}}}) # time: 0.002783 + Base.precompile(Tuple{Type{BootTestResult{Float32}},Float32,String,Float32,Float32,Int64,Int64,Int64,Int64,Float32,NamedTuple{(:X, :p), Tuple{Tuple{Vector{Float32}, Vector{Float32}}, Vector{Float32}}},NamedTuple{(:X, :p), Tuple{Vector{Float32}, Float32}},Matrix{Float32},Matrix{Float32},Vector{Float32},Matrix{Float32},Nothing}) # time: 0.0027813 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Vector{Float32},Vector{Float32},Vector{UnitRange{Int64}}}) # time: 0.0027138 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Vector{Float64},Vector{Float64},Vector{UnitRange{Int64}}}) # time: 0.0026729 + Base.precompile(Tuple{typeof(panelsum_nonturbo!),Array{Float32, 3},Array{Float32, 3},Vector{UnitRange{Int64}}}) # time: 0.0026243 + 
Base.precompile(Tuple{typeof(coldotplus_nonturbo!),Matrix{Float64},Matrix{Float64},Matrix{Float64}}) # time: 0.0018618 + Base.precompile(Tuple{typeof(coldotplus_nonturbo!),Matrix{Float32},Matrix{Float32},Matrix{Float32}}) # time: 0.0017882 + Base.precompile(Tuple{typeof(matconvert),DataType,Vector{Int8}}) # time: 0.0016576 + Base.precompile(Tuple{Type{BootTestResult{Float64}},Float64,String,Float64,Float64,Int64,Int64,Int64,Int64,Float64,Nothing,Nothing,Nothing,Matrix{Float64},Vector{Float64},Matrix{Float64},Nothing}) # time: 0.0014966 + Base.precompile(Tuple{typeof(matconvert),DataType,Vector{Float32}}) # time: 0.0013353 + Base.precompile(Tuple{Type{BootTestResult{Float32}},Float32,String,Float32,Float32,Int64,Int64,Int64,Int64,Float32,Nothing,NamedTuple{(:X, :p), Tuple{Vector{Float32}, Float32}},Matrix{Float32},Matrix{Float32},Vector{Float32},Matrix{Float32},Nothing}) # time: 0.0013019 + Base.precompile(Tuple{Type{BootTestResult{Float64}},Float64,String,Float64,Float64,Int64,Int64,Int64,Int64,Float64,Nothing,Nothing,Nothing,Matrix{Float64},Vector{Float64},Matrix{Float64},SubArray{Float64, 2, Matrix{Float64}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true}}) # time: 0.0012976 + Base.precompile(Tuple{Type{BootTestResult{Float32}},Float32,String,Float32,Float32,Int64,Int64,Int64,Int64,Float32,Nothing,Nothing,Nothing,Matrix{Float32},Vector{Float32},Matrix{Float32},Nothing}) # time: 0.001277 + Base.precompile(Tuple{Type{BootTestResult{Float32}},Float32,String,Float32,Float32,Int64,Int64,Int64,Int64,Float32,Nothing,Nothing,Nothing,Matrix{Float32},Vector{Float32},Matrix{Float32},SubArray{Float32, 2, Matrix{Float32}, Tuple{Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64}}, true}}) # time: 0.00116 + Base.precompile(Tuple{Type{BootTestResult{Float64}},Float64,String,Float64,Float64,Int64,Int64,Int64,Int64,Float64,Nothing,NamedTuple{(:X, :p), Tuple{Vector{Float64}, Float64}},Matrix{Float64},Matrix{Float64},Vector{Float64},Matrix{Float64},Nothing}) # time: 
0.0010974 + Base.precompile(Tuple{typeof(matconvert),Type{Int64},Matrix{Int64}}) # time: 0.0010696 end diff --git a/test/runtests.jl b/test/runtests.jl index 80a4302..381f79d 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -2,7 +2,7 @@ push!(LOAD_PATH, ".") using WildBootTests using StatFiles, StatsModels, DataFrames, DataFramesMeta, BenchmarkTools, Plots, CategoricalArrays, Random, StableRNGs -open("test/unittests.log", "w") do log # use Github Desktop to detect changes in output +open("unittests.log", "w") do log # use Github Desktop to detect changes in output df = DataFrame(load(raw"d:\OneDrive\Documents\Macros\collapsed.dta")) dropmissing!(df) @@ -19,19 +19,19 @@ test = wildboottest([0 0 0 1], [.04]; resp, predexog, clustid=Int32.(df.year), a println(log, test) println(log, "\nboottest post_self=.04, weight(webb) reps(9999999) noci") -test = wildboottest([0 0 0 1], [.04]; resp, predexog, clustid=Int32.(df.year), reps=9999999, auxwttype=:webb, getCI=false, rng=StableRNG(1231)) +test = wildboottest([0 0 0 1], [.04]; resp, predexog, clustid=Int32.(df.year), reps=9999999, auxwttype=:webb, getci=false, rng=StableRNG(1231)) println(log, test) println(log, "\nboottest post_self=.04, weight(normal) reps(9999) noci") -test = wildboottest([0 0 0 1], [.04]; resp, predexog, clustid=Int32.(df.year), reps=9999, auxwttype=:normal, getCI=false, rng=StableRNG(1231)) +test = wildboottest([0 0 0 1], [.04]; resp, predexog, clustid=Int32.(df.year), reps=9999, auxwttype=:normal, getci=false, rng=StableRNG(1231)) println(log, test) println(log, "\nboottest post_self=.04, weight(gamma) reps(9999) noci svv") -test = wildboottest([0 0 0 1], [.04]; resp, predexog, clustid=Int32.(df.year), reps=9999, auxwttype=:gamma, getCI=false, rng=StableRNG(1231), getauxweights=true) +test = wildboottest([0 0 0 1], [.04]; resp, predexog, clustid=Int32.(df.year), reps=9999, auxwttype=:gamma, getci=false, rng=StableRNG(1231), getauxweights=true) println(log, test) println(log, "\nboottest 
post_self=.04, weight(mammen) reps(9999) noci") -test = wildboottest([0 0 0 1], [.04]; resp, predexog, clustid=Int32.(df.year), reps=9999, auxwttype=:mammen, getCI=false, rng=StableRNG(1231)) +test = wildboottest([0 0 0 1], [.04]; resp, predexog, clustid=Int32.(df.year), reps=9999, auxwttype=:mammen, getci=false, rng=StableRNG(1231)) println(log, test) println(log, "\nboottest post_self=.04, weight(mammen) reps(9999) boottype(score)") @@ -69,10 +69,10 @@ println(log, "scoretest (post_self=.05) (post=-.02)") test = wildboottest([0 0 0 1; 0 0 1 0], [.05; -.02]; resp, predexog, hetrobust=false, scorebs=true, reps=0) println(log, test) println(log, "boottest (post_self=.08), boottype(score)") -test = wildboottest([0 0 0 1], [.08]; resp, predexog, hetrobust=false, scorebs=true) +test = wildboottest([0 0 0 1], [.08]; resp, predexog, hetrobust=false, scorebs=true, rng=StableRNG(1231)) println(log, test) println(log, "boottest (post_self=.05) (post=-.02), boottype(score)") -test = wildboottest([0 0 0 1; 0 0 1 0], [.05; -.02]; resp, predexog, hetrobust=false, scorebs=true) +test = wildboottest([0 0 0 1; 0 0 1 0], [.05; -.02]; resp, predexog, hetrobust=false, scorebs=true, rng=StableRNG(1231)) println(log, test) df = DataFrame(load(raw"d:\OneDrive\Documents\Macros\nlsw88.dta"))[:,[:wage; :tenure; :ttl_exp; :collgrad; :industry]] @@ -123,30 +123,30 @@ println(log, test) println(log, "\nivregress liml wage ttl_exp collgrad (tenure = union), cluster(industry)") println(log, "boottest tenure, ptype(equaltail) reps(9999)") -test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=df.industry, small=false, LIML=true, reps=9999, ptype=:equaltail, rng=StableRNG(1231)) +test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=df.industry, small=false, liml=true, reps=9999, ptype=:equaltail, rng=StableRNG(1231)) println(log, test) println(log, "\nivregress liml wage ttl_exp collgrad (tenure = union) if industry<., robust") println(log, "boottest 
tenure, ptype(equaltail) reps(99) noci") -test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, small=false, LIML=true, reps=99, ptype=:equaltail, getCI=false, rng=StableRNG(1231)) +test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, small=false, liml=true, reps=99, ptype=:equaltail, getci=false, rng=StableRNG(1231)) println(log, test) println(log, "\nivregress 2sls wage ttl_exp collgrad (tenure = union) if industry<., robust") println(log, "boottest tenure, ptype(equaltail) reps(99) noci") -test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, small=false, reps=99, ptype=:equaltail, getCI=false, rng=StableRNG(1231)) +test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, small=false, reps=99, ptype=:equaltail, getci=false, rng=StableRNG(1231)) println(log, test) println(log, "boottest collgrad tenure, ptype(equaltail) reps(99) noci") -test = wildboottest([0 0 0 1; 0 0 1 0], [0;0]; resp, predexog, predendog, inst, small=false, reps=99, ptype=:equaltail, getCI=false, rng=StableRNG(1231)) +test = wildboottest([0 0 0 1; 0 0 1 0], [0;0]; resp, predexog, predendog, inst, small=false, reps=99, ptype=:equaltail, getci=false, rng=StableRNG(1231)) println(log, test) println(log, "\nivregress 2sls wage ttl_exp collgrad (tenure = union) if industry<.") println(log, "boottest tenure, ptype(equaltail) reps(99) noci") -test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, hetrobust=false, small=false, reps=99, ptype=:equaltail, getCI=false, rng=StableRNG(1231)) +test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, hetrobust=false, small=false, reps=99, ptype=:equaltail, getci=false, rng=StableRNG(1231)) println(log, test) println(log, "boottest tenure collgrad, ptype(equaltail) reps(99) noci") -test = wildboottest([0 0 0 1; 0 0 1 0], [0;0]; resp, predexog, predendog, inst, hetrobust=false, small=false, reps=99, ptype=:equaltail, getCI=false, rng=StableRNG(1231)) +test = wildboottest([0 0 
0 1; 0 0 1 0], [0;0]; resp, predexog, predendog, inst, hetrobust=false, small=false, reps=99, ptype=:equaltail, getci=false, rng=StableRNG(1231)) println(log, test) println(log, "\nivregress 2sls wage ttl_exp collgrad (tenure = union), cluster(industry)") @@ -160,20 +160,20 @@ println(log, test) println(log, "\nivregress 2sls wage ttl_exp collgrad (tenure = union) if industry<., robust") println(log, "\nboottest tenure, ptype(equaltail) maxmatsize(0.005) noci weight(webb) ptype(equaltail)") -test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, small=false, auxwttype=:webb, ptype=:equaltail, rng=StableRNG(1231), getCI=false, maxmatsize=.005) +test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, small=false, auxwttype=:webb, ptype=:equaltail, rng=StableRNG(1231), getci=false, maxmatsize=.005) println(log, test) -test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=[collect(1:1000); collect(1:855)], small=false, auxwttype=:webb, ptype=:equaltail, getCI=false, rng=StableRNG(1231), maxmatsize=.005) +test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=[collect(1:1000); collect(1:855)], small=false, auxwttype=:webb, ptype=:equaltail, getci=false, rng=StableRNG(1231), maxmatsize=.005) println(log, test) println(log, "\nivregress 2sls wage ttl_exp collgrad (tenure = union), cluster(industry)") println(log, "\nboottest, ar") -test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=df.industry, small=false, ARubin=true, rng=StableRNG(1231)) +test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=df.industry, small=false, arubin=true, rng=StableRNG(1231)) println(log, test) plot(plotpoints(test)...) 
println(log, "\nboottest, ar nonull") -test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=df.industry, small=false, ARubin=true, rng=StableRNG(1231), imposenull=false) +test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=df.industry, small=false, arubin=true, rng=StableRNG(1231), imposenull=false) println(log, test) println(log, "\nscoretest tenure") @@ -195,7 +195,7 @@ resp, predexog = modelcols(f, df) ivf = @formula(tenure ~ collgrad + ttl_exp) ivf = apply_schema(ivf, schema(ivf, df)) predendog, inst = modelcols(ivf, df) -test = wildboottest([0 1], [0]; resp, predexog, predendog, inst, LIML=true, clustid=df.industry, small=false, rng=StableRNG(1231)) +test = wildboottest([0 1], [0]; resp, predexog, predendog, inst, liml=true, clustid=df.industry, small=false, rng=StableRNG(1231)) println(log, test) println(log, "\nivreg2 wage collgrad smsa race age (tenure = union married), cluster(industry) fuller(1)") @@ -209,15 +209,15 @@ resp, predexog = modelcols(f, df) ivf = @formula(tenure ~ union + married) ivf = apply_schema(ivf, schema(ivf, df)) predendog, inst = modelcols(ivf, df) -test = wildboottest([0 0 0 0 0 1], [0]; resp, predexog, predendog, inst, Fuller=1, clustid=df.industry, small=false, reps=9999, auxwttype=:webb, rng=StableRNG(1231)) +test = wildboottest([0 0 0 0 0 1], [0]; resp, predexog, predendog, inst, fuller=1, clustid=df.industry, small=false, reps=9999, auxwttype=:webb, rng=StableRNG(1231)) println(log, test) println(log, "boottest tenure, noci bootcluster(individual) weight(webb)") -test = wildboottest([0 0 0 0 0 1], [0]; resp, predexog, predendog, inst, Fuller=1, clustid=[collect(1:nrow(df)) df.industry], nbootclustvar=1, nerrclustvar=1, small=false, auxwttype=:webb, getCI=false, rng=StableRNG(1231)) +test = wildboottest([0 0 0 0 0 1], [0]; resp, predexog, predendog, inst, fuller=1, clustid=[collect(1:nrow(df)) df.industry], nbootclustvar=1, nerrclustvar=1, small=false, auxwttype=:webb, getci=false, 
rng=StableRNG(1231)) println(log, test) println(log, "boottest tenure, nograph bootcluster(collgrad) cluster(collgrad industry) weight(webb) reps(9999)") -test = wildboottest([0 0 0 0 0 1], [0]; resp, predexog, predendog, inst, Fuller=1, clustid=clustid=Matrix(df[:, [:collgrad, :industry]]), nbootclustvar=1, nerrclustvar=2, small=false, reps=9999, auxwttype=:webb, rng=StableRNG(1231)) +test = wildboottest([0 0 0 0 0 1], [0]; resp, predexog, predendog, inst, fuller=1, clustid=clustid=Matrix(df[:, [:collgrad, :industry]]), nbootclustvar=1, nerrclustvar=2, small=false, reps=9999, auxwttype=:webb, rng=StableRNG(1231)) println(log, test) println(log, "\nareg wage ttl_exp collgrad tenure [aw=hours] if occupation<., cluster(age) absorb(industry)") @@ -245,27 +245,27 @@ ivf = @formula(occupation ~ union + married) ivf = apply_schema(ivf, schema(ivf, df)) predendog, inst = modelcols(ivf, df) println(log, "boottest tenure") -test = wildboottest([0 0 1 0], [0]; resp, predexog, predendog, inst, clustid=df.industry, obswt=df.hours, feid=df.industry, fedfadj=1, rng=StableRNG(1231), LIML=true) +test = wildboottest([0 0 1 0], [0]; resp, predexog, predendog, inst, clustid=df.industry, obswt=df.hours, feid=df.industry, fedfadj=1, rng=StableRNG(1231), liml=true) println(log, test) println(log, "boottest occupation") -test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=df.industry, obswt=df.hours, feid=df.industry, fedfadj=1, rng=StableRNG(1231), LIML=true) +test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=df.industry, obswt=df.hours, feid=df.industry, fedfadj=1, rng=StableRNG(1231), liml=true) println(log, test) println(log, "\nivreghdfe wage ttl_exp collgrad tenure (occupation = union married) [aw=hours] if grade<., liml cluster(industry) absorb(age)") println(log, "boottest tenure") -test = wildboottest([0 0 1 0], [0]; resp, predexog, predendog, inst, clustid=df.industry, obswt=df.hours, feid=df.age, rng=StableRNG(1231), 
LIML=true) +test = wildboottest([0 0 1 0], [0]; resp, predexog, predendog, inst, clustid=df.industry, obswt=df.hours, feid=df.age, rng=StableRNG(1231), liml=true) println(log, test) println(log, "boottest collgrad tenure") -test = wildboottest([0 0 1 0; 0 1 0 0], [0; 0]; resp, predexog, predendog, inst, clustid=df.industry, obswt=df.hours, feid=df.age, rng=StableRNG(1231), LIML=true, reps=99) +test = wildboottest([0 0 1 0; 0 1 0 0], [0; 0]; resp, predexog, predendog, inst, clustid=df.industry, obswt=df.hours, feid=df.age, rng=StableRNG(1231), liml=true, reps=99) println(log, test) println(log, "boottest occupation") -test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=df.industry, obswt=df.hours, feid=df.age, rng=StableRNG(1231), LIML=true, gridmin=[-1], gridmax=[1]) +test = wildboottest([0 0 0 1], [0]; resp, predexog, predendog, inst, clustid=df.industry, obswt=df.hours, feid=df.age, rng=StableRNG(1231), liml=true, gridmin=[-1], gridmax=[1]) println(log, test) println(log, "boottest tenure | _b[collgrad] = 0") -test = wildboottest([0 0 1 0], [0]; R1=[0 1 0 0], r1=[0], resp, predexog , predendog, inst , clustid=df.industry, obswt=df.hours, feid=df.age, rng=StableRNG(1231), LIML=true, gridmin=[-1], gridmax=[1]) +test = wildboottest([0 0 1 0], [0]; R1=[0 1 0 0], r1=[0], resp, predexog , predendog, inst , clustid=df.industry, obswt=df.hours, feid=df.age, rng=StableRNG(1231), liml=true, gridmin=[-1], gridmax=[1]) println(log, test) -test = wildboottest([0 1 0], [0]; resp, predexog=predexog[:,[1,3]], predendog, inst=[inst predexog[:,2]], clustid=df.industry, obswt=df.hours, feid=df.age, rng=StableRNG(1231), LIML=true, gridmin=[-1], gridmax=[1]) +test = wildboottest([0 1 0], [0]; resp, predexog=predexog[:,[1,3]], predendog, inst=[inst predexog[:,2]], clustid=df.industry, obswt=df.hours, feid=df.age, rng=StableRNG(1231), liml=true, gridmin=[-1], gridmax=[1]) println(log, test) df = 
DataFrame(load(raw"d:\OneDrive\Documents\Macros\abdata.dta"))[:,[:n; :w; :k; :ys; :id; :year; :ind]] diff --git a/test/unittests.log b/test/unittests.log index 06efc5c..c8d49f2 100644 --- a/test/unittests.log +++ b/test/unittests.log @@ -80,12 +80,12 @@ p = 0.8345 boottest (post_self=.08), boottype(score) t(12) = -2.0664 -p = 0.0140 -CI = [0.03593 0.07418] +p = 0.0230 +CI = [0.03539 0.0747] boottest (post_self=.05) (post=-.02), boottype(score) F(2, 12) = 0.1837 -p = 0.7808 +p = 0.7948 constraint 1 ttl_exp = .2