From 1175c9f3121f5224f2b1ec1bfb49b404a9679d6f Mon Sep 17 00:00:00 2001
From: Xiongtao Dai <dai@ucdavis.edu>
Date: Fri, 14 Aug 2015 14:44:10 -0700
Subject: [PATCH] fixed export import

---
 DESCRIPTION                  | 13 ++++--
 NAMESPACE                    | 40 ++++++++++------
 R/CheckData.R                |  3 ++
 R/CheckOptions.R             |  3 ++
 R/CreateOptions.R            | 62 ++++++++++++-------------
 R/FPCA.R                     |  5 +-
 R/FPCAder.R                  |  3 +-
 R/TruncateObs.R              |  2 +
 R/createCorrPlot.R           |  2 +
 R/createDesignPlot.R         |  4 ++
 R/createDiagnosticsPlot.R    |  1 +
 R/createScreePlot.R          |  4 ++
 R/fitted.FPCA.R              |  2 +
 R/makePACEinputs.R           |  3 ++
 R/pkgname.R                  | 36 +++++++++++++++
 R/print.FPCA.R               |  8 +++-
 R/sparsify.R                 |  2 +
 R/wiener.R                   |  3 ++
 man/CheckData.Rd             |  4 +-
 man/CheckOptions.Rd          |  4 +-
 man/CreateOptions.Rd         | 88 ++++++++++++++++++++++++------------
 man/FPCA.Rd                  |  6 +--
 man/FPCAder.Rd               |  4 +-
 man/SetOptions.Rd            |  2 +-
 man/createDiagnosticsPlot.Rd |  2 +-
 man/makePACEinputs.Rd        |  4 +-
 man/print.FPCA.Rd            | 17 +++++++
 man/tPACE.Rd                 | 37 +++++++++++++++
 src/trapzRcppP.cpp           |  4 +-
 29 files changed, 272 insertions(+), 96 deletions(-)
 create mode 100644 R/pkgname.R
 create mode 100644 man/print.FPCA.Rd
 create mode 100644 man/tPACE.Rd

diff --git a/DESCRIPTION b/DESCRIPTION
index aee21fc9..e3d7de31 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,15 +1,20 @@
 Package: tPACE
 Type: Package
 Title: PACE package for Functional Data Analysis and Empirical Dynamics
+URL: https://github.com/hadjipantelis/tPACE
 Version: 0.0.0.9000
-Date: 2015-05-22
-Author: Dai, Hadjipantelis, Hao, Mueller and Wang
+Date: 2015-08-14
+Author: Xiongtao Dai,
+    Pantelis Z. Hadjipantelis,
+    Hao Ji,
+    Hans-Georg Mueller,
+    Jane-Ling Wang
 Maintainer: Pantelis Z. Hadjipantelis <pantelis@ucdavis.edu>
 Description: PACE is a versatile package that provides implementation of various methods of Functional Data Analysis (FDA) and Empirical Dynamics. The core of this package is Functional Principal Component Analysis (FPCA), a key technique for functional data analysis, for sparsely or densely sampled random trajectories and time courses, via the Principal Analysis by Conditional Estimation (PACE) algorithm. PACE is useful for the analysis of data that have been generated by a sample of underlying (but usually not fully observed) random trajectories. It does not rely on pre-smoothing of trajectories, which is problematic if functional data are sparsely sampled. PACE provides options for functional regression and correlation, for Longitudinal Data Analysis, the analysis of stochastic processes from samples of realized trajectories, and for the analysis of underlying dynamics.
-Depends: R (>= 3.1.1), rARPACK, gtools, Hmisc, caret, plot3D, MASS, pracma, numDeriv +Depends: R (>= 3.1.1) License: BSD_3_clause LazyData: false -Imports: Rcpp (>= 0.11.5) +Imports: Rcpp (>= 0.11.5), RcppEigen, rARPACK, gtools, Hmisc, caret, plot3D, MASS, pracma, numDeriv LinkingTo: Rcpp, RcppEigen Suggests: testthat, rgl NeedsCompilation: yes diff --git a/NAMESPACE b/NAMESPACE index 608c3ce8..99616a1d 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,15 +1,29 @@ -exportPattern("^[[:alpha:]]+") -useDynLib(tPACE) -importFrom(Rcpp, evalCpp) -importFrom("grDevices", "dev.new") -importFrom("graphics", "abline", "barplot", "grid", "legend", "lines", - "matplot", "par", "plot", "points") -importFrom("stats", "approx", "cov", "dist", "lm", "median", "na.omit", - "predict", "rnorm", "spline", "var") -importFrom("utils", "installed.packages") +# Generated by roxygen2 (4.1.1): do not edit by hand - - -export(FPCA) -S3method(fitted, FPCA) S3method(print,FPCA) +export(CheckData) +export(CheckOptions) +export(FPCA) +export(FPCAder) +export(createCorrPlot) +export(createDesignPlot) +export(createDiagnosticsPlot) +export(createScreePlot) +export(makePACEinputs) +export(sparsify) +export(wiener) +import(Hmisc) +import(MASS) +import(Rcpp) +import(RcppEigen) +import(caret) +import(gtools) +import(plot3D) +import(rARPACK) +importFrom(numDeriv,grad) +importFrom(pracma,meshgrid) +importFrom(pracma,midpoint) +importFrom(pracma,mod) +importFrom(pracma,ones) +importFrom(pracma,uniq) +useDynLib(tPACE) diff --git a/R/CheckData.R b/R/CheckData.R index 1ec1991c..2fa9db46 100644 --- a/R/CheckData.R +++ b/R/CheckData.R @@ -1,3 +1,5 @@ +#' Check data format +#' #' Check the form and basic structure of the functional data 'y' and the recorded times 'tt'. #' #' @param y is a n-by-1 list of vectors @@ -5,6 +7,7 @@ #' @return logical #' @examples #' 1 + 3 +#' @export CheckData = function(y,t){ diff --git a/R/CheckOptions.R b/R/CheckOptions.R index 892db087..78f25563 100644 --- a/R/CheckOptions.R +++ b/R/CheckOptions.R @@ -1,3 +1,5 @@ +#' Check option format +#' #' Check if the options structure is valid and set the ones that are NULL #' #' @param n is a total number of sample curves @@ -5,6 +7,7 @@ #' @return logical #' @examples #' 1 + 3 +#' @export CheckOptions = function(t,optns,n){ diff --git a/R/CreateOptions.R b/R/CreateOptions.R index 5b532ca3..fe728a1a 100644 --- a/R/CreateOptions.R +++ b/R/CreateOptions.R @@ -1,34 +1,34 @@ -#' Create the options list used by FPCA -#' -#' @param bwcov : bandwidth value for covariance function; positive numeric - default: determine automatically based on 'bwcovMethod' -#' @param bwcovMethod : bandwidth choice method for covariance function; 'GMeanAndGCV','CV','GCV - default: 'GMeanAndGCV'') -#' @param bwmu : bandwidth value for mean function is using CV or GCV; positive numeric - default: determine automatically based on 'bwmuMethod' -#' @param bwmuMethod : bandwidth choice method for mean function; 'GMeanAndGCV','CV','GCV - default: 'GMeanAndGCV'' -#' @param dataType : do we have sparse or dense functional data; 'Sparse', 'Dense', 'DenseWithMV', 'p>>n' - default: determine automatically based on 'IsRegular' -# '@param diagnosticsPlot : make diagnostics plot (design plot, mean, scree plot and first k (<=3) eigenfunctions); logical - default: FALSE} -#' @param error : assume measurement error in the dataset; logical - default: TRUE -#' @param FVEthreshold : Fraction-of-Variance-Explained threshold used during the SVD of the fitted covar. 
function; numeric (0,1] - default: 0.9999 -#' @param kernel : smoothing kernel choice, common for mu and covariance; "rect", "gauss", "epan", "gausvar", "quar" - default: "epan" for dense data else "gauss" -#' @param methodCov : method to estimate covariance; 'PACE','RARE','CrossSectional' - automatically determined, user input ignored -#' @param methodMu : method to estimate mu; 'PACE','RARE','CrossSectional' - automatically determined, user input ignored -#' @param maxK : maximum number of principal components to consider; positive integer - default: min(20, N-1), N : # of curves -#' @param methodXi : method to estimate the PC scores; 'CE', 'IN' - default: 'CE' -#' @param ntest1 : number of curves used for CV when choosing bandwidth; [1,N] - default: min(30, N-1), N : # of curves -#' @param nRegGrid : number of support points in each direction of covariance surface; numeric - default: 51 -#' @param numBins : number of bins to bin the data into; positive integer > 10, default: NULL -#' @param selectionMethod : the method of choosing the number of principal components K; 'FVE','AIC','BIC' : default 'FVE' - only 'FVE' avaiable now/ default 'FVE') -#' @param shrink : apply shrinkage to estimates of random coefficients (dense data only); logical - default: FALSE -#' @param outPercent : 2-element vector in [0,1] indicating the outPercent data in the boundary - default (0,1) -#' @param rho : truncation threshold for the iterative residual. 'cv': choose rho by leave-one-observation out cross-validation; 'no': use the iterative sigma2 estimate - default "cv". -#' @param rotationCut : 2-element vector in [0,1] indicating the percent of data truncated during sigma^2 estimation; default (0.25, 0.75)) -#' @param useBinnedData : 'FORCE' (Enforce the # of bins), 'AUTO' (Select the # of bins automatically), 'OFF' (Do not bin) - default: 'AUTO' -#' @param useBins: testing purpose: whether to bin the same observed time points when 2D smoothing; logical - default: FALSE -#' @param userCov : user-defined smoothed covariance function; numerical matrix - default: NULL -#' @param userMu : user-defined smoothed mean function; numerical vector - default: NULL -#' @param verbose : display diagnostic messages; logical - default: FALSE -#' @return an option list -#' @examples -#' optLst = CreateOptions(kernel='rect'); # Create options list with rectangular kernel +# #' Create the options list used by FPCA +# #' +# #' @param bwcov : bandwidth value for covariance function; positive numeric - default: determine automatically based on 'bwcovMethod' +# #' @param bwcovMethod : bandwidth choice method for covariance function; 'GMeanAndGCV','CV','GCV - default: 'GMeanAndGCV'') +# #' @param bwmu : bandwidth value for mean function is using CV or GCV; positive numeric - default: determine automatically based on 'bwmuMethod' +# #' @param bwmuMethod : bandwidth choice method for mean function; 'GMeanAndGCV','CV','GCV - default: 'GMeanAndGCV'' +# #' @param dataType : do we have sparse or dense functional data; 'Sparse', 'Dense', 'DenseWithMV', 'p>>n' - default: determine automatically based on 'IsRegular' +# # '@param diagnosticsPlot : make diagnostics plot (design plot, mean, scree plot and first k (<=3) eigenfunctions); logical - default: FALSE} +# #' @param error : assume measurement error in the dataset; logical - default: TRUE +# #' @param FVEthreshold : Fraction-of-Variance-Explained threshold used during the SVD of the fitted covar. 
function; numeric (0,1] - default: 0.9999 +# #' @param kernel : smoothing kernel choice, common for mu and covariance; "rect", "gauss", "epan", "gausvar", "quar" - default: "epan" for dense data else "gauss" +# #' @param methodCov : method to estimate covariance; 'PACE','RARE','CrossSectional' - automatically determined, user input ignored +# #' @param methodMu : method to estimate mu; 'PACE','RARE','CrossSectional' - automatically determined, user input ignored +# #' @param maxK : maximum number of principal components to consider; positive integer - default: min(20, N-1), N : # of curves +# #' @param methodXi : method to estimate the PC scores; 'CE', 'IN' - default: 'CE' +# #' @param ntest1 : number of curves used for CV when choosing bandwidth; [1,N] - default: min(30, N-1), N : # of curves +# #' @param nRegGrid : number of support points in each direction of covariance surface; numeric - default: 51 +# #' @param numBins : number of bins to bin the data into; positive integer > 10, default: NULL +# #' @param selectionMethod : the method of choosing the number of principal components K; 'FVE','AIC','BIC' : default 'FVE' - only 'FVE' avaiable now/ default 'FVE') +# #' @param shrink : apply shrinkage to estimates of random coefficients (dense data only); logical - default: FALSE +# #' @param outPercent : 2-element vector in [0,1] indicating the outPercent data in the boundary - default (0,1) +# #' @param rho : truncation threshold for the iterative residual. 'cv': choose rho by leave-one-observation out cross-validation; 'no': use the iterative sigma2 estimate - default "cv". +# #' @param rotationCut : 2-element vector in [0,1] indicating the percent of data truncated during sigma^2 estimation; default (0.25, 0.75)) +# #' @param useBinnedData : 'FORCE' (Enforce the # of bins), 'AUTO' (Select the # of bins automatically), 'OFF' (Do not bin) - default: 'AUTO' +# #' @param useBins: testing purpose: whether to bin the same observed time points when 2D smoothing; logical - default: FALSE +# #' @param userCov : user-defined smoothed covariance function; numerical matrix - default: NULL +# #' @param userMu : user-defined smoothed mean function; numerical vector - default: NULL +# #' @param verbose : display diagnostic messages; logical - default: FALSE +# #' @return an option list +# #' @examples +# #' optLst = CreateOptions(kernel='rect'); # Create options list with rectangular kernel diff --git a/R/FPCA.R b/R/FPCA.R index 954ce99d..3122e96d 100644 --- a/R/FPCA.R +++ b/R/FPCA.R @@ -63,11 +63,12 @@ #' res <- FPCA(sampWiener$yList, sampWiener$tList, list(dataType='Sparse', error=FALSE, kernel='epan', verbose=TRUE)) #' createCorrPlot(res, 'Fitted') #' @references -#' \cite{Yao, Fang, Hans-Georg Müller, and Jane-Ling Wang. "Functional data analysis for sparse longitudinal data." Journal of the American Statistical Association 100, no. 470 (2005): 577-590. (Sparse data FPCA)} +#' \cite{Yao, Fang, Hans-Georg Mueller, and Jane-Ling Wang. "Functional data analysis for sparse longitudinal data." Journal of the American Statistical Association 100, no. 470 (2005): 577-590. (Sparse data FPCA)} #' -#' \cite{Liu, Bitao, and Hans-Georg Müller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)} +#' \cite{Liu, Bitao, and Hans-Georg Mueller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." 
Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)} #' #' \cite{Castro, P. E., W. H. Lawton, and E. A. Sylvestre. "Principal modes of variation for processes with continuous sample curves." Technometrics 28, no. 4 (1986): 329-337. (Dense data FPCA)} +#' @export FPCA = function(y, t, optns = list()){ diff --git a/R/FPCAder.R b/R/FPCAder.R index 406387de..46069407 100644 --- a/R/FPCAder.R +++ b/R/FPCAder.R @@ -13,7 +13,8 @@ #' res <- FPCA(sampWiener$yList, sampWiener$tList, list(dataType='Sparse', error=FALSE, kernel='epan', verbose=TRUE)) #' resder <- FPCAder(res) #' @references -#' \cite{Liu, Bitao, and Hans-Georg Müller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)} +#' \cite{Liu, Bitao, and Hans-Georg Mueller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)} +#' @export FPCAder <- function (fpcaObj, ...) { diff --git a/R/TruncateObs.R b/R/TruncateObs.R index 6f8b9217..a3d271a9 100644 --- a/R/TruncateObs.R +++ b/R/TruncateObs.R @@ -1,3 +1,5 @@ +# TODO: Roxygen documentation + TruncateObs <- function(y, t, obsGrid, buff=.Machine$double.eps * max(abs(obsGrid)) * 3) { tmpInd <- mapply(function(yVec, tVec) { diff --git a/R/createCorrPlot.R b/R/createCorrPlot.R index 672a61d2..8eadbf41 100644 --- a/R/createCorrPlot.R +++ b/R/createCorrPlot.R @@ -1,3 +1,5 @@ +# TODO: Roxygen documentation +#' @export # This function creates the covariance surface plot based on the # results from FPCA() or FPCder() ###### diff --git a/R/createDesignPlot.R b/R/createDesignPlot.R index 71995000..59892661 100644 --- a/R/createDesignPlot.R +++ b/R/createDesignPlot.R @@ -1,3 +1,7 @@ +# TODO: Roxygen documentation +#' @export + + # This function creates the design plot of the data ###### # Input: diff --git a/R/createDiagnosticsPlot.R b/R/createDiagnosticsPlot.R index 61cbf468..d4e1443b 100644 --- a/R/createDiagnosticsPlot.R +++ b/R/createDiagnosticsPlot.R @@ -14,6 +14,7 @@ #' sampWiener <- sparsify(sampWiener, pts, 10) #' res <- FPCA(sampWiener$yList, sampWiener$tList, list(dataType='Sparse', error=FALSE, kernel='epan', verbose=TRUE)) #' createDiagnosticsPlot(sampWiener$tList, res) +#' @export createDiagnosticsPlot <-function(t, ret, openNewDev = TRUE){ if(openNewDev){ diff --git a/R/createScreePlot.R b/R/createScreePlot.R index 4b87f2ed..a6a2563d 100644 --- a/R/createScreePlot.R +++ b/R/createScreePlot.R @@ -1,3 +1,7 @@ +# TODO: Roxygen documentation +#' @export + + createScreePlot <-function(ys){ diff --git a/R/fitted.FPCA.R b/R/fitted.FPCA.R index a99a25ac..3e7f12dc 100644 --- a/R/fitted.FPCA.R +++ b/R/fitted.FPCA.R @@ -1,3 +1,5 @@ +# TODO: Roxygen documentation + fitted.FPCA <- function (object, objectDer = NULL, ...) 
 {
   # Combine the zero-meaned fitted values (ZMFV) and the interpolated mean (IM)
   # to get the final estimates
diff --git a/R/makePACEinputs.R b/R/makePACEinputs.R
index f2dfad98..9c6c9d38 100644
--- a/R/makePACEinputs.R
+++ b/R/makePACEinputs.R
@@ -1,9 +1,12 @@
+#' Format FPCA input
+#'
 #' Turn vector inputs to the list so they can be used in FPCA
 #'
 #' @param IDs : n-by-1 vector of subject IDs
 #' @param tVec : n-by-1 vector of measurement times
 #' @param yVec : n-by-1 vector of measurements from the variable of interest
 #' @return L : list containing 3 lists each of length 'm', 'm' being the number of unique subject IDs
+#' @export
 
 makePACEinputs <- function(IDs, tVec, yVec){
 
diff --git a/R/pkgname.R b/R/pkgname.R
new file mode 100644
index 00000000..a77c0396
--- /dev/null
+++ b/R/pkgname.R
@@ -0,0 +1,36 @@
+#' PACE: Principal Analysis by Conditional Expectation
+#'
+#' PACE package for Functional Data Analysis and Empirical Dynamics.
+#'
+#' PACE is a versatile package that provides implementation of various methods
+#' of Functional Data Analysis (FDA) and Empirical Dynamics. The core of this
+#' package is Functional Principal Component Analysis (FPCA), a key technique
+#' for functional data analysis, for sparsely or densely sampled random
+#' trajectories and time courses, via the Principal Analysis by Conditional
+#' Expectation (PACE) algorithm. PACE is useful for the analysis of data that
+#' have been generated by a sample of underlying (but usually not fully
+#' observed) random trajectories. It does not rely on pre-smoothing of
+#' trajectories, which is problematic if functional data are sparsely sampled.
+#' PACE provides options for functional regression and correlation, for
+#' Longitudinal Data Analysis, the analysis of stochastic processes from
+#' samples of realized trajectories, and for the analysis of underlying
+#' dynamics.
+#'
+#' Functions for fitting, plotting and summarizing FPCA models are
+#' provided in this package. For a list of functions provided in this package
+#' see \code{help(package='tPACE')}.
+#'
+#' @author
+#' Xiongtao Dai \email{dai@@ucdavis.edu},
+#' Pantelis Z. Hadjipantelis \email{pantelis@@ucdavis.edu},
+#' Hao Ji \email{haoji@@ucdavis.edu}
+#'
+#' Maintainer: Pantelis Z. Hadjipantelis \email{pantelis@@ucdavis.edu}
+#'
+#' @docType package
+#' @name tPACE
+#' @useDynLib tPACE
+#' @import rARPACK gtools Hmisc caret plot3D MASS Rcpp RcppEigen
+#' @importFrom numDeriv grad
+#' @importFrom pracma meshgrid midpoint mod ones uniq
+NULL
diff --git a/R/print.FPCA.R b/R/print.FPCA.R
index 8ff622fa..c9bd5d0e 100644
--- a/R/print.FPCA.R
+++ b/R/print.FPCA.R
@@ -1,4 +1,10 @@
-print.FPCA <- function(x){
+#' Print an FPCA object
+#'
+#' @param x An FPCA object.
+#' @param ... Not used.
+#' +#' @export +print.FPCA <- function(x, ...){ obj = x; cat("Functional Principal Components Object for", tolower(obj$optns$dataType), "data.\n\n") cat("The optimal number of components selected is:", length(obj$lambda),"and \nthe first k (<=3) corresponding eigenvalues are: "); diff --git a/R/sparsify.R b/R/sparsify.R index 14229f65..e39e522d 100644 --- a/R/sparsify.R +++ b/R/sparsify.R @@ -1,3 +1,5 @@ +# TODO: roxygen comments +#' @export # sparsify samp # samp: a matrix of samples, with rows containing the samples # pts: a vector of grid points, should be from 0 to 1 diff --git a/R/wiener.R b/R/wiener.R index f20dbb9e..cc9b5f6d 100644 --- a/R/wiener.R +++ b/R/wiener.R @@ -1,3 +1,6 @@ +# TODO: Roxygen documentation +#' @export + # A test on standard Wiener process (brownian motion) # n: sample sizeDiss # pts: a vector of grid points, should be from 0 to 1 diff --git a/man/CheckData.Rd b/man/CheckData.Rd index e3978803..953f038b 100644 --- a/man/CheckData.Rd +++ b/man/CheckData.Rd @@ -1,8 +1,8 @@ -% Generated by roxygen2 (4.1.1.9000): do not edit by hand +% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/CheckData.R \name{CheckData} \alias{CheckData} -\title{Check the form and basic structure of the functional data 'y' and the recorded times 'tt'.} +\title{Check data format} \usage{ CheckData(y, t) } diff --git a/man/CheckOptions.Rd b/man/CheckOptions.Rd index 98283f83..c40945a6 100644 --- a/man/CheckOptions.Rd +++ b/man/CheckOptions.Rd @@ -1,8 +1,8 @@ -% Generated by roxygen2 (4.1.1.9000): do not edit by hand +% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/CheckOptions.R \name{CheckOptions} \alias{CheckOptions} -\title{Check if the options structure is valid and set the ones that are NULL} +\title{Check option format} \usage{ CheckOptions(t, optns, n) } diff --git a/man/CreateOptions.Rd b/man/CreateOptions.Rd index f5aa0126..28a15835 100644 --- a/man/CreateOptions.Rd +++ b/man/CreateOptions.Rd @@ -1,8 +1,10 @@ -% Generated by roxygen2 (4.1.1.9000): do not edit by hand +% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/CreateOptions.R \name{CreateOptions} \alias{CreateOptions} -\title{Create the options list used by FPCA} +\title{#Create the options list used by FPCA +# +#} \usage{ CreateOptions(bwmu = NULL, bwmuMethod = NULL, bwuserCov = NULL, bwuserCovGcv = NULL, ntest1 = NULL, selectionMethod = NULL, @@ -14,63 +16,91 @@ CreateOptions(bwmu = NULL, bwmuMethod = NULL, bwuserCov = NULL, useBinnedData = NULL, rotationCut = NULL) } \arguments{ -\item{bwmu}{: bandwidth value for mean function is using CV or GCV; positive numeric - default: determine automatically based on 'bwmuMethod'} +\item{bwmu}{: bandwidth value for mean function is using CV or GCV; positive numeric - default: determine automatically based on 'bwmuMethod' +#} -\item{bwmuMethod}{: bandwidth choice method for mean function; 'GMeanAndGCV','CV','GCV - default: 'GMeanAndGCV''} +\item{bwmuMethod}{: bandwidth choice method for mean function; 'GMeanAndGCV','CV','GCV - default: 'GMeanAndGCV'' +#} -\item{ntest1}{: number of curves used for CV when choosing bandwidth; [1,N] - default: min(30, N-1), N : # of curves} +\item{ntest1}{: number of curves used for CV when choosing bandwidth; [1,N] - default: min(30, N-1), N : # of curves +#} -\item{selectionMethod}{: the method of choosing the number of principal components K; 'FVE','AIC','BIC' : default 'FVE' - only 'FVE' avaiable now/ default 'FVE')} +\item{selectionMethod}{: 
the method of choosing the number of principal components K; 'FVE','AIC','BIC' : default 'FVE' - only 'FVE' avaiable now/ default 'FVE') +#} -\item{FVEthreshold}{: Fraction-of-Variance-Explained threshold used during the SVD of the fitted covar. function; numeric (0,1] - default: 0.9999} +\item{FVEthreshold}{: Fraction-of-Variance-Explained threshold used during the SVD of the fitted covar. function; numeric (0,1] - default: 0.9999 +#} -\item{maxK}{: maximum number of principal components to consider; positive integer - default: min(20, N-1), N : # of curves} +\item{maxK}{: maximum number of principal components to consider; positive integer - default: min(20, N-1), N : # of curves +#} -\item{dataType}{: do we have sparse or dense functional data; 'Sparse', 'Dense', 'DenseWithMV', 'p>>n' - default: determine automatically based on 'IsRegular'} +\item{dataType}{: do we have sparse or dense functional data; 'Sparse', 'Dense', 'DenseWithMV', 'p>>n' - default: determine automatically based on 'IsRegular' +#} -\item{error}{: assume measurement error in the dataset; logical - default: TRUE} +\item{error}{: assume measurement error in the dataset; logical - default: TRUE +#} -\item{nRegGrid}{: number of support points in each direction of covariance surface; numeric - default: 51} +\item{nRegGrid}{: number of support points in each direction of covariance surface; numeric - default: 51 +#} -\item{methodXi}{: method to estimate the PC scores; 'CE', 'IN' - default: 'CE'} +\item{methodXi}{: method to estimate the PC scores; 'CE', 'IN' - default: 'CE' +#} -\item{shrink}{: apply shrinkage to estimates of random coefficients (dense data only); logical - default: FALSE} +\item{shrink}{: apply shrinkage to estimates of random coefficients (dense data only); logical - default: FALSE +#} -\item{kernel}{: smoothing kernel choice, common for mu and covariance; "rect", "gauss", "epan", "gausvar", "quar" - default: "epan" for dense data else "gauss"} +\item{kernel}{: smoothing kernel choice, common for mu and covariance; "rect", "gauss", "epan", "gausvar", "quar" - default: "epan" for dense data else "gauss" +#} -\item{numBins}{: number of bins to bin the data into; positive integer > 10, default: NULL} +\item{numBins}{: number of bins to bin the data into; positive integer > 10, default: NULL +#} -\item{rho}{: truncation threshold for the iterative residual. 'cv': choose rho by leave-one-observation out cross-validation; 'no': use the iterative sigma2 estimate - default "cv".} +\item{rho}{: truncation threshold for the iterative residual. 'cv': choose rho by leave-one-observation out cross-validation; 'no': use the iterative sigma2 estimate - default "cv". 
+#} -\item{verbose}{: display diagnostic messages; logical - default: FALSE} +\item{verbose}{: display diagnostic messages; logical - default: FALSE +#} -\item{userMu}{: user-defined smoothed mean function; numerical vector - default: NULL} +\item{userMu}{: user-defined smoothed mean function; numerical vector - default: NULL +#} -\item{userCov}{: user-defined smoothed covariance function; numerical matrix - default: NULL} +\item{userCov}{: user-defined smoothed covariance function; numerical matrix - default: NULL +#} -\item{methodCov}{: method to estimate covariance; 'PACE','RARE','CrossSectional' - automatically determined, user input ignored} +\item{methodCov}{: method to estimate covariance; 'PACE','RARE','CrossSectional' - automatically determined, user input ignored +#} -\item{methodMu}{: method to estimate mu; 'PACE','RARE','CrossSectional' - automatically determined, user input ignored} +\item{methodMu}{: method to estimate mu; 'PACE','RARE','CrossSectional' - automatically determined, user input ignored +#} -\item{outPercent}{: 2-element vector in [0,1] indicating the outPercent data in the boundary - default (0,1)} +\item{outPercent}{: 2-element vector in [0,1] indicating the outPercent data in the boundary - default (0,1) +#} -\item{useBinnedData}{: 'FORCE' (Enforce the # of bins), 'AUTO' (Select the # of bins automatically), 'OFF' (Do not bin) - default: 'AUTO'} +\item{useBinnedData}{: 'FORCE' (Enforce the # of bins), 'AUTO' (Select the # of bins automatically), 'OFF' (Do not bin) - default: 'AUTO' +#} -\item{rotationCut}{: 2-element vector in [0,1] indicating the percent of data truncated during sigma^2 estimation; default (0.25, 0.75))} +\item{rotationCut}{: 2-element vector in [0,1] indicating the percent of data truncated during sigma^2 estimation; default (0.25, 0.75)) +#} -\item{bwcov}{: bandwidth value for covariance function; positive numeric - default: determine automatically based on 'bwcovMethod'} +\item{bwcov}{: bandwidth value for covariance function; positive numeric - default: determine automatically based on 'bwcovMethod' +#} -\item{bwcovMethod}{: bandwidth choice method for covariance function; 'GMeanAndGCV','CV','GCV - default: 'GMeanAndGCV'')} +\item{bwcovMethod}{: bandwidth choice method for covariance function; 'GMeanAndGCV','CV','GCV - default: 'GMeanAndGCV'') +#} -\item{useBins:}{testing purpose: whether to bin the same observed time points when 2D smoothing; logical - default: FALSE} +\item{useBins:}{testing purpose: whether to bin the same observed time points when 2D smoothing; logical - default: FALSE +#} } \value{ an option list +# } \description{ -Create the options list used by FPCA +#Create the options list used by FPCA +# +# } \examples{ -optLst = CreateOptions(kernel='rect'); # Create options list with rectangular kernel +#optLst = CreateOptions(kernel='rect'); # Create options list with rectangular kernel } diff --git a/man/FPCA.Rd b/man/FPCA.Rd index db773b09..d9ee9001 100644 --- a/man/FPCA.Rd +++ b/man/FPCA.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1.9000): do not edit by hand +% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/FPCA.R \name{FPCA} \alias{FPCA} @@ -77,9 +77,9 @@ res <- FPCA(sampWiener$yList, sampWiener$tList, list(dataType='Sparse', error=FA createCorrPlot(res, 'Fitted') } \references{ -\cite{Yao, Fang, Hans-Georg Müller, and Jane-Ling Wang. "Functional data analysis for sparse longitudinal data." Journal of the American Statistical Association 100, no. 470 (2005): 577-590. 
(Sparse data FPCA)} +\cite{Yao, Fang, Hans-Georg Mueller, and Jane-Ling Wang. "Functional data analysis for sparse longitudinal data." Journal of the American Statistical Association 100, no. 470 (2005): 577-590. (Sparse data FPCA)} -\cite{Liu, Bitao, and Hans-Georg Müller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)} +\cite{Liu, Bitao, and Hans-Georg Mueller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)} \cite{Castro, P. E., W. H. Lawton, and E. A. Sylvestre. "Principal modes of variation for processes with continuous sample curves." Technometrics 28, no. 4 (1986): 329-337. (Dense data FPCA)} } diff --git a/man/FPCAder.Rd b/man/FPCAder.Rd index 145ef648..48efb227 100644 --- a/man/FPCAder.Rd +++ b/man/FPCAder.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1.9000): do not edit by hand +% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/FPCAder.R \name{FPCAder} \alias{FPCAder} @@ -22,6 +22,6 @@ res <- FPCA(sampWiener$yList, sampWiener$tList, list(dataType='Sparse', error=FA resder <- FPCAder(res) } \references{ -\cite{Liu, Bitao, and Hans-Georg Müller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)} +\cite{Liu, Bitao, and Hans-Georg Mueller. "Estimating derivatives for samples of sparsely observed functions, with application to online auction dynamics." Journal of the American Statistical Association 104, no. 486 (2009): 704-717. (Sparse data FPCA)} } diff --git a/man/SetOptions.Rd b/man/SetOptions.Rd index 83d210d3..0ef30e46 100644 --- a/man/SetOptions.Rd +++ b/man/SetOptions.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1.9000): do not edit by hand +% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/SetOptions.R \name{SetOptions} \alias{SetOptions} diff --git a/man/createDiagnosticsPlot.Rd b/man/createDiagnosticsPlot.Rd index dfa0b156..d17d2c16 100644 --- a/man/createDiagnosticsPlot.Rd +++ b/man/createDiagnosticsPlot.Rd @@ -1,4 +1,4 @@ -% Generated by roxygen2 (4.1.1.9000): do not edit by hand +% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/createDiagnosticsPlot.R \name{createDiagnosticsPlot} \alias{createDiagnosticsPlot} diff --git a/man/makePACEinputs.Rd b/man/makePACEinputs.Rd index 4b7699ed..338f4ffb 100644 --- a/man/makePACEinputs.Rd +++ b/man/makePACEinputs.Rd @@ -1,8 +1,8 @@ -% Generated by roxygen2 (4.1.1.9000): do not edit by hand +% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/makePACEinputs.R \name{makePACEinputs} \alias{makePACEinputs} -\title{Turn vector inputs to the list so they can be used in FPCA} +\title{Format FPCA input} \usage{ makePACEinputs(IDs, tVec, yVec) } diff --git a/man/print.FPCA.Rd b/man/print.FPCA.Rd new file mode 100644 index 00000000..dd29b638 --- /dev/null +++ b/man/print.FPCA.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2 (4.1.1): do not edit by hand +% Please edit documentation in R/print.FPCA.R +\name{print.FPCA} +\alias{print.FPCA} +\title{Print an FPCA object} +\usage{ +\method{print}{FPCA}(x, ...) 
+}
+\arguments{
+\item{x}{An FPCA object.}
+
+\item{...}{Not used.}
+}
+\description{
+Print an FPCA object
+}
+
diff --git a/man/tPACE.Rd b/man/tPACE.Rd
new file mode 100644
index 00000000..85fdcb76
--- /dev/null
+++ b/man/tPACE.Rd
@@ -0,0 +1,37 @@
+% Generated by roxygen2 (4.1.1): do not edit by hand
+% Please edit documentation in R/pkgname.R
+\docType{package}
+\name{tPACE}
+\alias{tPACE}
+\alias{tPACE-package}
+\title{PACE: Principal Analysis by Conditional Expectation}
+\description{
+PACE package for Functional Data Analysis and Empirical Dynamics.
+}
+\details{
+PACE is a versatile package that provides implementation of various methods
+of Functional Data Analysis (FDA) and Empirical Dynamics. The core of this
+package is Functional Principal Component Analysis (FPCA), a key technique
+for functional data analysis, for sparsely or densely sampled random
+trajectories and time courses, via the Principal Analysis by Conditional
+Expectation (PACE) algorithm. PACE is useful for the analysis of data that
+have been generated by a sample of underlying (but usually not fully
+observed) random trajectories. It does not rely on pre-smoothing of
+trajectories, which is problematic if functional data are sparsely sampled.
+PACE provides options for functional regression and correlation, for
+Longitudinal Data Analysis, the analysis of stochastic processes from
+samples of realized trajectories, and for the analysis of underlying
+dynamics.
+
+Functions for fitting, plotting and summarizing FPCA models are
+provided in this package. For a list of functions provided in this package
+see \code{help(package='tPACE')}.
+}
+\author{
+Xiongtao Dai \email{dai@ucdavis.edu},
+Pantelis Z. Hadjipantelis \email{pantelis@ucdavis.edu},
+Hao Ji \email{haoji@ucdavis.edu}
+
+Maintainer: Pantelis Z. Hadjipantelis \email{pantelis@ucdavis.edu}
+}
+
diff --git a/src/trapzRcppP.cpp b/src/trapzRcppP.cpp
index e5684cb4..a7ecad65 100644
--- a/src/trapzRcppP.cpp
+++ b/src/trapzRcppP.cpp
@@ -1,4 +1,4 @@
-#include <omp.h>
+// #include <omp.h> // OSX Mavericks doesn't have omp.h with Xcode
 #include <Rcpp.h>
 // [[Rcpp::depends(Rcpp)]]
 
@@ -26,7 +26,7 @@ double trapzRcppP(const Rcpp::NumericVector X, const Rcpp::NumericVector Y){
 }
 if(is_sorted(X.begin(),X.end())){
   double trapzsum = 0;
-  #pragma omp for
+  // #pragma omp for
   for (unsigned int ind = 0; ind < X.size()-1; ++ind){
     trapzsum += 0.5 * (X[ind + 1] - X[ind]) *(Y[ind] + Y[ind + 1]);
   }
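
For reference, here is how the entry points exported by this patch fit together end to end. This is a minimal sketch assembled from the @examples already shipped in R/FPCA.R and R/createDiagnosticsPlot.R; the grid spacing and the wiener() call signature (number of curves, then the grid) are assumptions based on the comments in R/wiener.R ("pts: a vector of grid points, should be from 0 to 1"), not documented behaviour.

library(tPACE)

pts <- seq(0, 1, by = 0.05)                    # observation grid on [0, 1]; spacing chosen for illustration
sampWiener <- wiener(20, pts)                  # assumed signature: 20 sample curves on grid pts
sampWiener <- sparsify(sampWiener, pts, 10)    # sparsify the simulated sample, as in the packaged @examples

# Fit sparse-data FPCA through the explicitly exported entry point.
res <- FPCA(sampWiener$yList, sampWiener$tList,
            list(dataType = 'Sparse', error = FALSE, kernel = 'epan', verbose = TRUE))

print(res)                                     # dispatches to the documented print.FPCA method
createDiagnosticsPlot(sampWiener$tList, res)   # design plot, mean, scree plot, leading eigenfunctions
createCorrPlot(res, 'Fitted')                  # fitted correlation surface

Note that the NAMESPACE is now generated by roxygen2, so only functions carrying an @export tag (CheckData, CheckOptions, FPCA, FPCAder, the create*Plot helpers, makePACEinputs, sparsify, wiener) are exported; functions still lacking a tag, such as CreateOptions and TruncateObs, become internal once the old exportPattern("^[[:alpha:]]+") rule is gone.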