
Commit

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Feb 5, 2024
1 parent b5274c4 commit dead27b
Showing 5 changed files with 23 additions and 0 deletions.
1 change: 1 addition & 0 deletions src/resample/__init__.py
@@ -11,6 +11,7 @@
- Permutation-based variants of traditional statistical tests (t-test, K-S test, etc.)
- Tools for working with empirical distributions (CDF, quantile, etc.)
"""

from importlib.metadata import version

from resample import bootstrap, empirical, jackknife, permutation
6 changes: 6 additions & 0 deletions src/resample/bootstrap.py
@@ -143,6 +143,7 @@ def resample(
replicated sample. This is a stricter constraint than that offered by the
balanced bootstrap, which only guarantees that classes have the original
proportions over all replicates, but not within each individual replicate.
"""
sample_np = np.atleast_1d(sample)
n_sample = len(sample_np)
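As a usage sketch of the stratified mode described above (the `strata` keyword is assumed here; it is not visible in this excerpt):

import numpy as np
from resample.bootstrap import resample

sample = np.array([0, 0, 0, 1])  # two classes in a 3:1 ratio
# assuming `strata` takes per-observation class labels, every replicate
# should then reproduce the 3:1 class proportions exactly
for replicate in resample(sample, size=3, strata=sample, random_state=1):
    print(np.bincount(replicate))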
@@ -278,6 +279,7 @@ def bootstrap(
>>> fb = bootstrap(np.mean, x, size=10000, random_state=1)
>>> print(f"f(x) = {fx:.1f} +/- {np.std(fb):.1f}")
f(x) = 4.5 +/- 0.9
"""
gen = resample(sample, *args, **kwargs)
if args:
@@ -331,6 +333,7 @@ def bias(
This function has special space requirements: it needs to hold `size` replicates of
the original sample in memory at once. The balanced bootstrap is recommended over
the ordinary bootstrap for bias estimation, since it tends to converge faster.
"""
thetas = []
if args:
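A short sketch of the memory note above; the keyword names follow the doctests elsewhere in this diff:

import numpy as np
from resample.bootstrap import bias

x = np.arange(10)
# all `size` replicates are held in memory at once, so keep `size` moderate;
# np.var without ddof is biased, which bias() should detect
print(bias(np.var, x, size=1000, random_state=1))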
@@ -390,6 +393,7 @@ def bias_corrected(
8.2
>>> round(bias_corrected(np.var, x, size=10000, random_state=1), 1)
9.1
"""
return fn(sample, *args) - bias(fn, sample, *args, **kwargs)

@@ -429,6 +433,7 @@ def variance(
>>> x = np.arange(10)
>>> round(variance(np.mean, x, size=10000, random_state=1), 1)
0.8
"""
thetas = bootstrap(fn, sample, *args, **kwargs)
return np.var(thetas, ddof=1, axis=0)
@@ -488,6 +493,7 @@ def confidence_interval(
increases the number of function evaluations in a direct comparison to
'percentile'. However, the increase in accuracy should compensate for this, so that
fewer bootstrap replicas are needed overall to achieve the same accuracy.
"""
if args and not isinstance(args[0], Collection):
import warnings
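A comparison sketch of the trade-off described above, assuming a `ci_method` keyword that accepts 'percentile' and 'bca' (the keyword name is not visible in this excerpt):

import numpy as np
from resample.bootstrap import confidence_interval

x = np.arange(10)
# 'bca' costs more function evaluations per replica, but should need
# fewer replicas than 'percentile' for the same accuracy
low, high = confidence_interval(np.mean, x, size=1000, ci_method="bca", random_state=1)
print(low, high)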
3 changes: 3 additions & 0 deletions src/resample/empirical.py
@@ -28,6 +28,7 @@ def cdf_gen(sample: "ArrayLike") -> Callable[[np.ndarray], np.ndarray]:
-------
callable
Empirical distribution function.
"""
sample_np = np.sort(sample)
n = len(sample_np)
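For illustration, assuming the standard right-continuous ECDF, the returned callable maps each point to the fraction of sample values at or below it:

import numpy as np
from resample.empirical import cdf_gen

x = np.arange(10)
F = cdf_gen(x)
# with x = 0..9, F(4.5) counts the five values <= 4.5, i.e. 0.5
print(F(np.array([-1.0, 4.5, 100.0])))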
@@ -49,6 +50,7 @@ def quantile_function_gen(
-------
callable
Empirical quantile function.
"""

class QuantileFn:
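And a sketch of the inverse under the same assumptions:

import numpy as np
from resample.empirical import quantile_function_gen

x = np.arange(10)
q = quantile_function_gen(x)
print(q(0.5))  # an empirical median of the sample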
@@ -89,6 +91,7 @@ def influence(
-------
ndarray
Empirical influence values.
"""
sample = np.atleast_1d(sample)
n = len(sample)
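A minimal sketch; for np.mean the empirical influence values are expected to track the deviations x_i - mean(x):

import numpy as np
from resample.empirical import influence

x = np.arange(10)
# influence of each observation on the estimate np.mean(x)
print(influence(np.mean, x))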
5 changes: 5 additions & 0 deletions src/resample/jackknife.py
@@ -75,6 +75,7 @@ def resample(
... r2.append(x) # x is now a view into the same array in memory
>>> print(r2)
[array([1, 2]), array([1, 2]), array([1, 2])]
"""
sample_np = np.atleast_1d(sample)
n_sample = len(sample_np)
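The safe counterpart to the view semantics demonstrated above is to copy each replicate before keeping a reference to it:

import numpy as np
from resample.jackknife import resample

x = np.array([1, 2, 3])
r = []
for xi in resample(x, copy=False):
    r.append(xi.copy())  # materialize the view before the next iteration overwrites it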
@@ -174,6 +175,7 @@ def jackknife(
>>> fb = jackknife(np.mean, x)
>>> print(f"f(x) = {fx:.1f} +/- {np.std(fb):.1f}")
f(x) = 4.5 +/- 0.3
"""
gen = resample(sample, *args, copy=False)
if args:
@@ -219,6 +221,7 @@ def bias(
-0.9
>>> round(bias(lambda x: np.var(x, ddof=1), x), 1)
0.0
"""
sample = np.atleast_1d(sample)
n = len(sample)
@@ -266,6 +269,7 @@ def bias_corrected(
8.2
>>> round(bias_corrected(np.var, x), 1)
9.2
"""
sample = np.atleast_1d(sample)
n = len(sample)
@@ -307,6 +311,7 @@ def variance(
>>> x = np.arange(10)
>>> round(variance(np.mean, x), 1)
0.9
"""
# formula is (n - 1) / n * sum((fj - mean(fj)) ** 2)
# = np.var(fj, ddof=0) * (n - 1)
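The identity in this comment can be checked directly; the following only verifies the algebra, independent of the library:

import numpy as np

fj = np.array([1.0, 2.0, 4.0])
n = len(fj)
lhs = (n - 1) / n * np.sum((fj - np.mean(fj)) ** 2)
rhs = np.var(fj, ddof=0) * (n - 1)
assert np.isclose(lhs, rhs)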
8 changes: 8 additions & 0 deletions src/resample/permutation.py
@@ -65,6 +65,7 @@ class TestResult:
hypothesis. See https://en.wikipedia.org/wiki/P-value for details.
samples: array
Values of the test statistic from the permuted samples.
"""

statistic: float
@@ -142,6 +143,7 @@ def usp(
Returns
-------
TestResult
"""
if size <= 0:
raise ValueError("size must be positive")
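A hypothetical usage sketch, assuming `usp` takes a two-dimensional contingency table of counts as its first argument (only the `size` check is visible in this excerpt):

import numpy as np
from resample.permutation import usp

w = np.array([[5, 3, 2],
              [2, 4, 6]])  # hypothetical 2x3 contingency table
result = usp(w, size=999, random_state=1)
print(result.statistic, result.pvalue)  # fields of TestResult, see above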
@@ -238,6 +240,7 @@ def same_population(
Returns
-------
TestResult
"""
if size <= 0:
raise ValueError("size must be positive")
@@ -317,6 +320,7 @@ def anova(
-----
https://en.wikipedia.org/wiki/One-way_analysis_of_variance
https://en.wikipedia.org/wiki/F-test
"""
kwargs["transform"] = None
return same_population(_ANOVA(), x, y, *args, **kwargs)
@@ -350,6 +354,7 @@ def kruskal(
Notes
-----
https://en.wikipedia.org/wiki/Kruskal%E2%80%93Wallis_one-way_analysis_of_variance
"""
kwargs["transform"] = None
return same_population(_kruskal, x, y, *args, **kwargs)
@@ -377,6 +382,7 @@ def pearsonr(x: "ArrayLike", y: "ArrayLike", **kwargs: Any) -> TestResult:
Returns
-------
TestResult
"""
if len(x) != len(y):
raise ValueError("x and y must have the same length")
@@ -403,6 +409,7 @@ def spearmanr(x: "ArrayLike", y: "ArrayLike", **kwargs: Any) -> TestResult:
Returns
-------
TestResult
"""
if len(x) != len(y):
raise ValueError("x and y must have the same length")
@@ -432,6 +439,7 @@ def ttest(x: "ArrayLike", y: "ArrayLike", **kwargs: Any) -> TestResult:
Returns
-------
TestResult
"""
kwargs["transform"] = np.abs
return same_population(_ttest, x, y, **kwargs)
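A combined sketch for the two-sample and correlation tests above; each should return the `TestResult` described earlier (keyword names assumed to be forwarded to `same_population`):

import numpy as np
from resample.permutation import pearsonr, ttest

rng = np.random.default_rng(1)
x = rng.normal(size=20)
y = x + rng.normal(size=20)
print(ttest(x, y, size=999, random_state=1).pvalue)
print(pearsonr(x, y, size=999, random_state=1).pvalue)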
