diff --git a/.gitignore b/.gitignore index d7f1163..1caac9b 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,6 @@ __pycache__ .idea build +*.DS_Store +*~ +.*swp diff --git a/README.md b/README.md new file mode 100644 index 0000000..bac209a --- /dev/null +++ b/README.md @@ -0,0 +1,5 @@ +# bangmetric + +# License + +New BSD \ No newline at end of file diff --git a/bangmetric/__init__.py b/bangmetric/__init__.py index e4b0765..2f7a683 100644 --- a/bangmetric/__init__.py +++ b/bangmetric/__init__.py @@ -5,3 +5,7 @@ from rmse import * # pyflakes.ignore from kernel_analysis import * # pyflakes.ignore from nk import * # pyflakes.ignore +from utils import * # pyflakes.ignore +from human_metric import * # pyflakes.ignore + +__version__ = '0.0.1' \ No newline at end of file diff --git a/bangmetric/accuracy.py b/bangmetric/accuracy.py index f34b06d..6d67d12 100644 --- a/bangmetric/accuracy.py +++ b/bangmetric/accuracy.py @@ -8,49 +8,100 @@ __all__ = ['accuracy'] import numpy as np +from .utils import confusion_matrix_stats +DEFAULT_ACCURACY_MODE = 'binary' -def accuracy(y_true, y_pred, balanced=False): - """Computes the Accuracy of the predictions (also known as the - zero-one score). + +def accuracy(A, B=None, mode=DEFAULT_ACCURACY_MODE, \ + balanced=False, collation=None): + """Computes the accuracy of the predictions (also known as the + zero-one score). Depending on the choice of `mode`, this + function can take one of the following data format: + + * Binary classification outputs (`mode='binary'`; default) + * Confusion matrix (`mode='confusionmat'`) Parameters ---------- - y_true: array, shape = [n_samples] - True values, interpreted as strictly positive or not - (i.e. converted to binary). + A, B: + If `mode` is 'binary' (default): + + A: array, shape = [n_samples] + True values, interpreted as strictly positive or not + (i.e. converted to binary). + + B: array, shape = [n_samples] + Predicted values, interpreted as strictly positive or not + (i.e. converted to binary). - y_pred: array, shape = [n_samples] - Predicted values, interpreted as strictly positive or not - (i.e. converted to binary). + if `mode` is 'confusionmat': + + A: array-like, shape = [n_classes (true), n_classes (pred)] + Confusion matrix, where the element M_{rc} means + the number of times when the classifier or subject + guesses that a test sample in the r-th class + belongs to the c-th class. + + B: ignored balanced: bool, optional (default=False) Returns the balanced accuracy (equal weight for positive and negative values). + collation: None or array-like of shape = [n_groupings, + n_classes], optional (default=None) + Defines how to group entries in `M` to make sub-confusion matrices + when `mode` is 'confusionmat'. See `confusion_matrix_stats()` + for details. + Returns ------- - acc: float - Accuracy (zero-one score). + acc: float or array of shape = [n_groupings] + An accuracy score (zero-one score) or array of accuracies, + where each element corresponds to each grouping of + positives and negatives (when `mode` is 'confusionmat'). 
+ + References + ---------- + http://en.wikipedia.org/wiki/Accuracy """ - assert len(y_true) == len(y_pred) - assert np.isfinite(y_true).all() - assert np.isfinite(y_pred).all() - # -- "binarize" the arguments - y_true = np.array(y_true) > 0 - assert y_true.ndim == 1 + if mode == 'binary': + y_true, y_pred = A, B + assert len(y_true) == len(y_pred) + assert np.isfinite(y_true).all() + assert np.isfinite(y_pred).all() + + # -- "binarize" the arguments + y_true = np.array(y_true) > 0 + assert y_true.ndim == 1 + + y_pred = np.array(y_pred) > 0 + assert y_pred.ndim == 1 + + i_pos = y_true > 0 + i_neg = ~i_pos - y_pred = np.array(y_pred) > 0 - assert y_pred.ndim == 1 + P = float(i_pos.sum()) + N = float(i_neg.sum()) + TP = float((y_true[i_pos] == y_pred[i_pos]).sum()) + TN = float((y_true[i_neg] == y_pred[i_neg]).sum()) + + elif mode == 'confusionmat': + # A: confusion mat + # row means true classes, col means predicted classes + P, N, TP, TN, _, _ = confusion_matrix_stats(A, \ + collation=collation, fudge_mode='none') + + else: + raise ValueError('Invalid mode') if balanced: - pos = y_true > 0 - neg = ~pos - pos_acc = (y_true[pos] == y_pred[pos]).mean() - neg_acc = (y_true[neg] == y_pred[neg]).mean() - acc = (pos_acc + neg_acc) / 2. + sensitivity = TP / P + specificity = TN / N + acc = (sensitivity + specificity) / 2. else: - acc = (y_true == y_pred).mean() + acc = (TP + TN) / (P + N) return acc diff --git a/bangmetric/dprime.py b/bangmetric/dprime.py index a73c75d..658381d 100644 --- a/bangmetric/dprime.py +++ b/bangmetric/dprime.py @@ -2,61 +2,176 @@ # Authors: Nicolas Pinto # Nicolas Poilvert +# Ha Hong # # License: BSD __all__ = ['dprime'] import numpy as np +from scipy.stats import norm +from .utils import confusion_matrix_stats +DEFAULT_DPRIME_MODE = 'binary' -def dprime(y_pred, y_true): - """Computes the d-prime sensitivity index of the predictions. + +def dprime(A, B=None, mode=DEFAULT_DPRIME_MODE,\ + max_value=np.inf, min_value=-np.inf,\ + max_ppf_value=np.inf, min_ppf_value=-np.inf,\ + **kwargs): + """Computes the d-prime sensitivity index of predictions + from various data formats. Depending on the choice of + `mode`, this function can take one of the following format: + + * Binary classification outputs (`mode='binary'`; default) + * Positive and negative samples (`mode='sample'`) + * True positive and false positive rate (`mode='rate'`) + * Confusion matrix (`mode='confusionmat'`) Parameters ---------- - y_true: array, shape = [n_samples] - True values, interpreted as strictly positive or not - (i.e. converted to binary). - Could be in {-1, +1} or {0, 1} or {False, True}. + A, B: + If `mode` is 'binary' (default): + + A: array, shape = [n_samples], + True values, interpreted as strictly positive or not + (i.e. converted to binary). + Could be in {-1, +1} or {0, 1} or {False, True}. + + B: array, shape = [n_samples], + Predicted values (real). + + If `mode` is 'sample': + + A: array-like, + Positive sample values (e.g., raw projection values + of the positive classifier). + + B: array-like, + Negative sample values. + + If `mode` is 'rate': + + A: array-like, shape = [n_groupings] + True positive rates + + B: array-like, shape = [n_groupings] + False positive rates + + if `mode` is 'confusionmat': + + A: array-like, shape = [n_classes (true), n_classes (pred)] + Confusion matrix, where the element M_{rc} means + the number of times when the classifier or subject + guesses that a test sample in the r-th class + belongs to the c-th class. 
+ + B: ignored + + mode: {'binary', 'sample', 'rate', 'confusionmat'}, optional (default='binary') + Directs the interpretation of A and B. + + max_value: float, optional (default=np.inf) + Maximum possible d-prime value. + + min_value: float, optional (default=-np.inf) + Minimum possible d-prime value. + + max_ppf_value: float, optional (default=np.inf) + Maximum possible ppf value. + Used only when mode is 'rate' or 'confusionmat'. + + min_ppf_value: float, optional (default=-np.inf) + Minimum possible ppf value. + Used only when mode is 'rate' or 'confusionmat'. - y_pred: array, shape = [n_samples] - Predicted values (real). + kwargs: named arguments, optional + Passed to ``confusion_matrix_stats()`` and used only when `mode` + is 'confusionmat'. By assigning ``collation``, + ``fudge_mode``, ``fudge_factor``, etc. one can + change the behavior of d-prime computation + (see ``confusion_matrix_stats()`` for details). Returns ------- - dp: float or None - d-prime, None if d-prime is undefined + dp: float or array of shape = [n_groupings] + A d-prime value or array of d-primes, where each element + corresponds to each grouping of positives and negatives + (when `mode` is 'rate' or 'confusionmat'). References ---------- http://en.wikipedia.org/wiki/D' + http://en.wikipedia.org/wiki/Confusion_matrix """ # -- basic checks and conversion - assert len(y_true) == len(y_pred) - assert np.isfinite(y_true).all() - assert np.isfinite(y_pred).all() - - y_true = np.array(y_true) - assert y_true.ndim == 1 - - y_pred = np.array(y_pred) - assert y_pred.ndim == 1 - - # -- actual computation - pos = y_true > 0 - neg = ~pos - pos_mean = y_pred[pos].mean() - neg_mean = y_pred[neg].mean() - pos_var = y_pred[pos].var(ddof=1) - neg_var = y_pred[neg].var(ddof=1) - - num = pos_mean - neg_mean - div = np.sqrt((pos_var + neg_var) / 2.) - if div == 0: - dp = None + if mode == 'sample': + pos, neg = np.array(A), np.array(B) + + elif mode == 'binary': + y_true, y_pred = A, B + + assert len(y_true) == len(y_pred) + assert np.isfinite(y_true).all() + + y_true = np.array(y_true) + assert y_true.ndim == 1 + + y_pred = np.array(y_pred) + assert y_pred.ndim == 1 + + i_pos = y_true > 0 + i_neg = ~i_pos + + pos = y_pred[i_pos] + neg = y_pred[i_neg] + + elif mode == 'rate': + TPR, FPR = np.array(A), np.array(B) + assert TPR.shape == FPR.shape + + elif mode == 'confusionmat': + # A: confusion mat + # row means true classes, col means predicted classes + P, N, TP, _, FP, _ = confusion_matrix_stats(A, **kwargs) + + TPR = TP / P + FPR = FP / N + else: + raise ValueError('Invalid mode') + + # -- compute d' + if mode in ['sample', 'binary']: + assert np.isfinite(pos).all() + assert np.isfinite(neg).all() + + if pos.size <= 1: + raise ValueError('Not enough positive samples '\ + 'to estimate the variance') + if neg.size <= 1: + raise ValueError('Not enough negative samples '\ + 'to estimate the variance') + + pos_mean = pos.mean() + neg_mean = neg.mean() + pos_var = pos.var(ddof=1) + neg_var = neg.var(ddof=1) + + num = pos_mean - neg_mean + div = np.sqrt((pos_var + neg_var) / 2.) + dp = num / div + else: # mode is rate or confusionmat + ppfTPR = norm.ppf(TPR) + ppfFPR = norm.ppf(FPR) + ppfTPR = np.clip(ppfTPR, min_ppf_value, max_ppf_value) + ppfFPR = np.clip(ppfFPR, min_ppf_value, max_ppf_value) + dp = ppfTPR - ppfFPR + + # from Dan's suggestion about clipping d' values...
+ dp = np.clip(dp, min_value, max_value) + return dp diff --git a/bangmetric/human_metric.py b/bangmetric/human_metric.py new file mode 100644 index 0000000..9045226 --- /dev/null +++ b/bangmetric/human_metric.py @@ -0,0 +1,86 @@ +"""Metrics designed to compute the similarity to human data""" + +# Authors: Ha Hong +# +# License: BSD + +__all__ = ['central_ratio', 'consistency'] + +import numpy as np +from .correlation import spearman + +DTYPE = np.float64 + + +def central_ratio(num, dnm, centerfn=np.median, finite=True): + """Computes the central tendency (median, by default) of the ratios + between `num` and `dnm`. By default, this function gives the + "Turing ratio" used in the paper by Majaj, Hong, Solomon, and DiCarlo. + + Parameters + ---------- + num: array-like + Numerators of ratios + + dnm: array-like, shape = `num.shape()` + Denominators of ratios. `num` and `dnm` must have the same shape. + + centerfn: function, optional (default=np.median) + Function to compute the central tendency. + + finite: boolean, optional (default=True) + If True, only finite numbers in `num` and `dnm` will be used for + the computation of the central tendency. + """ + + num = np.array(num, dtype=DTYPE) + dnm = np.array(dnm, dtype=DTYPE) + assert num.shape == dnm.shape + + num = num.ravel() + dnm = dnm.ravel() + + if finite: + fi = np.isfinite(dnm) & np.isfinite(num) + num = num[fi] + dnm = dnm[fi] + + return centerfn(num / dnm) + + +def consistency(A, B, consistencyfn=spearman, finite=True): + """Computes the consistency (Spearman rank correlation coefficient, + by default) between two sets of data points (e.g., d' scores) `A` + and `B`. By default, this function gives the "consistency" + used in the paper by Majaj, Hong, Solomon, and DiCarlo. + + Parameters + ---------- + A: array-like + A set of data points + + B: array-like, shape = `A.shape()` + Another set of data points to compare with `A`. + `A` and `B` must have the same shape. + + consistencyfn: function, optional (default=bangmetric.spearman) + Function to compute the "consistency." + + finite: boolean, optional (default=True) + If True, only finite numbers in `A` and `B` will be used for + the computation of the consistency. 
+ """ + + A = np.array(A, dtype=DTYPE) + B = np.array(B, dtype=DTYPE) + assert A.shape == B.shape + + A = A.ravel() + B = B.ravel() + + if finite: + fi = np.isfinite(B) & np.isfinite(A) + A = A[fi] + B = B[fi] + + return consistencyfn(A, B) diff --git a/bangmetric/kernel_analysis.py b/bangmetric/kernel_analysis.py index 2094e27..c45ef13 100644 --- a/bangmetric/kernel_analysis.py +++ b/bangmetric/kernel_analysis.py @@ -106,7 +106,7 @@ def kanalysis(X, Y_true, n_components='all', quantiles=DEFAULT_QUANTILES): # Sort them l2_squared_sorted = l2_squared.ravel() - np.sort(l2_squared_sorted) + l2_squared_sorted.sort() # ------------------------------------------------------------------------ # -- Compute Kernel Analysis for each quantile diff --git a/bangmetric/tests/test_dprime.py b/bangmetric/tests/test_dprime.py index 374db1a..002f512 100644 --- a/bangmetric/tests/test_dprime.py +++ b/bangmetric/tests/test_dprime.py @@ -18,7 +18,7 @@ def test_basic(): y_true = np.array([False, True, True, True, False, False, False, True]) y_pred = np.array([0.491, -0.1, 0.64, 1.52, -0.23, -0.23, 1.579, 0.76]) dp = dprime(y_true, y_pred) - reference = 0.47387910220727386 + reference = 0.39541092958803298 assert abs(dp - reference) < ATOL @@ -27,7 +27,7 @@ def test_basic100(): y_true = rng.binomial(1, 0.5, size=100) y_pred = rng.randn(y_true.size) dp = dprime(y_true, y_pred) - reference = -0.39852816153409176 + reference = -0.20652941441924857 assert abs(dp - reference) < ATOL diff --git a/bangmetric/utils.py b/bangmetric/utils.py new file mode 100644 index 0000000..1c30c46 --- /dev/null +++ b/bangmetric/utils.py @@ -0,0 +1,138 @@ +"""Other utility functions""" + +# Authors: Ha Hong +# +# License: BSD + +__all__ = ['confusion_matrix_stats'] + +import numpy as np + +DEFAULT_FUDGE_FACTOR = 0.5 +DEFAULT_FUDGE_MODE = 'correction' +DTYPE = np.float64 + + +def confusion_matrix_stats(M, collation=None, \ + fudge_mode=DEFAULT_FUDGE_MODE, fudge_factor=DEFAULT_FUDGE_FACTOR): + """Computes classification statistics of sub-confusion matrices inside + the given original confusion matrix M. If no ``collation`` is given, + statistics for each one vs. rest sub-confusion matrix will be computed. + + Parameters + ---------- + M: array-like, shape = [n_classes (true), n_classes (pred)] + Confusion matrix, where the element M_{rc} means the number of + times when the classifier guesses that a test sample in the r-th class + belongs to the c-th class. + + collation: None or array-like of shape = [n_groupings, + n_classes], optional (default=None) + Defines how to group entries in `M` to make sub-confusion matrices. + Entries should be {+1, 0, -1}. A row defines one instance of grouping, + where +1, -1, and 0 designate the corresponding class as a + positive, negative, and ignored class, respectively. For example, + the following `collation` defines a 3-way one vs. rest grouping + (given that `M` is a 3x3 matrix): + [[+1, -1, -1], + [-1, +1, -1], + [-1, -1, +1]] + If `None` (default), one vs. rest grouping is assumed. + + fudge_factor: float, optional (default=0.5) + A small factor to avoid TPR, FPR, TNR, or FNR becoming 0 or 1. + Mostly intended for d-prime calculation. + + fudge_mode: str, optional (default='correction') + Determines how to apply the fudge factor.
Can be one of: + 'correction': apply only when needed + 'always': always apply the fudge factor + 'none': no fudging --- equivalent to ``fudge_factor=0`` + + Returns + ------- + P: array, shape = [n_groupings] + Array of the number of positives, where each element corresponds to + each grouping (row) defined by `collation`. + N: array, shape = [n_groupings] + Same as P, except that this is an array of the number of negatives. + TP: array, shape = [n_groupings] + Same as P, except an array of the number of true positives. + TN: array, shape = [n_groupings] + Same as P, except an array of the number of true negatives. + FP: array, shape = [n_groupings] + Same as P, except an array of the number of false positives. + FN: array, shape = [n_groupings] + Same as P, except an array of the number of false negatives. + + References + ---------- + http://en.wikipedia.org/wiki/Confusion_matrix + http://en.wikipedia.org/wiki/Receiver_operating_characteristic + """ + + # M: confusion matrix, row means true classes, col means predicted classes + M = np.array(M) + assert M.ndim == 2 + assert M.shape[0] == M.shape[1] + n_classes = M.shape[0] + + if collation is None: + # make it one vs. rest. E.g., for a 3-classes case: + # [[+1, -1, -1], + # [-1, +1, -1], + # [-1, -1, +1]] + collation = -np.ones((n_classes, n_classes), dtype='int8') + collation += 2 * np.eye(n_classes, dtype='int8') + else: + collation = np.array(collation, dtype='int8') + assert collation.ndim == 2 + assert collation.shape[1] == n_classes + + # P0: number of positives, for each class + # P: number of positives, for each grouping + # N: number of negatives, for each grouping + # TP: number of true positives, for each grouping + # FP: number of false positives, for each grouping + P0 = np.sum(M, axis=1) + P = np.array([np.sum(P0[coll == +1]) \ + for coll in collation], dtype=DTYPE) + N = np.array([np.sum(P0[coll == -1]) \ + for coll in collation], dtype=DTYPE) + TP = np.array([np.sum(M[coll == +1][:, coll == +1]) \ + for coll in collation], dtype=DTYPE) + TN = np.array([np.sum(M[coll == -1][:, coll == -1]) \ + for coll in collation], dtype=DTYPE) + FP = np.array([np.sum(M[coll == -1][:, coll == +1]) \ + for coll in collation], dtype=DTYPE) + FN = np.array([np.sum(M[coll == +1][:, coll == -1]) \ + for coll in collation], dtype=DTYPE) + + # -- application of fudge factor + if fudge_mode == 'none': # no fudging + pass + + elif fudge_mode == 'always': # always apply fudge factor + TP += fudge_factor + FP += fudge_factor + TN += fudge_factor + FN += fudge_factor + P += 2. * fudge_factor + N += 2. 
* fudge_factor + + elif fudge_mode == 'correction': # apply fudge factor only when needed + TP[TP == P] = P[TP == P] - fudge_factor # 100% correct + TP[TP == 0] = fudge_factor # 0% correct + FP[FP == N] = N[FP == N] - fudge_factor # always FAR + FP[FP == 0] = fudge_factor # no false alarm + + TN[TN == N] = N[TN == N] - fudge_factor + TN[TN == 0] = fudge_factor + FN[FN == P] = P[FN == P] - fudge_factor + FN[FN == 0] = fudge_factor + + else: + raise ValueError('Invalid fudge_mode') + + # -- done + return P, N, TP, TN, FP, FN diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 7f2ecdb..0000000 --- a/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -numpy>=1.6.1 -scikit-learn>=0.10 diff --git a/setup.py b/setup.py old mode 100755 new mode 100644 index 8802ad3..e989deb --- a/setup.py +++ b/setup.py @@ -1,252 +1,106 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" distribute- and pip-enabled setup.py """ - -import logging -import os -import re - -# ----- overrides ----- - -# set these to anything but None to override the automatic defaults -packages = None -package_name = None -package_data = None -scripts = None -requirements_file = None -requirements = None -dependency_links = None -use_numpy = True - -# --------------------- - - -# ----- control flags ----- - -# fallback to setuptools if distribute isn't found -setup_tools_fallback = False - -# don't include subdir named 'tests' in package_data -skip_tests = True - -# print some extra debugging info -debug = True - -# ------------------------- - -if debug: - logging.basicConfig(level=logging.DEBUG) -# distribute import and testing -try: - import distribute_setup - distribute_setup.use_setuptools() - logging.debug("distribute_setup.py imported and used") -except ImportError: - # fallback to setuptools? - # distribute_setup.py was not in this directory - if not (setup_tools_fallback): - import setuptools - if not (hasattr(setuptools, '_distribute') and \ - setuptools._distribute): - raise ImportError(\ - "distribute was not found and fallback " \ - "to setuptools was not allowed") - else: - logging.debug("distribute_setup.py not found, \ - defaulted to system distribute") - else: - logging.debug("distribute_setup.py not found, " \ - "defaulting to system setuptools") - -import setuptools - - -def find_scripts(): - return [s for s in setuptools.findall('scripts/') \ - if os.path.splitext(s)[1] != '.pyc'] - - -def package_to_path(package): - """ - Convert a package (as found by setuptools.find_packages) - e.g. "foo.bar" to usable path - e.g. 
"foo/bar" - - No idea if this works on windows - """ - return package.replace('.', '/') - - -def find_subdirectories(package): - """ - Get the subdirectories within a package - This will include resources (non-submodules) and submodules - """ - try: - subdirectories = os.walk(package_to_path(package)).next()[1] - except StopIteration: - subdirectories = [] - return subdirectories - - -def subdir_findall(dir, subdir): - """ - Find all files in a subdirectory and return paths relative to dir - - This is similar to (and uses) setuptools.findall - However, the paths returned are in the form needed for package_data - """ - strip_n = len(dir.split('/')) - path = '/'.join((dir, subdir)) - return ['/'.join(s.split('/')[strip_n:]) for s in setuptools.findall(path)] - - -def find_package_data(packages): - """ - For a list of packages, find the package_data - - This function scans the subdirectories of a package and considers all - non-submodule subdirectories as resources, including them in - the package_data - - Returns a dictionary suitable for setup(package_data=) - """ - package_data = {} - for package in packages: - package_data[package] = [] - for subdir in find_subdirectories(package): - if '.'.join((package, subdir)) in packages: # skip submodules - logging.debug("skipping submodule %s/%s" % (package, subdir)) - continue - if skip_tests and (subdir == 'tests'): # skip tests - logging.debug("skipping tests %s/%s" % (package, subdir)) - continue - package_data[package] += \ - subdir_findall(package_to_path(package), subdir) - return package_data - - -def parse_requirements(file_name): - """ - from: - http://cburgmer.posterous.com/pip-requirementstxt-and-setuppy - """ - requirements = [] - with open(file_name, 'r') as f: - for line in f: - if re.match(r'(\s*#)|(\s*$)', line): - continue - if re.match(r'\s*-e\s+', line): - requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$',\ - r'\1', line).strip()) - elif re.match(r'\s*-f\s+', line): - pass - else: - requirements.append(line.strip()) - return requirements - - -def parse_dependency_links(file_name): - """ - from: - http://cburgmer.posterous.com/pip-requirementstxt-and-setuppy - """ - dependency_links = [] - with open(file_name) as f: - for line in f: - if re.match(r'\s*-[ef]\s+', line): - dependency_links.append(re.sub(r'\s*-[ef]\s+',\ - '', line)) - return dependency_links - -# ----------- Override defaults here ---------------- -if packages is None: - packages = setuptools.find_packages() - -if len(packages) == 0: - raise Exception("No valid packages found") - -if package_name is None: - package_name = packages[0] - -if package_data is None: - package_data = find_package_data(packages) - -if scripts is None: - scripts = find_scripts() - -if requirements_file is None: - requirements_file = 'requirements.txt' - -if os.path.exists(requirements_file): - if requirements is None: - requirements = parse_requirements(requirements_file) - if dependency_links is None: - dependency_links = parse_dependency_links(requirements_file) -else: - if requirements is None: - requirements = [] - if dependency_links is None: - dependency_links = [] - -if debug: - logging.debug("Module name: %s" % package_name) - for package in packages: - logging.debug("Package: %s" % package) - logging.debug("\tData: %s" % str(package_data[package])) - logging.debug("Scripts:") - for script in scripts: - logging.debug("\tScript: %s" % script) - logging.debug("Requirements:") - for req in requirements: - logging.debug("\t%s" % req) - logging.debug("Dependency links:") - for dl in 
dependency_links: - logging.debug("\t%s" % dl) - -from distutils.core import Command -class PyTest(Command): - user_options = [] - def initialize_options(self): - pass - def finalize_options(self): - pass - def run(self): - import sys,subprocess - errno = subprocess.call([sys.executable, 'runtests.py']) - raise SystemExit(errno) - - -if __name__ == '__main__': - - sub_packages = packages - - if use_numpy: - from numpy.distutils.misc_util import Configuration - config = Configuration(package_name, '', None) - - for sub_package in sub_packages: - print 'adding', sub_package - config.add_subpackage(sub_package) - - from numpy.distutils.core import setup - kwargs = config.todict() - kwargs['cmdclass'] = dict(test=PyTest) - setup(**kwargs) - - else: - setuptools.setup( - name=package_name, - version='dev', - packages=packages, - scripts=scripts, - - package_data=package_data, - include_package_data=True, - - install_requires=requirements, - dependency_links=dependency_links, - - cmdclass=dict(test=PyTest), - ) +"""A setuptools based setup module. + +See: +https://packaging.python.org/en/latest/distributing.html +https://github.com/pypa/sampleproject +""" + +# Always prefer setuptools over distutils +from setuptools import setup, find_packages +# To use a consistent encoding +from codecs import open +from os import path + +import bangmetric + + +here = path.abspath(path.dirname(__file__)) + +# Get the long description from the README file +with open(path.join(here, 'README.md'), encoding='utf-8') as f: + long_description = f.read() + +setup( + name='bangmetric', + + # Versions should comply with PEP440. For a discussion on single-sourcing + # the version across setup.py and the project code, see + # https://packaging.python.org/en/latest/single_source_version.html + version=bangmetric.__version__, + + description='', + long_description=long_description, + + # The project's main homepage. + url='https://github.com/dicarlolab/bangmetric', + + # Author details + author='DiCarlo Lab', + + # Choose your license + license='New BSD', + + # See https://pypi.python.org/pypi?%3Aaction=list_classifiers + classifiers=[ + # How mature is this project? Common values are + # 3 - Alpha + # 4 - Beta + # 5 - Production/Stable + + # Pick your license as you wish (should match "license" above) + 'License :: OSI Approved :: BSD License', + + # Specify the Python versions you support here. In particular, ensure + # that you indicate whether you support Python 2, Python 3 or both. + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + ], + + # What does your project relate to? + keywords='machine learning metrics', + + # You can just specify the packages manually here if your project is + # simple. Or you can use find_packages(). + packages=find_packages(exclude=['contrib', 'docs', 'tests']), + + # Alternatively, if you want to distribute just a my_module.py, uncomment + # this: + # py_modules=["my_module"], + + # List run-time dependencies here. These will be installed by pip when + # your project is installed. For an analysis of "install_requires" vs pip's + # requirements files see: + # https://packaging.python.org/en/latest/requirements.html + install_requires=['numpy', 'scipy', 'scikit-learn'], + + # List additional groups of dependencies here (e.g. development + # dependencies).
You can install these using the following syntax, + # for example: + # $ pip install -e .[dev,test] + # extras_require={ + # 'dev': ['check-manifest'], + # 'test': ['coverage'], + # }, + + # If there are data files included in your packages that need to be + # installed, specify them here. If using Python 2.6 or less, then these + # have to be included in MANIFEST.in as well. + # package_data={ + # 'sample': ['package_data.dat'], + # }, + + # Although 'package_data' is the preferred approach, in some case you may + # need to place data files outside of your packages. See: + # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa + # In this case, 'data_file' will be installed into '/my_data' + # data_files=[('my_data', ['data/data_file'])], + + # To provide executable scripts, use entry points in preference to the + # "scripts" keyword. Entry points provide cross-platform support and allow + # pip to create the appropriate form of executable for the target platform. + # entry_points={ + # 'console_scripts': [ + # 'sample=sample:main', + # ], + # }, +) \ No newline at end of file
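A few usage sketches for the interfaces this patch adds. These are illustrative only: the function names and signatures come from the code above, but every numeric input (labels, decision values, confusion matrices) is made up. First, the two call styles `accuracy()` now supports:

```python
import numpy as np
from bangmetric import accuracy

# 'binary' mode (default): true labels vs. predicted labels,
# both interpreted as "strictly positive or not".
y_true = np.array([1, 1, 0, 0, 1])
y_pred = np.array([1, 0, 0, 0, 1])
print(accuracy(y_true, y_pred))                 # plain zero-one accuracy
print(accuracy(y_true, y_pred, balanced=True))  # mean of sensitivity and specificity

# 'confusionmat' mode: rows are true classes, columns are predicted classes.
# With the default one-vs-rest collation this returns one accuracy per class.
M = np.array([[8, 1, 1],
              [2, 7, 1],
              [0, 2, 8]])
print(accuracy(M, mode='confusionmat'))
print(accuracy(M, mode='confusionmat', balanced=True))
```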
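The same idea for `dprime()` and its four input formats; the decision values below come from a synthetic Gaussian toy, and the 'rate' call shows what the new ppf-clipping arguments are for:

```python
import numpy as np
from bangmetric import dprime

rng = np.random.RandomState(0)
pos = rng.randn(500) + 1.0   # decision values of positive samples (synthetic)
neg = rng.randn(500)         # decision values of negative samples

# 'sample' mode: the two sets of values are given directly.
print(dprime(pos, neg, mode='sample'))

# 'binary' mode (default): labels plus real-valued predictions,
# the same call order the tests use: dprime(y_true, y_pred).
y_true = np.concatenate([np.ones_like(pos), np.zeros_like(neg)])
y_pred = np.concatenate([pos, neg])
print(dprime(y_true, y_pred))

# 'rate' mode: d' = ppf(TPR) - ppf(FPR).  A hit rate of exactly 1.0 (or a
# false-alarm rate of 0.0) would give an infinite d'; clipping the ppf
# values keeps the result finite.
print(dprime([0.84, 1.0], [0.16, 0.0], mode='rate',
             max_ppf_value=4.0, min_ppf_value=-4.0))
```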
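Underneath both of those, `confusion_matrix_stats()` does the counting. A sketch of how `collation` and the fudge options interact, again with an invented 3x3 matrix:

```python
import numpy as np
from bangmetric import confusion_matrix_stats, dprime

# Rows are true classes, columns are the classes the classifier/subject chose.
M = np.array([[10,  0,  0],
              [ 3,  6,  1],
              [ 1,  2,  7]])

# Default collation is one-vs-rest, so each output has one entry per class.
P, N, TP, TN, FP, FN = confusion_matrix_stats(M, fudge_mode='none')
print(TP / P)   # per-class hit rates
print(FP / N)   # per-class false-alarm rates

# Class 0 is recognized 10/10 times, so without fudging its d' would be
# infinite; the default fudge_mode='correction' first moves TP to P - 0.5.
print(dprime(M, mode='confusionmat'))

# Custom grouping: class 0 as positive, class 2 as negative, class 1 ignored.
print(dprime(M, mode='confusionmat', collation=[[+1, 0, -1]]))
```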
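Since scikit-learn is already in `install_requires`, its `confusion_matrix()` (rows = true, columns = predicted, i.e. the convention assumed above) can be fed straight into the 'confusionmat' mode; the labels here are arbitrary:

```python
import numpy as np
from sklearn.metrics import confusion_matrix
from bangmetric import accuracy, dprime

y_true = np.array([0, 0, 1, 1, 2, 2, 2, 1, 0, 2])
y_hat = np.array([0, 1, 1, 1, 2, 2, 1, 1, 0, 2])

M = confusion_matrix(y_true, y_hat)
print(accuracy(M, mode='confusionmat'))   # one-vs-rest accuracy per class
print(dprime(M, mode='confusionmat'))     # one-vs-rest d' per class
```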
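Finally, the two human-similarity metrics in `human_metric.py` only need two equally shaped score arrays, e.g. per-task d' values for a model and for humans; the values below are hypothetical:

```python
import numpy as np
from bangmetric import central_ratio, consistency

model_dp = np.array([1.2, 0.8, 2.0, 1.5, np.nan])   # one score per task
human_dp = np.array([1.0, 0.9, 1.8, 1.6, 2.1])

# Median of the element-wise model/human ratios ("Turing ratio"); the pair
# containing NaN is dropped because finite=True by default.
print(central_ratio(model_dp, human_dp))

# Spearman rank correlation between the two score vectors ("consistency").
print(consistency(model_dp, human_dp))

# The summary statistic can be swapped out, e.g. a mean instead of a median.
print(central_ratio(model_dp, human_dp, centerfn=np.mean))
```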