diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 4c399247..861fddc7 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -2,9 +2,9 @@ name: enterprise CI targets on: push: - branches: [ master ] + branches: [ master, dev ] pull_request: - branches: [ master ] + branches: [ master, dev ] release: types: - published @@ -16,8 +16,8 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest] - python-version: ['3.7', '3.8', '3.9', '3.10'] + os: [ubuntu-latest, macos-13] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] steps: - name: Checkout repository @@ -56,7 +56,7 @@ jobs: - name: Test with pytest run: make test - name: Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 #with: # fail_ci_if_error: true @@ -72,7 +72,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: "3.7" + python-version: "3.8" - name: Install non-python dependencies on linux run: | sudo apt-get install libsuitesparse-dev @@ -118,7 +118,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.8' - name: Install dependencies run: | python -m pip install --upgrade pip @@ -133,4 +133,4 @@ jobs: TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | - twine upload dist/* \ No newline at end of file + twine upload dist/* diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index ad6c38ec..841886e5 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -57,12 +57,38 @@ If you are proposing a feature: Get Started! ------------ -Ready to contribute? Here's how to set up `enterprise` for local development. +Ready to contribute? Here's how to set up ``enterprise`` for local development. -1. Fork the `enterprise` repo on GitHub. +Install the dependencies +~~~~~~~~~~~~~~~~~~~~~~~~ + +``enterprise`` relies on a lot of other software to function. 
+If you use the Anaconda distribution of Python, you can get all of this software using ``conda``. +First, you install the latest stable version of ``enterprise``, which will come with all of the dependencies. +Then you remove ``enterprise`` leaving everything else intact. +This way you can use your development version of ``enterprise`` instead of the stable version. +We will also need some additional software that is required to run the tests. + +Start with a virtual environment with the extra dependencies required for running tests. In this case it is called ``ent_dev``:: + + $ conda create -n ent_dev -y -c conda-forge python=3.9 black=22.3.0 flake8 sphinx_rtd_theme pytest-cov + $ conda activate ent_dev + +Now install everything else by running the commands:: + + $ conda install -c conda-forge enterprise-pulsar + $ conda remove enterprise-pulsar --force + $ pip install coverage-conditional-plugin + + +Get the enterprise source code and get to work! +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. Fork the ``enterprise`` repo on GitHub. 2. Clone your fork locally:: $ git clone git@github.com:your_name_here/enterprise.git + $ cd enterprise/ 3. Set `enterprise/master` as upstream remote:: @@ -73,17 +99,25 @@ Ready to contribute? Here's how to set up `enterprise` for local development. $ git pull upstream master 4. This is how you set up your fork for local development: - + .. note:: - You will need to have ``tempo`` and ``suitesparse`` installed before - running the commands below. + You will need to have ``tempo2`` and ``suitesparse`` installed before + running these commands. - :: + If you installed the dependencies via conda, you are good to go! - $ cd enterprise/ + If you set up a ``conda`` virtual environment with the dependencies already, + you can add your local fork of ``enterprise`` to it by running:: + + $ pip install -e . 
+ + If you manually installed the dependencies, this will make and activate a + Python3 virtual env with your local fork of ``enterprise``:: + $ make init $ source .enterprise/bin/activate + 5. Create a branch for local development:: $ git checkout -b name-of-your-bugfix-or-feature @@ -93,7 +127,6 @@ Ready to contribute? Here's how to set up `enterprise` for local development. 6. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox (tox not implemented yet). Also check that any new docs are formatted correctly:: $ make test - $ make lint $ make docs To get flake8 and tox, just pip install them into your virtualenv. @@ -114,9 +147,8 @@ Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring. -3. The pull request should work for Python 2.6, 2.7, 3.3, 3.4 and 3.5, and for PyPy. Check - https://travis-ci.org/nanograv/enterprise/pull_requests - and make sure that the tests pass for all supported Python versions. +3. The pull request should work for all supported versions of Python: 3.8, 3.9, 3.10, 3.11, and 3.12. You + can see the progress of the tests in the `Checks` tab of your GitHub pull request. 
Tips ---- @@ -130,3 +162,4 @@ To track and checkout another user's branch:: $ git remote add other-user-username https://github.com/other-user-username/enterprise.git $ git fetch other-user-username $ git checkout --track -b branch-name other-user-username/branch-name + diff --git a/README.md b/README.md index 6c71c471..34a24a64 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ [![Build Status](https://github.com/nanograv/enterprise/workflows/CI-Tests/badge.svg)](https://github.com/nanograv/enterprise/actions) [![Documentation Status](https://readthedocs.org/projects/enterprise/badge/?version=latest)](https://enterprise.readthedocs.io/en/latest/?badge=latest) [![Test Coverage](https://codecov.io/gh/nanograv/enterprise/branch/master/graph/badge.svg?token=YXSX3293VF)](https://codecov.io/gh/nanograv/enterprise) -![Python Versions](https://img.shields.io/badge/python-3.7%2C%203.8%2C%203.9%2C%203.10-blue.svg) +![Python Versions](https://img.shields.io/badge/python-3.8%2C%203.9%2C%203.10%2C%203.11%2C%203.12-blue.svg) [![Zenodo DOI 4059815](https://zenodo.org/badge/DOI/10.5281/zenodo.4059815.svg)](https://doi.org/10.5281/zenodo.4059815) @@ -14,7 +14,7 @@ Inference SuitE) is a pulsar timing analysis code, aimed at noise analysis, gravitational-wave searches, and timing model analysis. - Note: `enterprise>=3.0` does not support Python2.7. You must use - Python \>= 3.7. + Python \>= 3.8. - Free software: MIT license - Documentation: . diff --git a/docs/index.rst b/docs/index.rst index 223d63dd..f7d95823 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -15,7 +15,7 @@ Welcome to enterprise's documentation! .. image:: https://codecov.io/gh/nanograv/enterprise/branch/master/graph/badge.svg?token=YXSX3293VF :target: https://codecov.io/gh/nanograv/enterprise :alt: Test Coverage -.. image:: https://img.shields.io/badge/python-3.6%2C%203.7%2C%203.8-blue.svg +.. 
image:: https://img.shields.io/badge/python-3.8%2C%203.9%2C%203.10%2C%203.11%2C%203.12-blue.svg :alt: Python Versions .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4059815.svg diff --git a/docs/installation.rst b/docs/installation.rst index 775ccaf3..d9716885 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -23,6 +23,17 @@ Installation .. .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ +Using conda +----------- + +``enterprise`` is available via conda-forge as `enterprise-pulsar `_. +If you use the Anaconda distribution of Python, we strongly recommend installing via the ``conda`` command: + +.. code-block:: console + + $ conda install -c conda-forge enterprise-pulsar + + From sources ------------ @@ -57,3 +68,11 @@ If you want to run tests or do any other development then also run: .. _Github repo: https://github.com/nanograv/enterprise .. _tarball: https://github.com/nanograv/enterprise/tarball/master + + +Tips +---- + +If you have a computer with an Apple Silicon chip, see `these instructions`_ on how to install Apple Intel packages using Rosetta. + +.. _these instructions: https://conda-forge.org/docs/user/tipsandtricks.html#installing-apple-intel-packages-on-apple-silicon diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index 18a25cd0..fa486582 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -2,13 +2,13 @@ """Class containing pulsar data from timing package [tempo2/PINT]. """ +import contextlib import json import logging import os import pickle +from io import StringIO -import astropy.constants as const -import astropy.units as u import numpy as np from ephem import Ecliptic, Equatorial from astropy.time import Time @@ -35,6 +35,13 @@ logger.warning("PINT not installed. 
Will use libstempo instead.") # pragma: no cover pint = None +try: + import astropy.constants as const + import astropy.units as u +except ImportError: # pragma: no cover + const = None + u = None + if pint is None and t2 is None: err_msg = "Must have either PINT or libstempo timing package installed" raise ImportError(err_msg) @@ -154,20 +161,18 @@ def filter_data(self, start_time=None, end_time=None): self.sort_data() + def drop_not_picklable(self): + """Drop all attributes that cannot be pickled. + + Derived classes should implement this if they have + any such attributes. + """ + pass + def to_pickle(self, outdir=None): """Save object to pickle file.""" - # drop t2pulsar object - if hasattr(self, "t2pulsar"): - del self.t2pulsar - msg = "t2pulsar object cannot be pickled and has been removed." - logger.warning(msg) - - if hasattr(self, "pint_toas"): - del self.pint_toas - del self.model - msg = "pint_toas and model objects cannot be pickled and have been removed." - logger.warning(msg) + self.drop_not_picklable() if outdir is None: outdir = os.getcwd() @@ -310,7 +315,7 @@ def sunssb(self): @property def telescope(self): - """Return telescope vector at all timestamps""" + """Return telescope name at all timestamps""" return self._telescope[self._isort] @@ -322,7 +327,11 @@ def __init__(self, toas, model, sort=True, drop_pintpsr=True, planets=True): if not drop_pintpsr: self.model = model + self.parfile = model.as_parfile() self.pint_toas = toas + with StringIO() as tim: + toas.write_TOA_file(tim) + self.timfile = tim.getvalue() # these are TDB but not barycentered # self._toas = np.array(toas.table["tdbld"], dtype="float64") * 86400 @@ -331,20 +340,17 @@ def __init__(self, toas, model, sort=True, drop_pintpsr=True, planets=True): self._stoas = np.array(toas.get_mjds().value, dtype="float64") * 86400 self._residuals = np.array(resids(toas, model).time_resids.to(u.s), dtype="float64") self._toaerrs = np.array(toas.get_errors().to(u.s), dtype="float64") - 
self._designmatrix = model.designmatrix(toas)[0] + self._designmatrix, self.fitpars, self.designmatrix_units = model.designmatrix(toas) self._ssbfreqs = np.array(model.barycentric_radio_freq(toas), dtype="float64") self._telescope = np.array(toas.get_obss()) - # fitted parameters - self.fitpars = ["Offset"] + [par for par in model.params if not getattr(model, par).frozen] - # gather DM/DMX information if available self._set_dm(model) # set parameters - spars = [par for par in model.params] - self.setpars = [sp for sp in spars if sp not in self.fitpars] + self.setpars = [sp for sp in model.params if sp not in self.fitpars] + # FIXME: this can be done more cleanly using PINT self._flags = {} for ii, obsflags in enumerate(toas.get_flags()): for jj, flag in enumerate(obsflags): @@ -355,6 +361,7 @@ def __init__(self, toas, model, sort=True, drop_pintpsr=True, planets=True): # convert flags to arrays # TODO probably better way to do this + # -- PINT always stores flags as strings for key, val in self._flags.items(): if isinstance(val[0], u.quantity.Quantity): self._flags[key] = np.array([v.value for v in val]) @@ -379,6 +386,21 @@ def __init__(self, toas, model, sort=True, drop_pintpsr=True, planets=True): self.sort_data() + def drop_pintpsr(self): + with contextlib.suppress(NameError): + del self.model + del self.parfile + del self.pint_toas + del self.timfile + + def drop_not_picklable(self): + with contextlib.suppress(AttributeError): + del self.model + del self.pint_toas + logger.warning("pint_toas and model objects cannot be pickled and have been removed.") + + return super().drop_not_picklable() + def _set_dm(self, model): pars = [par for par in model.params if not getattr(model, par).frozen] @@ -402,12 +424,13 @@ def _set_dm(self, model): def _get_radec(self, model): if hasattr(model, "RAJ") and hasattr(model, "DECJ"): - return (model.RAJ.value, model.DECJ.value) + raj = model.RAJ.quantity.to(u.rad).value + decj = model.DECJ.quantity.to(u.rad).value + return raj, 
decj else: - # TODO: better way of dealing with units - d2r = np.pi / 180 - elong, elat = model.ELONG.value, model.ELAT.value - return self._get_radec_from_ecliptic(elong * d2r, elat * d2r) + elong = model.ELONG.quantity.to(u.rad).value + elat = model.ELAT.quantity.to(u.rad).value + return self._get_radec_from_ecliptic(elong, elat) def _get_ssb_lsec(self, toas, obs_planet): """Get the planet to SSB vector in lightseconds from Pint table""" @@ -455,7 +478,15 @@ def _get_sunssb(self, toas, model): class Tempo2Pulsar(BasePulsar): - def __init__(self, t2pulsar, sort=True, drop_t2pulsar=True, planets=True): + def __init__( + self, + t2pulsar, + sort=True, + drop_t2pulsar=True, + planets=True, + par_name=None, + tim_name=None, + ): self._sort = sort self.t2pulsar = t2pulsar self.planets = planets @@ -502,6 +533,15 @@ def __init__(self, t2pulsar, sort=True, drop_t2pulsar=True, planets=True): self.sort_data() if drop_t2pulsar: + self.drop_tempopsr() + else: + if par_name is not None and os.path.exists(par_name): + self.parfile = open(par_name).read() + if tim_name is not None and os.path.exists(tim_name): + self.timfile = open(tim_name).read() + + def drop_tempopsr(self): + with contextlib.suppress(NameError): del self.t2pulsar # gather DM/DMX information if available @@ -564,7 +604,7 @@ def _get_sunssb(self, t2pulsar): sunssb = None if self.planets: # for ii in range(1, 10): - # tag = 'DMASSPLANET' + str(ii) + # tag = 'DMASSPLANET' + str(ii)@pytest.mark.skipif(t2 is None, reason="TEMPO2/libstempo not available") # self.t2pulsar[tag].val = 0.0 self.t2pulsar.formbats() sunssb = np.zeros((len(self._toas), 6)) @@ -581,6 +621,12 @@ def _get_sunssb(self, t2pulsar): # then replace them with pickleable objects that can be inflated # to numpy arrays with SharedMemory storage + def drop_not_picklable(self): + with contextlib.suppress(AttributeError): + del self.t2pulsar + logger.warning("t2pulsar object cannot be pickled and has been removed.") + return super().drop_not_picklable() 
+ _todeflate = ["_designmatrix", "_planetssb", "_sunssb", "_flags"] _deflated = "pristine" @@ -617,7 +663,9 @@ def Pulsar(*args, **kwargs): sort = kwargs.get("sort", True) drop_t2pulsar = kwargs.get("drop_t2pulsar", True) drop_pintpsr = kwargs.get("drop_pintpsr", True) - timing_package = kwargs.get("timing_package", "tempo2") + timing_package = kwargs.get("timing_package", None) + if timing_package is not None: + timing_package = timing_package.lower() if pint is not None: toas = [x for x in args if isinstance(x, TOAs)] @@ -645,31 +693,46 @@ def Pulsar(*args, **kwargs): reltimfile = timfiletup[-1] relparfile = os.path.relpath(parfile[0], dirname) + if timing_package is None: + if t2 is not None: + timing_package = "tempo2" + elif pint is not None: # pragma: no cover + timing_package = "pint" + else: # pragma: no cover + raise ValueError("No timing package available with which to load a pulsar") + # get current directory cwd = os.getcwd() - - # Change directory to the base directory of the tim-file to deal with - # INCLUDE statements in the tim-file - os.chdir(dirname) - - if timing_package.lower() == "pint": - if (clk is not None) and (bipm_version is None): - bipm_version = clk.split("(")[1][:-1] - model, toas = get_model_and_toas( - relparfile, - reltimfile, - ephem=ephem, - bipm_version=bipm_version, - planets=planets, - ) - os.chdir(cwd) - return PintPulsar(toas, model, sort=sort, drop_pintpsr=drop_pintpsr, planets=planets) - - elif timing_package.lower() == "tempo2": - # hack to set maxobs - maxobs = get_maxobs(reltimfile) + 100 - t2pulsar = t2.tempopulsar(relparfile, reltimfile, maxobs=maxobs, ephem=ephem, clk=clk) + try: + # Change directory to the base directory of the tim-file to deal with + # INCLUDE statements in the tim-file + os.chdir(dirname) + if timing_package.lower() == "tempo2": + if t2 is None: # pragma: no cover + raise ValueError("tempo2 requested but tempo2 is not available") + # hack to set maxobs + maxobs = get_maxobs(reltimfile) + 100 + 
t2pulsar = t2.tempopulsar(relparfile, reltimfile, maxobs=maxobs, ephem=ephem, clk=clk) + return Tempo2Pulsar( + t2pulsar, + sort=sort, + drop_t2pulsar=drop_t2pulsar, + planets=planets, + par_name=relparfile, + tim_name=reltimfile, + ) + elif timing_package.lower() == "pint": + if pint is None: # pragma: no cover + raise ValueError("PINT requested but PINT is not available") + if (clk is not None) and (bipm_version is None): + bipm_version = clk.split("(")[1][:-1] + model, toas = get_model_and_toas( + relparfile, reltimfile, ephem=ephem, bipm_version=bipm_version, planets=planets + ) + os.chdir(cwd) + return PintPulsar(toas, model, sort=sort, drop_pintpsr=drop_pintpsr, planets=planets) + else: + raise ValueError(f"Unknown timing package {timing_package}") + finally: os.chdir(cwd) - return Tempo2Pulsar(t2pulsar, sort=sort, drop_t2pulsar=drop_t2pulsar, planets=planets) - - raise ValueError("Unknown arguments {}".format(args)) + raise ValueError("Pulsar (par/tim) not specified in {args} or {kwargs}") diff --git a/enterprise/signals/gp_bases.py b/enterprise/signals/gp_bases.py index 1847e54e..f924be12 100644 --- a/enterprise/signals/gp_bases.py +++ b/enterprise/signals/gp_bases.py @@ -13,9 +13,12 @@ __all__ = [ "createfourierdesignmatrix_red", "createfourierdesignmatrix_dm", + "createfourierdesignmatrix_dm_tn", "createfourierdesignmatrix_env", "createfourierdesignmatrix_ephem", "createfourierdesignmatrix_eph", + "createfourierdesignmatrix_chromatic", + "createfourierdesignmatrix_general", ] @@ -124,6 +127,44 @@ def createfourierdesignmatrix_dm( return F * Dm[:, None], Ffreqs +@function +def createfourierdesignmatrix_dm_tn( + toas, freqs, nmodes=30, Tspan=None, pshift=False, fref=1400, logf=False, fmin=None, fmax=None, idx=2, modes=None +): + """ + Construct DM-variation fourier design matrix. 
Current + normalization expresses DM signal as a deviation [seconds] + at fref [MHz] + + :param toas: vector of time series in seconds + :param freqs: radio frequencies of observations [MHz] + :param nmodes: number of fourier coefficients to use + :param Tspan: option to some other Tspan + :param pshift: option to add random phase shift + :param fref: reference frequency [MHz] + :param logf: use log frequency spacing + :param fmin: lower sampling frequency + :param fmax: upper sampling frequency + :param idx: index of the radio frequency dependence + :param modes: option to provide explicit list or array of + sampling frequencies + + :return: F: DM-variation fourier design matrix + :return: f: Sampling frequencies + """ + + # get base fourier design matrix and frequencies + F, Ffreqs = createfourierdesignmatrix_red( + toas, nmodes=nmodes, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, pshift=pshift, modes=modes + ) + + # compute the DM-variation vectors in the temponest normalization + # amplitude normalization: sqrt(12)*pi, scaling to 1 MHz from 1400 MHz, DM constant: 2.41e-4 + Dm = (fref / freqs) ** idx * np.sqrt(12) * np.pi / 1400 / 1400 / 2.41e-4 + + return F * Dm[:, None], Ffreqs + + @function def createfourierdesignmatrix_env( toas, @@ -218,8 +259,9 @@ def createfourierdesignmatrix_eph( @function -def createfourierdesignmatrix_chromatic(toas, freqs, nmodes=30, Tspan=None, logf=False, fmin=None, fmax=None, idx=4): - +def createfourierdesignmatrix_chromatic( + toas, freqs, nmodes=30, Tspan=None, logf=False, fmin=None, fmax=None, idx=4, modes=None +): """ Construct Scattering-variation fourier design matrix. 
@@ -232,15 +274,82 @@ def createfourierdesignmatrix_chromatic(toas, freqs, nmodes=30, Tspan=None, logf :param fmin: lower sampling frequency :param fmax: upper sampling frequency :param idx: Index of chromatic effects + :param modes: option to provide explicit list or array of + sampling frequencies :return: F: Chromatic-variation fourier design matrix :return: f: Sampling frequencies """ # get base fourier design matrix and frequencies - F, Ffreqs = createfourierdesignmatrix_red(toas, nmodes=nmodes, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax) + F, Ffreqs = createfourierdesignmatrix_red( + toas, nmodes=nmodes, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, modes=modes + ) # compute the DM-variation vectors Dm = (1400 / freqs) ** idx return F * Dm[:, None], Ffreqs + + +@function +def createfourierdesignmatrix_general( + toas, + freqs, + flags, + flagname="group", + flagval=None, + idx=None, + tndm=False, + nmodes=30, + Tspan=None, + psrTspan=True, + logf=False, + fmin=None, + fmax=None, + modes=None, + pshift=None, + pseed=None, +): + """ + Construct fourier design matrix with possibility of adding selection and/or chromatic index envelope. + + :param toas: vector of time series in seconds + :param freqs: radio frequencies of observations [MHz] + :param flags: Flags from timfiles + :param nmodes: number of fourier coefficients to use + :param Tspan: option to some other Tspan + :param psrTspan: option to use pulsar time span. 
Used only if sub-group of ToAs is chosen + :param logf: use log frequency spacing + :param fmin: lower sampling frequency + :param fmax: upper sampling frequency + :param log10_Amp: log10 of the Amplitude [s] + :param idx: Index of chromatic effects + :param modes: option to provide explicit list or array of + sampling frequencies + + :return: F: fourier design matrix + :return: f: Sampling frequencies + """ + if flagval and not psrTspan: + sel_toas = toas[np.where(flags[flagname] == flagval)] + Tspan = sel_toas.max() - sel_toas.min() + + # get base fourier design matrix and frequencies + F, Ffreqs = createfourierdesignmatrix_red( + toas, nmodes=nmodes, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, modes=modes, pshift=pshift, pseed=pseed + ) + + # compute the chromatic-variation vectors + if idx: + if tndm: + chrom_fac = (1400 / freqs) ** idx * np.sqrt(12) * np.pi / 1400 / 1400 / 2.41e-4 + else: + chrom_fac = (1400 / freqs) ** idx + F *= chrom_fac[:, None] + + # compute the mask for the selection + if flagval: + F *= np.array([flags[flagname] == flagval] * F.shape[1]).T + + return F, Ffreqs diff --git a/enterprise/signals/gp_signals.py b/enterprise/signals/gp_signals.py index 443927d9..21ad177d 100644 --- a/enterprise/signals/gp_signals.py +++ b/enterprise/signals/gp_signals.py @@ -192,6 +192,9 @@ def FourierBasisGP( components=20, selection=Selection(selections.no_selection), Tspan=None, + logf=False, + fmin=None, + fmax=None, modes=None, name="red_noise", pshift=False, @@ -200,7 +203,9 @@ def FourierBasisGP( """Convenience function to return a BasisGP class with a fourier basis.""" - basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, modes=modes, pshift=pshift, pseed=pseed) + basis = utils.createfourierdesignmatrix_red( + nmodes=components, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, modes=modes, pshift=pshift, pseed=pseed + ) BaseClass = BasisGP(spectrum, basis, coefficients=coefficients, combine=combine, selection=selection, 
name=name) class FourierBasisGP(BaseClass): @@ -211,24 +216,24 @@ class FourierBasisGP(BaseClass): return FourierBasisGP -def get_timing_model_basis(use_svd=False, normed=True): +def get_timing_model_basis(use_svd=False, normed=True, idx_exclude=None): if use_svd: if normed is not True: raise ValueError("use_svd == True requires normed == True") - return utils.svd_tm_basis() + return utils.svd_tm_basis(idx_exclude=idx_exclude) elif normed is True: - return utils.normed_tm_basis() + return utils.normed_tm_basis(idx_exclude=idx_exclude) elif normed is not False: - return utils.normed_tm_basis(norm=normed) + return utils.normed_tm_basis(norm=normed, idx_exclude=idx_exclude) else: - return utils.unnormed_tm_basis() + return utils.unnormed_tm_basis(idx_exclude=idx_exclude) -def TimingModel(coefficients=False, name="linear_timing_model", use_svd=False, normed=True): +def TimingModel(coefficients=False, name="linear_timing_model", use_svd=False, normed=True, idx_exclude=None): """Class factory for marginalized linear timing model signals.""" - basis = get_timing_model_basis(use_svd, normed) + basis = get_timing_model_basis(use_svd, normed, idx_exclude) prior = utils.tm_prior() BaseClass = BasisGP(prior, basis, coefficients=coefficients, name=name) @@ -413,6 +418,9 @@ def FourierBasisCommonGP( combine=True, components=20, Tspan=None, + logf=False, + fmin=None, + fmax=None, modes=None, name="common_fourier", pshift=False, @@ -424,7 +432,9 @@ def FourierBasisCommonGP( "With coefficients=True, FourierBasisCommonGP " + "requires that you specify Tspan explicitly." 
) - basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, modes=modes, pshift=pshift, pseed=pseed) + basis = utils.createfourierdesignmatrix_red( + nmodes=components, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, modes=modes, pshift=pshift, pseed=pseed + ) BaseClass = BasisCommonGP(spectrum, basis, orf, coefficients=coefficients, combine=combine, name=name) class FourierBasisCommonGP(BaseClass): @@ -851,7 +861,7 @@ def solve(self, right, left_array=None, logdet=False): if right.ndim == 1 and left_array is right: res = right - rNr, logdet_N = self.Nmat.solve(res, left_array=res, logdet=logdet) + rNr, logdet_N = self.Nmat.solve(res, left_array=res, logdet=True) MNr = self.MNr(res) ret = rNr - np.dot(MNr, self.cf(MNr)) diff --git a/enterprise/signals/parameter.py b/enterprise/signals/parameter.py index 38e4a4e2..065f7f23 100644 --- a/enterprise/signals/parameter.py +++ b/enterprise/signals/parameter.py @@ -6,6 +6,7 @@ import numpy as np from scipy.special import erf as _erf +import scipy.stats as sstats from enterprise.signals.selections import selection_func @@ -28,6 +29,7 @@ def _sample(parlist, parvalues): for par in parlist: if par not in parvalues: + # RvH: Hyper pars seem to be broken # sample hyperpars for this par, skip parameter itself parvalues.update(sample(par.params[1:])) @@ -49,9 +51,15 @@ def __init__(self, name): msg = "Parameter classes need to define _prior, or _logprior." 
raise AttributeError(msg) + if hasattr(self, "_ppf"): + self.ppf = self._ppf(name) + else: + self.ppf = None + self.type = self.__class__.__name__.lower() def get_logpdf(self, value=None, **kwargs): + # RvH: This exception cannot be triggered if not isinstance(self, Parameter): raise TypeError("You can only call get_logpdf() on an " "instantiated (named) Parameter.") @@ -66,6 +74,7 @@ def get_logpdf(self, value=None, **kwargs): return logpdf if self._size is None else np.sum(logpdf) def get_pdf(self, value=None, **kwargs): + # RvH: This exception cannot be triggered if not isinstance(self, Parameter): raise TypeError("You can only call get_pdf() on an " "instantiated (named) Parameter.") @@ -87,6 +96,7 @@ def sample(self, **kwargs): raise AttributeError("No sampler was provided for this Parameter.") else: if self.name in kwargs: + # RvH: This exception cannot be triggered raise ValueError("You shouldn't give me my value when you're sampling me.!") if hasattr(self, "prior"): @@ -94,6 +104,15 @@ def sample(self, **kwargs): else: return self.logprior(func=self._sampler, size=self._size, **kwargs) + def get_ppf(self, value=None, **kwargs): + if self.ppf is None: + raise NotImplementedError("No ppf was implemented for this Parameter.") + + if value is None and "params" in kwargs: + value = kwargs["params"][self.name] + + return self.ppf(value, **kwargs) + @property def size(self): return self._size @@ -136,7 +155,7 @@ class GPCoefficients(Parameter): return GPCoefficients -def UserParameter(prior=None, logprior=None, sampler=None, size=None): +def UserParameter(prior=None, logprior=None, sampler=None, ppf=None, size=None): """Class factory for UserParameter, implementing Enterprise parameters with arbitrary priors. The prior is specified by way of an Enterprise ``Function`` of the form ``prior(value, [par1, par2])``. 
Optionally, @@ -147,6 +166,7 @@ def UserParameter(prior=None, logprior=None, sampler=None, size=None): :param prior: parameter prior pdf, given as Enterprise ``Function`` :param sampler: function returning a randomly sampled parameter according to prior + :param ppf: percentage point function (inverse cdf), for this parameter :param size: length for vector parameter :return: ``UserParameter`` class """ @@ -157,6 +177,8 @@ class UserParameter(Parameter): _prior = prior if logprior is not None: _logprior = logprior + if ppf is not None: + _ppf = ppf _sampler = None if sampler is None else staticmethod(sampler) _typename = "UserParameter" @@ -189,6 +211,12 @@ def UniformSampler(pmin, pmax, size=None): return np.random.uniform(pmin, pmax, size=size) +def UniformPPF(value, pmin, pmax): + """Percentage Point function for Uniform paramters.""" + + return sstats.uniform.ppf(value, loc=pmin, scale=pmax - pmin) + + def Uniform(pmin, pmax, size=None): """Class factory for Uniform parameters (with pdf(x) ~ 1/[pmax - pmin] inside [pmin,pmax], 0 outside. Handles vectors correctly, @@ -204,6 +232,7 @@ def Uniform(pmin, pmax, size=None): class Uniform(Parameter): _size = size _prior = Function(UniformPrior, pmin=pmin, pmax=pmax) + _ppf = Function(UniformPPF, pmin=pmin, pmax=pmax) _sampler = staticmethod(UniformSampler) _typename = _argrepr("Uniform", pmin=pmin, pmax=pmax) @@ -233,6 +262,17 @@ def NormalSampler(mu, sigma, size=None): return np.random.normal(mu, sigma, size=size) +def NormalPPF(value, mu, sigma): + """Prior function for Normal parameters. + Handles scalar mu and sigma, compatible vector value/mu/sigma, + vector value/mu and compatible covariance matrix sigma.""" + + if np.ndim(sigma) >= 2: + raise NotImplementedError("PPF not implemented when sigma is 2D") + + return sstats.norm.ppf(value, loc=mu, scale=sigma) + + def Normal(mu=0, sigma=1, size=None): """Class factory for Normal parameters (with pdf(x) ~ N(``mu``,``sigma``)). 
Handles vectors correctly if ``size == len(mu) == len(sigma)``, @@ -249,6 +289,7 @@ def Normal(mu=0, sigma=1, size=None): class Normal(Parameter): _size = size _prior = Function(NormalPrior, mu=mu, sigma=sigma) + _ppf = Function(NormalPPF, mu=mu, sigma=sigma) _sampler = staticmethod(NormalSampler) _typename = _argrepr("Normal", mu=mu, sigma=sigma) @@ -346,6 +387,13 @@ def LinearExpSampler(pmin, pmax, size=None): return np.log10(np.random.uniform(10**pmin, 10**pmax, size)) +def LinearExpPPF(value, pmin, pmax): + """Percentage Point function for Uniform paramters.""" + + ev = sstats.uniform.ppf(value, loc=10**pmin, scale=10**pmax - 10**pmin) + return np.log10(ev) + + def LinearExp(pmin, pmax, size=None): """Class factory for LinearExp parameters (with pdf(x) ~ 10^x, and 0 outside [``pmin``,``max``]). Handles vectors correctly @@ -361,6 +409,7 @@ def LinearExp(pmin, pmax, size=None): class LinearExp(Parameter): _size = size _prior = Function(LinearExpPrior, pmin=pmin, pmax=pmax) + _ppf = Function(LinearExpPPF, pmin=pmin, pmax=pmax) _sampler = staticmethod(LinearExpSampler) _typename = _argrepr("LinearExp", pmin=pmin, pmax=pmax) @@ -439,7 +488,6 @@ def __init__(self, name, psr=None): for kw, arg in self.func_kwargs.items(): if isinstance(arg, type) and issubclass(arg, (Parameter, ConstantParameter)): - # parameter name template: # pname_[signalname_][fname_]parname pnames = [name, fname, kw] @@ -582,7 +630,6 @@ def wrapper(*args, **kwargs): and issubclass(arg, FunctionBase) or isinstance(arg, FunctionBase) ): - return Function(func, **kwargs) # otherwise, we simply call the function diff --git a/enterprise/signals/selections.py b/enterprise/signals/selections.py index c701e7b0..0b4f1e73 100644 --- a/enterprise/signals/selections.py +++ b/enterprise/signals/selections.py @@ -103,6 +103,24 @@ def by_band(flags): return {val: flags["B"] == val for val in flagvals} +def by_freq_band(bands=None): + def backends(freqs): + """Selection function to split by EPTA and custom 
frequency band values + bands: dict of bands and ranges + default recovers EPTA freq bands + """ + nonlocal bands + if isinstance(bands, dict): + pass + else: + bands = {"Band.1": [0, 1000], "Band.2": [1000, 2000], "Band.3": [2000, 3000], "Band.4": [3000, 10000]} + return { + val: (freqs >= fl) & (freqs < fh) for val, (fl, fh) in bands.items() if any((freqs >= fl) & (freqs < fh)) + } + + return backends + + def by_frontend(flags): """Selection function to split by frontend under -fe flag""" flagvals = np.unique(flags["fe"]) @@ -118,7 +136,7 @@ def by_backend(backend_flags): def nanograv_backends(backend_flags): """Selection function to split by NANOGRav backend flags only.""" flagvals = np.unique(backend_flags) - ngb = ["ASP", "GASP", "GUPPI", "PUPPI", "YUPPI"] + ngb = ["ASP", "GASP", "GUPPI", "PUPPI", "YUPPI", "CHIME", "VEGAS"] flagvals = [val for val in flagvals if any([b in val for b in ngb])] return {val: backend_flags == val for val in flagvals} @@ -129,6 +147,75 @@ def by_telescope(telescope): return {t: (telescope == t) for t in telescopes} +def by_index(name, idx): + def indexvals(toas): + """Selection function to split by ToA index values""" + return {name: np.isin(np.arange(len(toas)), idx)} + + return indexvals + + def no_selection(toas): """Default selection with no splitting.""" return {"": np.ones_like(toas, dtype=bool)} + + +def custom_backends(cb): + def backends(backend_flags): + """Selection function to split by custom backend flags only. 
+ cb : list of str of the backends + use None to recover by_backend + use ["ASP", "GASP", "GUPPI", "PUPPI", "YUPPI"] to recover nanograv_backends + """ + nonlocal cb + flagvals = np.unique(backend_flags) + if cb is not None: + cb = list(np.atleast_1d(cb)) + flagvals = [val for val in flagvals if any([b in val for b in cb])] + else: + pass + return {val: backend_flags == val for val in flagvals} + + return backends + + +def custom_backends_dict(cb): + def backends(backend_flags, flags, toas): + """Selection function to split by custom flags dictionary only. + cb : str, list or dict of flags and names + use None to recover no_selection + use {"B":None} to recover by_band + use {"fe":None} to recover by_frontend + use {"backend":None} to recover by_backend + use {"backend":["ASP", "GASP", "GUPPI", "PUPPI", "YUPPI"]} to recover nanograv_backends + """ + nonlocal cb + if isinstance(cb, str) or isinstance(cb, list): + flagvals = np.unique(backend_flags) + cb = list(np.atleast_1d(cb)) + flagvals = [val for val in flagvals if any([b in val for b in cb])] + return {val: backend_flags == val for val in flagvals} + elif isinstance(cb, dict): + flagdict = {} + for flagname in cb.keys(): + if flagname == "backend": + flagvals = np.unique(backend_flags) + if cb["backend"] is not None: + cb_key = list(np.atleast_1d(cb["backend"])) + flagvals = [val for val in flagvals if any([b in val for b in cb_key])] + else: + pass + flagdict.update({val: backend_flags == val for val in flagvals}) + else: + flagvals = np.unique(flags[flagname]) + if cb[flagname] is not None: + cb_key = list(np.atleast_1d(cb[flagname])) + flagvals = [val for val in flagvals if any([b in val for b in cb_key])] + else: + pass + flagdict.update({val: flags[flagname] == val for val in flagvals}) + return flagdict + else: + return {"": np.ones_like(toas, dtype=bool)} + + return backends diff --git a/enterprise/signals/signal_base.py b/enterprise/signals/signal_base.py index e0fd3d32..5070bbb3 100644 --- 
a/enterprise/signals/signal_base.py +++ b/enterprise/signals/signal_base.py @@ -25,6 +25,7 @@ from enterprise.signals.parameter import function # noqa: F401 from enterprise.signals.parameter import ConstantParameter from enterprise.signals.utils import KernelMatrix +from enterprise.signals.utils import indices_from_slice from enterprise import __version__ from sys import version @@ -220,9 +221,17 @@ def __call__(self, xs, phiinv_method="cliques"): if self.cholesky_sparse: try: - cf = cholesky(TNT + sps.csc_matrix(phiinv)) # cf(Sigma) - expval = cf(TNr) - logdet_sigma = cf.logdet() + Sigma_sp = TNT + sps.csc_matrix(phiinv) + + if hasattr(self, "cf_sp"): + # Have analytical decomposition already. Just do update + self.cf_sp.cholesky_inplace(Sigma_sp) + else: + # Do analytical and numerical Sparse Cholesky + self.cf_sp = cholesky(Sigma_sp) + + expval = self.cf_sp(TNr) + logdet_sigma = self.cf_sp.logdet() except CholmodError: # pragma: no cover return -np.inf else: @@ -734,6 +743,9 @@ def summary(self, include_params=True, to_stdout=False): cpcount += 1 row = [sig.name, sig.__class__.__name__, len(sig.param_names)] summary += "{: <40} {: <30} {: <20}\n".format(*row) + if "BasisGP" in sig.__class__.__name__: + summary += "\nBasis shape (Ntoas x N basis functions): {}".format(str(sig.get_basis().shape)) + summary += "\nN selected toas: {}\n".format(str(len([i for i in sig._masks[0] if i]))) if include_params: summary += "\n" summary += "params:\n" @@ -1118,6 +1130,7 @@ class BlockMatrix(object): def __init__(self, blocks, slices, nvec=0): self._blocks = blocks self._slices = slices + self._idxs = [indices_from_slice(slc) for slc in slices] self._nvec = nvec if np.any(nvec != 0): @@ -1152,15 +1165,15 @@ def _solve_ZNX(self, X, Z): ZNXr = np.dot(Z[self._idx, :].T, X[self._idx, :] / self._nvec[self._idx, None]) else: ZNXr = 0 - for slc, block in zip(self._slices, self._blocks): - Zblock = Z[slc, :] - Xblock = X[slc, :] + for idx, block in zip(self._idxs, self._blocks): + 
Zblock = Z[idx, :] + Xblock = X[idx, :] - if slc.stop - slc.start > 1: - cf = sl.cho_factor(block + np.diag(self._nvec[slc])) + if len(idx) > 1: + cf = sl.cho_factor(block + np.diag(self._nvec[idx])) bx = sl.cho_solve(cf, Xblock) else: - bx = Xblock / self._nvec[slc][:, None] + bx = Xblock / self._nvec[idx][:, None] ZNX += np.dot(Zblock.T, bx) ZNX += ZNXr return ZNX.squeeze() if len(ZNX) > 1 else float(ZNX) @@ -1173,11 +1186,11 @@ def _solve_NX(self, X): X = X.reshape(X.shape[0], 1) NX = X / self._nvec[:, None] - for slc, block in zip(self._slices, self._blocks): - Xblock = X[slc, :] - if slc.stop - slc.start > 1: - cf = sl.cho_factor(block + np.diag(self._nvec[slc])) - NX[slc] = sl.cho_solve(cf, Xblock) + for idx, block in zip(self._idxs, self._blocks): + Xblock = X[idx, :] + if len(idx) > 1: + cf = sl.cho_factor(block + np.diag(self._nvec[idx])) + NX[idx] = sl.cho_solve(cf, Xblock) return NX.squeeze() def _get_logdet(self): @@ -1188,12 +1201,12 @@ def _get_logdet(self): logdet = np.sum(np.log(self._nvec[self._idx])) else: logdet = 0 - for slc, block in zip(self._slices, self._blocks): - if slc.stop - slc.start > 1: - cf = sl.cho_factor(block + np.diag(self._nvec[slc])) + for idx, block in zip(self._idxs, self._blocks): + if len(idx) > 1: + cf = sl.cho_factor(block + np.diag(self._nvec[idx])) logdet += np.sum(2 * np.log(np.diag(cf[0]))) else: - logdet += np.sum(np.log(self._nvec[slc])) + logdet += np.sum(np.log(self._nvec[idx])) return logdet def solve(self, other, left_array=None, logdet=False): @@ -1218,6 +1231,7 @@ class ShermanMorrison(object): def __init__(self, jvec, slices, nvec=0.0): self._jvec = jvec self._slices = slices + self._idxs = [indices_from_slice(slc) for slc in slices] self._nvec = nvec def __add__(self, other): @@ -1235,12 +1249,12 @@ def _solve_D1(self, x): """Solves :math:`N^{-1}x` where :math:`x` is a vector.""" Nx = x / self._nvec - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - rblock = x[slc] - niblock = 1 
/ self._nvec[slc] + for idx, jv in zip(self._idxs, self._jvec): + if len(idx) > 1: + rblock = x[idx] + niblock = 1 / self._nvec[idx] beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) - Nx[slc] -= beta * np.dot(niblock, rblock) * niblock + Nx[idx] -= beta * np.dot(niblock, rblock) * niblock return Nx def _solve_1D1(self, x, y): @@ -1250,11 +1264,11 @@ def _solve_1D1(self, x, y): Nx = x / self._nvec yNx = np.dot(y, Nx) - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - xblock = x[slc] - yblock = y[slc] - niblock = 1 / self._nvec[slc] + for idx, jv in zip(self._idxs, self._jvec): + if len(idx) > 1: + xblock = x[idx] + yblock = y[idx] + niblock = 1 / self._nvec[idx] beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) yNx -= beta * np.dot(niblock, xblock) * np.dot(niblock, yblock) return yNx @@ -1265,11 +1279,11 @@ def _solve_2D2(self, X, Z): """ ZNX = np.dot(Z.T / self._nvec, X) - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - Zblock = Z[slc, :] - Xblock = X[slc, :] - niblock = 1 / self._nvec[slc] + for idx, jv in zip(self._idxs, self._jvec): + if len(idx) > 1: + Zblock = Z[idx, :] + Xblock = X[idx, :] + niblock = 1 / self._nvec[idx] beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) zn = np.dot(niblock, Zblock) xn = np.dot(niblock, Xblock) @@ -1281,9 +1295,9 @@ def _get_logdet(self): is a quantization matrix. 
""" logdet = np.einsum("i->", np.log(self._nvec)) - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - niblock = 1 / self._nvec[slc] + for idx, jv in zip(self._idxs, self._jvec): + if len(idx) > 1: + niblock = 1 / self._nvec[idx] beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) logdet += np.log(jv) - np.log(beta) return logdet diff --git a/enterprise/signals/utils.py b/enterprise/signals/utils.py index df949af0..d896de37 100644 --- a/enterprise/signals/utils.py +++ b/enterprise/signals/utils.py @@ -19,11 +19,14 @@ from enterprise import constants as const from enterprise import signals as sigs # noqa: F401 from enterprise.signals.gp_bases import ( # noqa: F401 + createfourierdesignmatrix_red, createfourierdesignmatrix_dm, + createfourierdesignmatrix_dm_tn, createfourierdesignmatrix_env, - createfourierdesignmatrix_eph, createfourierdesignmatrix_ephem, - createfourierdesignmatrix_red, + createfourierdesignmatrix_eph, + createfourierdesignmatrix_chromatic, + createfourierdesignmatrix_general, ) from enterprise.signals.gp_priors import powerlaw, turnover # noqa: F401 from enterprise.signals.parameter import function @@ -322,7 +325,6 @@ def create_stabletimingdesignmatrix(designmat, fastDesign=True): def make_ecc_interpolant(): - """ Make interpolation function from eccentricity file to determine number of harmonics to use for a given @@ -339,7 +341,6 @@ def make_ecc_interpolant(): def get_edot(F, mc, e): - """ Compute eccentricity derivative from Taylor et al. (2016) @@ -767,26 +768,37 @@ def create_quantization_matrix(toas, dt=1, nmin=2): return U, weights -def quant2ind(U): +def quant2ind(U, as_slice=False): """ - Use quantization matrix to return slices of non-zero elements. + Use quantization matrix to return indices of non-zero elements. 
"""Given a slice object, return an index array"""
@@ -863,12 +875,19 @@ def anis_orf(pos1, pos2, params, **kwargs): @function -def unnormed_tm_basis(Mmat): +def unnormed_tm_basis(Mmat, idx_exclude=None): + if idx_exclude: + idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) + Mmat = Mmat[:, idxs] return Mmat, np.ones_like(Mmat.shape[1]) @function -def normed_tm_basis(Mmat, norm=None): +def normed_tm_basis(Mmat, norm=None, idx_exclude=None): + if idx_exclude: + idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) + Mmat = Mmat[:, idxs] + if norm is None: norm = np.sqrt(np.sum(Mmat**2, axis=0)) @@ -879,7 +898,11 @@ def normed_tm_basis(Mmat, norm=None): @function -def svd_tm_basis(Mmat): +def svd_tm_basis(Mmat, idx_exclude=None): + if idx_exclude: + idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) + Mmat = Mmat[:, idxs] + u, s, v = np.linalg.svd(Mmat, full_matrices=False) return u, np.ones_like(s) diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index a7d794b0..9c318b8e 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -6,10 +6,23 @@ import numpy as np import scipy.sparse +import logging from enterprise.signals import parameter, selections, signal_base, utils from enterprise.signals.parameter import function from enterprise.signals.selections import Selection +from enterprise.signals.utils import indices_from_slice + +try: + import fastshermanmorrison.fastshermanmorrison as fastshermanmorrison + + fsm_warning_issued = False +except ImportError: # pragma: no cover + fastshermanmorrison = None + fsm_warning_issued = False + +# logging.basicConfig(format="%(levelname)s: %(name)s: %(message)s", level=logging.INFO) +logger = logging.getLogger(__name__) def WhiteNoise(varianceFunction, selection=Selection(selections.no_selection), name=""): @@ -114,7 +127,7 @@ def EquadNoise(*args, **kwargs): def EcorrKernelNoise( log10_ecorr=parameter.Uniform(-10, -5), 
selection=Selection(selections.no_selection), - method="sherman-morrison", + method="fast-sherman-morrison", name="", ): r"""Class factory for ECORR type noise. @@ -123,7 +136,8 @@ def EcorrKernelNoise( :param selection: ``Selection`` object specifying masks for backends, time segments, etc. :param method: Method for computing noise covariance matrix. - Options include `sherman-morrison`, `sparse`, and `block` + Options include `fast-sherman-morrison`, `sherman-morrison`, `sparse`, + and `block` :return: ``EcorrKernelNoise`` class. @@ -140,6 +154,12 @@ def EcorrKernelNoise( In this signal implementation we offer three methods of performing these matrix operations: + fast-sherman-morrison + Uses the `Sherman-Morrison`_ forumla to compute the matrix + inverse and other matrix operations. **Note:** This method can only + be used for covariances that make up ECorrKernelNoise, :math:`uv^T`. + This version is Cython optimized. + sherman-morrison Uses the `Sherman-Morrison`_ forumla to compute the matrix inverse and other matrix operations. **Note:** This method can only @@ -166,10 +186,17 @@ def EcorrKernelNoise( """ - if method not in ["sherman-morrison", "block", "sparse"]: + global fsm_warning_issued + + if method not in ["fast-sherman-morrison", "sherman-morrison", "block", "sparse"]: msg = "EcorrKernelNoise does not support method: {}".format(method) raise TypeError(msg) + if method == "fast-sherman-morrison" and fastshermanmorrison is None and not fsm_warning_issued: # pragma: no cover + msg = "Package `fastshermanmorrison` not installed. 
Fallback to sherman-morrison" + logger.warning(msg) + fsm_warning_issued = True + class EcorrKernelNoise(signal_base.Signal): signal_type = "white noise" signal_name = "ecorr_" + method @@ -191,6 +218,7 @@ def __init__(self, psr): nepoch = sum(U.shape[1] for U in Umats) U = np.zeros((len(psr.toas), nepoch)) self._slices = {} + self._idxs = {} netot = 0 for ct, (key, mask) in enumerate(zip(keys, masks)): nn = Umats[ct].shape[1] @@ -198,6 +226,10 @@ def __init__(self, psr): self._slices.update({key: utils.quant2ind(U[:, netot : nn + netot])}) netot += nn + self._idxs.update( + {key: [indices_from_slice(slc) for slc in slices] for (key, slices) in self._slices.items()} + ) + # initialize sparse matrix self._setup(psr) @@ -210,6 +242,11 @@ def ndiag_params(self): def get_ndiag(self, params): if method == "sherman-morrison": return self._get_ndiag_sherman_morrison(params) + elif method == "fast-sherman-morrison": + if fastshermanmorrison: + return self._get_ndiag_fast_sherman_morrison(params) + else: # pragma: no cover + return self._get_ndiag_sherman_morrison(params) elif method == "sparse": return self._get_ndiag_sparse(params) elif method == "block": @@ -221,39 +258,40 @@ def _setup(self, psr): def _setup_sparse(self, psr): Ns = scipy.sparse.csc_matrix((len(psr.toas), len(psr.toas))) - for key, slices in self._slices.items(): - for slc in slices: - if slc.stop - slc.start > 1: - Ns[slc, slc] = 1.0 + for key, idxs in self._idxs.items(): + for idx in idxs: + if len(idx) > 1: + Ns[np.ix_(idx, idx)] = 1.0 self._Ns = signal_base.csc_matrix_alt(Ns) def _get_ndiag_sparse(self, params): for p in self._params: - for slc in self._slices[p]: - if slc.stop - slc.start > 1: - self._Ns[slc, slc] = 10 ** (2 * self.get(p, params)) + for idx in self._idxs[p]: + if len(idx) > 1: + self._Ns[np.ix_(idx, idx)] = 10 ** (2 * self.get(p, params)) return self._Ns def _get_ndiag_sherman_morrison(self, params): slices, jvec = self._get_jvecs(params) return signal_base.ShermanMorrison(jvec, 
slices) - def _get_ndiag_block(self, params): + def _get_ndiag_fast_sherman_morrison(self, params): slices, jvec = self._get_jvecs(params) + return fastshermanmorrison.FastShermanMorrison(jvec, slices) + + def _get_ndiag_block(self, params): + idxs, jvec = self._get_jvecs(params) blocks = [] - for jv, slc in zip(jvec, slices): - nb = slc.stop - slc.start + for jv, idx in zip(jvec, idxs): + nb = len(idx) blocks.append(np.ones((nb, nb)) * jv) - return signal_base.BlockMatrix(blocks, slices) + return signal_base.BlockMatrix(blocks, idxs) def _get_jvecs(self, params): - slices = sum([self._slices[key] for key in sorted(self._slices.keys())], []) + idxs = sum([self._idxs[key] for key in sorted(self._idxs.keys())], []) jvec = np.concatenate( - [ - np.ones(len(self._slices[key])) * 10 ** (2 * self.get(key, params)) - for key in sorted(self._slices.keys()) - ] + [np.ones(len(self._idxs[key])) * 10 ** (2 * self.get(key, params)) for key in sorted(self._idxs.keys())] ) - return (slices, jvec) + return (idxs, jvec) return EcorrKernelNoise diff --git a/requirements_dev.txt b/requirements_dev.txt index 73ae670a..e3ba1966 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -14,4 +14,5 @@ sphinx-rtd-theme>=0.4.0 pytest-cov>=2.7.0 coverage-conditional-plugin>=0.4.0 jupyter>=1.0.0 -build==0.3.1.post1 \ No newline at end of file +build==0.3.1.post1 +fastshermanmorrison-pulsar>=0.4.0 diff --git a/setup.py b/setup.py index 2e5993a3..a1625c05 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ package_dir={"enterprise": "enterprise"}, include_package_data=True, package_data={"enterprise": ["datafiles/*", "datafiles/ephemeris/*", "datafiles/ng9/*", "datafiles/mdc_open1/*"]}, - python_requires=">=3.7, <3.11", + python_requires=">=3.8, <3.13", install_requires=requirements, license="MIT license", zip_safe=False, @@ -42,10 +42,11 @@ "License :: OSI Approved :: MIT License", "Natural Language :: English", "Programming Language :: Python :: 3", - "Programming Language :: 
Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Topic :: Scientific/Engineering :: Astronomy", "Topic :: Scientific/Engineering :: Physics", ], diff --git a/tests/test_likelihood.py b/tests/test_likelihood.py index 10449010..61eff926 100644 --- a/tests/test_likelihood.py +++ b/tests/test_likelihood.py @@ -326,6 +326,43 @@ def test_compare_ecorr_likelihood(self): msg = "Likelihood mismatch between ECORR methods" assert np.allclose(l1, l2), msg + def test_like_sparse_cache(self): + """Test likelihood with sparse Cholesky caching""" + + # find the maximum time span to set GW frequency sampling + tmin = [p.toas.min() for p in self.psrs] + tmax = [p.toas.max() for p in self.psrs] + Tspan = np.max(tmax) - np.min(tmin) + + # setup basic model + efac = parameter.Constant(1.0) + log10_A = parameter.Constant(-15.0) + gamma = parameter.Constant(4.33) + + ef = white_signals.MeasurementNoise(efac) + pl = utils.powerlaw(log10_A=log10_A, gamma=gamma) + + orf = utils.hd_orf() + crn = gp_signals.FourierBasisCommonGP(pl, orf, components=20, name="GW", Tspan=Tspan) + + tm = gp_signals.TimingModel() + m = tm + ef + crn + + # Two identical arrays that we'll compare with two sets of parameters + pta1 = signal_base.PTA([m(p) for p in self.psrs]) + pta2 = signal_base.PTA([m(p) for p in self.psrs]) + + params_init = parameter.sample(pta1.params) + params_check = parameter.sample(pta1.params) + + # First call for pta1 only initializes the sparse decomposition. 
Second one uses it + _ = pta1.get_lnlikelihood(params_init) + l1 = pta1.get_lnlikelihood(params_check) + l2 = pta2.get_lnlikelihood(params_check) + + msg = "Likelihood mismatch between sparse Cholesky full & inplace" + assert np.allclose(l1, l2), msg + class TestLikelihoodPint(TestLikelihood): @classmethod diff --git a/tests/test_parameter.py b/tests/test_parameter.py index b706b834..c549ea27 100644 --- a/tests/test_parameter.py +++ b/tests/test_parameter.py @@ -13,10 +13,53 @@ import numpy as np import scipy.stats -from enterprise.signals.parameter import UniformPrior, UniformSampler, Uniform -from enterprise.signals.parameter import NormalPrior, NormalSampler, Normal +from enterprise.signals.parameter import Parameter, UserParameter, Function +from enterprise.signals.parameter import UniformPrior, UniformSampler, Uniform, UniformPPF +from enterprise.signals.parameter import NormalPrior, NormalSampler, Normal, NormalPPF from enterprise.signals.parameter import TruncNormalPrior, TruncNormalSampler, TruncNormal -from enterprise.signals.parameter import LinearExpPrior, LinearExpSampler +from enterprise.signals.parameter import LinearExpPrior, LinearExpSampler, LinearExpPPF, LinearExp + + +class TestParameterExceptions(unittest.TestCase): + def test_missing_prior_attribute_error(self): + class MissingPriorParameter(Parameter): + pass # Do not define _prior or _logprior + + with self.assertRaises(AttributeError): + MissingPriorParameter("test") + + def test_methods_called_on_class_type_error(self): + UniformClass = Uniform(pmin=0, pmax=1) + with self.assertRaises(TypeError): + UniformClass.get_logpdf() + + def test_missing_sampler_attribute_error(self): + class MissingSamplerParameter(Parameter): + _prior = staticmethod(lambda x: x) + + def __init__(self, name): + super().__init__(name) + self._sampler = None + + missing_sampler_param = MissingSamplerParameter("test") + with self.assertRaises(AttributeError): + missing_sampler_param.sample() + + def 
test_missing_ppf_not_implemented_error(self): + class MissingPPFParameter(Parameter): + _prior = staticmethod(lambda x: x) + + def __init__(self, name): + super().__init__(name) + self.ppf = None + + missing_ppf_param = MissingPPFParameter("test") + with self.assertRaises(NotImplementedError): + missing_ppf_param.get_ppf() + + def test_2D_NormalPPF_error(self): + with self.assertRaises(NotImplementedError): + NormalPPF(0.0, 1.0, np.array([[1.0, 1.0], [1.0, 1.0]])) class TestParameter(unittest.TestCase): @@ -35,6 +78,16 @@ def test_uniform(self): assert p_min < x1 < p_max, msg2 assert type(x1) == float, msg2 + msg3 = "Enterprise and scipy PPF do not match" + assert np.allclose(UniformPPF(x, p_min, p_max), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg3 + + # As parameter dictionary or value for Uniform instantiated object + unipar = Uniform(pmin=p_min, pmax=p_max)("testpar") + assert np.allclose( + unipar.get_ppf(params=dict(testpar=x)), scipy.stats.uniform.ppf(x, p_min, p_max - p_min) + ), msg3 + assert np.allclose(unipar.get_ppf(x), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg3 + # vector argument x = np.array([0.5, 0.1]) assert np.allclose(UniformPrior(x, p_min, p_max), scipy.stats.uniform.pdf(x, p_min, p_max - p_min)), msg1 @@ -43,9 +96,13 @@ def test_uniform(self): assert np.all((p_min < x1) & (x1 < p_max)), msg2 assert x1.shape == (3,), msg2 + # vector argument + assert np.allclose(UniformPPF(x, p_min, p_max), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg3 + # vector bounds p_min, p_max = np.array([0.2, 0.3]), np.array([1.1, 1.2]) assert np.allclose(UniformPrior(x, p_min, p_max), scipy.stats.uniform.pdf(x, p_min, p_max - p_min)), msg1 + assert np.allclose(UniformPPF(x, p_min, p_max), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg3 x1 = UniformSampler(p_min, p_max) assert np.all((p_min < x1) & (x1 < p_max)), msg2 @@ -55,6 +112,33 @@ def test_uniform(self): assert np.all((p_min < x1) & (x1 < p_max)), msg2 assert x1.shape == (3, 
2), msg2 + def test_userparameter(self): + """Test User-defined parameter prior, sampler, and ppf""" + + # scalar + p_min, p_max = 0.2, 1.1 + x = 0.5 + + # As parameter dictionary or value for Uniform instantiated object + unipar = Uniform(pmin=p_min, pmax=p_max)("testpar") + unipar = UserParameter( + prior=Function(UniformPrior, pmin=p_min, pmax=p_max), + sampler=staticmethod(UniformSampler), + ppf=Function(UniformPPF, pmin=p_min, pmax=p_max), + )("testpar") + + msg1 = "Enterprise and scipy prior do not match" + assert np.allclose( + unipar.get_pdf(params=dict(testpar=x)), scipy.stats.uniform.pdf(x, p_min, p_max - p_min) + ), msg1 + assert np.allclose(unipar.get_pdf(x), scipy.stats.uniform.pdf(x, p_min, p_max - p_min)), msg1 + + msg2 = "Enterprise and scipy PPF do not match" + assert np.allclose( + unipar.get_ppf(params=dict(testpar=x)), scipy.stats.uniform.ppf(x, p_min, p_max - p_min) + ), msg2 + assert np.allclose(unipar.get_ppf(x), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg2 + def test_linearexp(self): """Test LinearExp parameter prior and sampler.""" @@ -68,6 +152,19 @@ def test_linearexp(self): msg1b = "Scalar sampler out of range" assert p_min <= x <= p_max, msg1b + msg1c = "Scalar PPF does not match" + x = 0.5 + assert np.allclose( + LinearExpPPF(x, p_min, p_max), np.log10(10**p_min + x * (10**p_max - 10**p_min)) + ), msg1c + + # As parameter dictionary or value for Uniform instantiated object + lepar = LinearExp(pmin=p_min, pmax=p_max)("testpar") + assert np.allclose( + lepar.get_ppf(params=dict(testpar=x)), np.log10(10**p_min + x * (10**p_max - 10**p_min)) + ), msg1 + assert np.allclose(lepar.get_ppf(x), np.log10(10**p_min + x * (10**p_max - 10**p_min))), msg1 + # vector argument x = np.array([0, 1.5, 2.5]) msg2 = "Vector-argument prior does not match" @@ -79,6 +176,12 @@ def test_linearexp(self): msg2b = "Vector-argument sampler out of range" assert np.all((p_min < x) & (x < p_max)), msg2b + x = np.array([0.5, 0.75]) + msg2c = 
"Vector-argument PPF does not match" + assert np.allclose( + LinearExpPPF(x, p_min, p_max), np.log10(10**p_min + x * (10**p_max - 10**p_min)) + ), msg2c + # vector bounds p_min, p_max = np.array([0, 1]), np.array([2, 3]) x = np.array([1, 2]) @@ -88,6 +191,14 @@ def test_linearexp(self): np.array([10**1 / (10**2 - 10**0), 10**2 / (10**3 - 10**1)]) * np.log(10), ), msg3 + # Vector PPF + x = np.array([0.5, 0.75]) + p_min, p_max = np.array([0, 1]), np.array([2, 3]) + msg3c = "Vector-argument PPF+bounds does not match" + assert np.allclose( + LinearExpPPF(x, p_min, p_max), np.log10(10**p_min + x * (10**p_max - 10**p_min)) + ), msg3c + def test_normal(self): """Test Normal parameter prior and sampler for various combinations of scalar and vector arguments.""" @@ -105,6 +216,9 @@ def test_normal(self): # this should almost never fail assert -5 < (x1 - mu) / sigma < 5, msg2 + msg3 = "Enterprise and scipy PPF do not match" + assert np.allclose(NormalPPF(x, mu, sigma), scipy.stats.norm.ppf(x, loc=mu, scale=sigma)), msg3 + # vector argument x = np.array([-0.2, 0.1, 0.5]) @@ -118,6 +232,9 @@ def test_normal(self): ) assert x1.shape == x2.shape, msg2 + x = np.array([0.1, 0.25, 0.65]) + assert np.allclose(NormalPPF(x, mu, sigma), scipy.stats.norm.ppf(x, loc=mu, scale=sigma)), msg3 + # vector bounds; note the different semantics from `NormalPrior`, # which returns a vector consistently with `UniformPrior` mu, sigma = np.array([0.1, 0.15, 0.2]), np.array([2, 1, 2]) @@ -199,4 +316,4 @@ def test_metaparam(self): paramA = TruncNormal(mu, sigma, pmin, pmax)("A") xs = np.array([-3.5, 3.5]) - assert np.alltrue(paramA.get_pdf(xs, mu=mu.sample()) == zeros), msg4 + assert np.all(paramA.get_pdf(xs, mu=mu.sample()) == zeros), msg4 diff --git a/tests/test_pulsar.py b/tests/test_pulsar.py index 6ab617e3..61aba402 100644 --- a/tests/test_pulsar.py +++ b/tests/test_pulsar.py @@ -25,18 +25,49 @@ from pint.models import get_model_and_toas +class TestTimingPackageExceptions(unittest.TestCase): + 
def test_unkown_timing_package(self): + # initialize Pulsar class + with self.assertRaises(ValueError): + self.psr = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", + datadir + "/B1855+09_NANOGrav_9yv1.tim", + timing_package="foobar", + ) + + def test_clk_but_no_bipm(self): + self.psr = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", + datadir + "/B1855+09_NANOGrav_9yv1.tim", + clk="TT(BIPM2020)", + timing_package="pint", + ) + + class TestPulsar(unittest.TestCase): @classmethod def setUpClass(cls): """Setup the Pulsar object.""" # initialize Pulsar class - cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim") + cls.psr = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=True + ) @classmethod def tearDownClass(cls): shutil.rmtree("pickle_dir", ignore_errors=True) + def test_droppsr(self): + self.psr_nodrop = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=False + ) + + self.psr_nodrop.drop_tempopsr() + + with self.assertRaises(AttributeError): + _ = self.psr.t2pulsar + def test_residuals(self): """Check Residual shape.""" @@ -195,6 +226,15 @@ def setUpClass(cls): # initialize Pulsar class cls.psr = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", + datadir + "/B1855+09_NANOGrav_9yv1.tim", + ephem="DE430", + drop_pintpsr=True, + timing_package="pint", + ) + + def test_droppsr(self): + self.psr_nodrop = Pulsar( datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", ephem="DE430", @@ -202,6 +242,37 @@ def setUpClass(cls): timing_package="pint", ) + self.psr_nodrop.drop_pintpsr() + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.model + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.parfile + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.pint_toas + + with self.assertRaises(AttributeError): + _ = 
self.psr_nodrop.timfile + + def test_drop_not_picklable(self): + self.psr_nodrop = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", + datadir + "/B1855+09_NANOGrav_9yv1.tim", + ephem="DE430", + drop_pintpsr=False, + timing_package="pint", + ) + + self.psr_nodrop.drop_not_picklable() + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.model + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.pint_toas + def test_deflate_inflate(self): pass @@ -218,6 +289,21 @@ def test_load_radec_psr(self): timing_package="pint", ) + def test_load_radec_psr_mdc(self): + """Setup the Pulsar object.""" + + # initialize Pulsar class with RAJ DECJ so _get_radec can be covered + psr = Pulsar( + datadir + "/mdc1/J0030+0451.par", + datadir + "/mdc1/J0030+0451.tim", + ephem="DE430", + drop_pintpsr=False, + timing_package="pint", + ) + + msg = f"Pulsar not loaded properly {self.psr.Mmat.shape}" + assert psr.Mmat.shape == (130, 8), msg + def test_no_planet(self): """Test exception when incorrect par(tim) file given.""" @@ -225,7 +311,7 @@ def test_no_planet(self): model, toas = get_model_and_toas( datadir + "/J0030+0451_NANOGrav_9yv1.gls.par", datadir + "/J0030+0451_NANOGrav_9yv1.tim", planets=False ) - Pulsar(model, toas, planets=True) + Pulsar(model, toas, planets=True, drop_pintpsr=False) msg = "obs_earth_pos is not in toas.table.colnames. Either " msg += "`planet` flag is not True in `toas` or further Pint " msg += "development to add additional planets is needed." diff --git a/tests/test_utils.py b/tests/test_utils.py index f8268cf3..b6a8148f 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -8,7 +8,9 @@ Tests for `utils` module. 
""" +import os import unittest +import pytest import numpy as np @@ -18,6 +20,8 @@ from enterprise.signals import utils from tests.enterprise_test_data import datadir +IN_GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true" + class TestUtils(unittest.TestCase): @classmethod @@ -128,6 +132,27 @@ def test_quantization_matrix(self): assert U.shape == (4005, 235), msg1 assert all(np.sum(U, axis=0) > 1), msg2 + inds = utils.quant2ind(U, as_slice=False) + slcs = utils.quant2ind(U, as_slice=True) + inds_check = [utils.indices_from_slice(slc) for slc in slcs] + + msg3 = "Quantization Matrix slice not equal to quantization indices" + for ind, ind_c in zip(inds, inds_check): + assert np.all(ind == ind_c), msg3 + + def test_indices_from_slice(self): + """Test conversion of slices to numpy indices""" + ind_np = np.array([2, 4, 6, 8]) + ind_np_check = utils.indices_from_slice(ind_np) + + msg1 = "Numpy indices not left as-is by indices_from_slice" + assert np.all(ind_np == ind_np_check), msg1 + + slc = slice(2, 10, 2) + ind_np_check = utils.indices_from_slice(slc) + msg2 = "Slice not converted properly by indices_from_slice" + assert np.all(ind_np == ind_np_check), msg2 + def test_psd(self): """Test PSD functions.""" Tmax = self.psr.toas.max() - self.psr.toas.min() @@ -145,6 +170,7 @@ def test_psd(self): assert np.allclose(utils.powerlaw(f, log10_A, gamma), pl), msg assert np.allclose(utils.turnover(f, log10_A, gamma, lf0, kappa, beta), pt), msg + @pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="Test doesn't work in Github Actions due to limited memory.") def test_orf(self): """Test ORF functions.""" p1 = np.array([0.3, 0.648, 0.7]) diff --git a/tests/test_white_signals.py b/tests/test_white_signals.py index 0736bf71..5ef2c33b 100644 --- a/tests/test_white_signals.py +++ b/tests/test_white_signals.py @@ -58,7 +58,14 @@ def setUpClass(cls): cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim") # IPTA-like pulsar - cls.ipsr = 
Pulsar(datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim") + cls.ipsr = Pulsar(datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", sort=True) + + # Same pulsar, but with TOAs shuffled + cls.ipsr_shuffled = Pulsar(datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", sort=True) + rng = np.random.default_rng(seed=123) + rng.shuffle(cls.ipsr_shuffled._isort) + for ii, p in enumerate(cls.ipsr_shuffled._isort): + cls.ipsr_shuffled._iisort[p] = ii def test_efac(self): """Test that efac signal returns correct covariance.""" @@ -384,8 +391,13 @@ def _ecorr_test(self, method="sparse"): msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method) assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, wd.solve(T)), rtol=1e-10), msg - def _ecorr_test_ipta(self, method="sparse"): + def _ecorr_test_ipta(self, method="sparse", shuffled=False): """Test of sparse/sherman-morrison ecorr signal and solve methods.""" + if shuffled: + ipsr = self.ipsr_shuffled + else: + ipsr = self.ipsr + selection = Selection(selections.nanograv_backends) efac = parameter.Uniform(0.1, 5) @@ -394,7 +406,7 @@ def _ecorr_test_ipta(self, method="sparse"): ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection, method=method) tm = gp_signals.TimingModel() s = ef + ec + tm - m = s(self.ipsr) + m = s(ipsr) # set parameters efacs = [1.3] @@ -412,18 +424,18 @@ def _ecorr_test_ipta(self, method="sparse"): } # get EFAC Nvec - nvec0 = efacs[0] ** 2 * self.ipsr.toaerrs**2 + nvec0 = efacs[0] ** 2 * ipsr.toaerrs**2 # get the basis flags = ["ASP-L", "ASP-S", "GASP-8", "GASP-L", "GUPPI-8", "GUPPI-L", "PUPPI-L", "PUPPI-S"] - bflags = self.ipsr.backend_flags + bflags = ipsr.backend_flags Umats = [] for flag in np.unique(bflags): if flag in flags: mask = bflags == flag - Umats.append(utils.create_quantization_matrix(self.ipsr.toas[mask], nmin=2)[0]) + Umats.append(utils.create_quantization_matrix(ipsr.toas[mask], nmin=2)[0]) nepoch = sum(U.shape[1] for U in Umats) - U = 
np.zeros((len(self.ipsr.toas), nepoch)) + U = np.zeros((len(ipsr.toas), nepoch)) jvec = np.zeros(nepoch) netot, ct = 0, 0 for flag in np.unique(bflags): @@ -441,23 +453,21 @@ def _ecorr_test_ipta(self, method="sparse"): # test msg = "EFAC/ECORR {} logdet incorrect.".format(method) N = m.get_ndiag(params) - assert np.allclose(N.solve(self.ipsr.residuals, logdet=True)[1], wd.logdet(), rtol=1e-8), msg + assert np.allclose(N.solve(ipsr.residuals, logdet=True)[1], wd.logdet(), rtol=1e-8), msg msg = "EFAC/ECORR {} D1 solve incorrect.".format(method) - assert np.allclose(N.solve(self.ipsr.residuals), wd.solve(self.ipsr.residuals), rtol=1e-8), msg + assert np.allclose(N.solve(ipsr.residuals), wd.solve(ipsr.residuals), rtol=1e-8), msg msg = "EFAC/ECORR {} 1D1 solve incorrect.".format(method) assert np.allclose( - N.solve(self.ipsr.residuals, left_array=self.ipsr.residuals), - np.dot(self.ipsr.residuals, wd.solve(self.ipsr.residuals)), + N.solve(ipsr.residuals, left_array=ipsr.residuals), + np.dot(ipsr.residuals, wd.solve(ipsr.residuals)), rtol=1e-8, ), msg msg = "EFAC/ECORR {} 2D1 solve incorrect.".format(method) T = m.get_basis() - assert np.allclose( - N.solve(self.ipsr.residuals, left_array=T), np.dot(T.T, wd.solve(self.ipsr.residuals)), rtol=1e-8 - ), msg + assert np.allclose(N.solve(ipsr.residuals, left_array=T), np.dot(T.T, wd.solve(ipsr.residuals)), rtol=1e-8), msg msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method) assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, wd.solve(T)), rtol=1e-8), msg @@ -470,21 +480,28 @@ def test_ecorr_sherman_morrison(self): """Test of sherman-morrison ecorr signal and solve methods.""" self._ecorr_test(method="sherman-morrison") + def test_ecorr_fast_sherman_morrison(self): + """Test of fast-sherman-morrison ecorr signal and solve methods.""" + self._ecorr_test(method="fast-sherman-morrison") + def test_ecorr_block(self): """Test of block matrix ecorr signal and solve methods.""" self._ecorr_test(method="block") def 
test_ecorr_sparse_ipta(self):
         """Test of sparse ecorr signal and solve methods."""
-        self._ecorr_test_ipta(method="sparse")
+        self._ecorr_test_ipta(method="sparse", shuffled=False)
+        self._ecorr_test_ipta(method="sparse", shuffled=True)
 
     def test_ecorr_sherman_morrison_ipta(self):
         """Test of sherman-morrison ecorr signal and solve methods."""
-        self._ecorr_test_ipta(method="sherman-morrison")
+        self._ecorr_test_ipta(method="sherman-morrison", shuffled=False)
+        self._ecorr_test_ipta(method="sherman-morrison", shuffled=True)
 
     def test_ecorr_block_ipta(self):
         """Test of block matrix ecorr signal and solve methods."""
-        self._ecorr_test_ipta(method="block")
+        self._ecorr_test_ipta(method="block", shuffled=False)
+        self._ecorr_test_ipta(method="block", shuffled=True)
 
 
 class TestWhiteSignalsPint(TestWhiteSignals):
@@ -502,5 +519,14 @@ def setUpClass(cls):
 
         # IPTA-like pulsar
         cls.ipsr = Pulsar(
-            datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", ephem="DE421", timint_package="pint"
+            datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", ephem="DE421", timing_package="pint", sort=True
+        )
+
+        # Same pulsar, but with TOAs shuffled
+        cls.ipsr_shuffled = Pulsar(
+            datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", ephem="DE421", timing_package="pint", sort=True
         )
+        rng = np.random.default_rng(seed=123)
+        rng.shuffle(cls.ipsr_shuffled._isort)
+        for ii, p in enumerate(cls.ipsr_shuffled._isort):
+            cls.ipsr_shuffled._iisort[p] = ii