diff --git a/.github/workflows/pypi-publish.yml b/.github/workflows/pypi-publish.yml new file mode 100644 index 0000000..91ce4a6 --- /dev/null +++ b/.github/workflows/pypi-publish.yml @@ -0,0 +1,51 @@ +# This workflow will install Python dependencies, run tests and lint with a single version of Python +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Publish to PyPI + +on: + push: + tags: "*" + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.9 + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 pytest tox + # - name: Lint with flake8 + # run: | + # # stop the build if there are Python syntax errors or undefined names + # flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + # # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Test with tox + run: | + tox + - name: Build docs + run: | + tox -e docs + - run: touch ./docs/_build/html/.nojekyll + - name: GH Pages Deployment + uses: JamesIves/github-pages-deploy-action@4.1.3 + with: + branch: gh-pages # The branch the action should deploy to. 
+ folder: ./docs/_build/html + clean: true # Automatically remove deleted files from the deploy branch + - name: Build Project and Publish + run: | + python -m tox -e clean,build + - name: Publish package + uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 + with: + user: __token__ + password: ${{ secrets.PYPI_PASSWORD }} \ No newline at end of file diff --git a/.github/workflows/pypi-test.yml b/.github/workflows/pypi-test.yml new file mode 100644 index 0000000..f35fd23 --- /dev/null +++ b/.github/workflows/pypi-test.yml @@ -0,0 +1,37 @@ +name: Test the library + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + matrix: + python-version: [ '3.8', '3.9', '3.10', '3.11', '3.12' ] + + name: Python ${{ matrix.python-version }} + steps: + - uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 pytest tox + # - name: Lint with flake8 + # run: | + # # stop the build if there are Python syntax errors or undefined names + # flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + # # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + - name: Test with tox + run: | + tox diff --git a/CHANGELOG.md b/CHANGELOG.md index 205cc5e..fd9f348 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,3 @@ ## Version 0.1 (development) -- Feature A added -- FIX: nasty bug #1729 fixed -- add your changes here! 
diff --git a/README.md b/README.md index 05f5469..0c8c607 100644 --- a/README.md +++ b/README.md @@ -11,17 +11,76 @@ --> [![Project generated with PyScaffold](https://img.shields.io/badge/-PyScaffold-005CA0?logo=pyscaffold)](https://pyscaffold.org/) +[![PyPI-Server](https://img.shields.io/pypi/v/dolomite-se.svg)](https://pypi.org/project/dolomite-se/) +![Unit tests](https://github.com/ArtifactDB/dolomite-se/actions/workflows/pypi-test.yml/badge.svg) -# dolomite-mae +# Save and load `MultiAssayExperiments` in Python -> Add a short description here! +## Introduction -A longer description of your project goes here... +The **dolomite-mae** package is the Python counterpart to the [**alabaster.mae**](https://github.com/ArtifactDB/alabaster.mae) R package, +providing methods for saving/reading `MultiAssayExperiment` objects within the [**dolomite** framework](https://github.com/ArtifactDB/dolomite-base). +All components of the `MultiAssayExperiment` - column_data, sample map and experiments - are saved to their respective file representations, +which can be loaded in a new R/Python environment for cross-language analyses. +## Quick start - +Let's mock up a `MultiAssayExperiment`: -## Note +```python +from multiassayexperiment import MultiAssayExperiment +from singlecellexperiment import SingleCellExperiment +from summarizedexperiment import SummarizedExperiment +import biocframe +import numpy -This project has been set up using PyScaffold 4.5. For details and usage -information on PyScaffold see https://pyscaffold.org/. 
+x = numpy.random.rand(1000, 200) +x2 = (numpy.random.rand(1000, 200) * 10).astype(numpy.int32) + +sce = SingleCellExperiment( + {"logcounts": x, "counts": x2}, + main_experiment_name="aaron's secret modality", + row_data=biocframe.BiocFrame( + {"foo": numpy.random.rand(1000), "bar": numpy.random.rand(1000)}, + row_names=["gene_sce_" + str(i) for i in range(1000)], + ), + column_data=biocframe.BiocFrame( + {"whee": numpy.random.rand(200), "stuff": numpy.random.rand(200)}, + row_names=["cell_sce" + str(i) for i in range(200)], + ), +) + +se = SummarizedExperiment( + {"counts": numpy.random.rand(100, 200)}, + row_data=biocframe.BiocFrame( + {"foo": numpy.random.rand(100), "bar": numpy.random.rand(100)}, + row_names=["gene_se_" + str(i) for i in range(100)], + ), + column_data=biocframe.BiocFrame( + {"whee": numpy.random.rand(200), "stuff": numpy.random.rand(200)}, + row_names=["cell_se" + str(i) for i in range(200)], + ), +) + +mae = MultiAssayExperiment(experiments={"jay_expt": sce, "aarons_expt": se}) +``` + +Now we can save it: + +```python +from dolomite_base import save_object +import dolomite_se +import os +from tempfile import mkdtemp + +path = os.path.join(mkdtemp(), "test") +save_object(se, path) +``` + +And load it again, e,g., in a new session: + +```python +from dolomite_base import read_object + +roundtrip = read_object(path) +``` diff --git a/docs/conf.py b/docs/conf.py index a6c0f39..cb6a00c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -72,6 +72,7 @@ "sphinx.ext.ifconfig", "sphinx.ext.mathjax", "sphinx.ext.napoleon", + "sphinx_autodoc_typehints", ] # Add any paths that contain templates here, relative to this directory. @@ -171,7 +172,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = "alabaster" +html_theme = "furo" # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the @@ -299,6 +300,8 @@ "scipy": ("https://docs.scipy.org/doc/scipy/reference", None), "setuptools": ("https://setuptools.pypa.io/en/stable/", None), "pyscaffold": ("https://pyscaffold.org/en/stable", None), + "dolomite_base": ("https://artifactdb.github.io/dolomite-base", None), + "multiassayexperiment": ("https://biocpy.github.io/MultiAssayExperiment/", None), } print(f"loading configurations for {project} {version} ...", file=sys.stderr) \ No newline at end of file diff --git a/docs/requirements.txt b/docs/requirements.txt index 0990c2a..8aed2c8 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,3 +4,5 @@ # sphinx_rtd_theme myst-parser[linkify] sphinx>=3.2.1 +furo +sphinx-autodoc-typehints \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index b60ac02..2c13a93 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,17 +5,17 @@ [metadata] name = dolomite-mae -description = Add a short description here! +description = Save and load multi-assay experiments in the dolomite framework! author = Jayaram Kancherla author_email = jayaram.kancherla@gmail.com license = MIT license_files = LICENSE.txt long_description = file: README.md long_description_content_type = text/markdown; charset=UTF-8; variant=GFM -url = https://github.com/pyscaffold/pyscaffold/ +url = https://github.com/ArtifactDB/dolomite-mae # Add here related links, for example: project_urls = - Documentation = https://pyscaffold.org/ + Documentation = https://github.com/ArtifactDB/dolomite-mae # Source = https://github.com/pyscaffold/pyscaffold/ # Changelog = https://pyscaffold.org/en/latest/changelog.html # Tracker = https://github.com/pyscaffold/pyscaffold/issues @@ -41,7 +41,7 @@ package_dir = =src # Require a min/specific Python version (comma-separated conditions) -# python_requires = >=3.8 +python_requires = >=3.8 # Add here dependencies of your project (line-separated), e.g. requests>=2.2,<3.0. 
import json
import os

import dolomite_base as dl
import h5py
from biocframe import BiocFrame
from dolomite_base.read_object import read_object_registry
from multiassayexperiment import MultiAssayExperiment

# Register this reader so that dolomite_base.read_object dispatches
# "multi_sample_dataset" directories to this function.
read_object_registry[
    "multi_sample_dataset"
] = "dolomite_mae.read_multi_assay_experiment"


def read_multi_assay_experiment(
    path: str, metadata: dict, **kwargs
) -> MultiAssayExperiment:
    """Load a
    :py:class:`~multiassayexperiment.MultiAssayExperiment.MultiAssayExperiment`
    from its on-disk representation.

    This method should generally not be called directly but instead be invoked by
    :py:meth:`~dolomite_base.read_object.read_object`.

    Args:
        path:
            Path to the directory containing the object.

        metadata:
            Metadata for the object.

        kwargs:
            Further arguments, ignored.

    Returns:
        A
        :py:class:`~multiassayexperiment.MultiAssayExperiment.MultiAssayExperiment`
        with file-backed arrays in the assays.

    Raises:
        RuntimeError:
            If 'sample_data' is missing/unreadable, lacks row names, or an
            experiment fails to load.
    """
    _sample_path = os.path.join(path, "sample_data")
    _sample_data = None
    if os.path.exists(_sample_path):
        _sample_data = dl.read_object(_sample_path)

    if _sample_data is None:
        raise RuntimeError("Cannot read 'sample_data'.")

    # Row names of the sample data are used to map experiment columns back to
    # their samples, so they are mandatory.
    _srow_names = _sample_data.get_row_names()
    if _srow_names is None:
        raise RuntimeError("'sample_data' does not contain 'row_names'.")

    _expts_path = os.path.join(path, "experiments")
    _expts = {}
    _expt_names = []
    _sample_map_data = None
    if os.path.exists(_expts_path):
        with open(os.path.join(_expts_path, "names.json"), "r") as handle:
            _expt_names = json.load(handle)

        if len(_expt_names) > 0:
            _sample_map_path = os.path.join(path, "sample_map.h5")
            # Use a context manager so the HDF5 handle is closed even when an
            # experiment fails to load (the original code leaked the open file).
            with h5py.File(_sample_map_path, "r") as _shandle:
                _sghandle = _shandle["multi_sample_dataset"]
                _primary = []
                _assay = []
                _colname = []

                for _aidx, _aname in enumerate(_expt_names):
                    # Experiments are stored in subdirectories named by index.
                    _expt_read_path = os.path.join(_expts_path, str(_aidx))

                    try:
                        _expts[_aname] = dl.read_object(_expt_read_path)
                    except Exception as ex:
                        # Chain the cause so the original traceback survives.
                        raise RuntimeError(
                            f"failed to load experiment '{_aname}' from '{path}'; "
                            + str(ex)
                        ) from ex

                    # Per-experiment vector of integer indices into the
                    # sample_data rows, one entry per experiment column.
                    _expt_map = dl.load_vector_from_hdf5(
                        _sghandle[str(_aidx)], expected_type=int, report_1darray=True
                    )

                    _assay.extend([_aname] * _expts[_aname].shape[1])
                    _colname.extend(_expts[_aname].get_column_names())
                    _primary.extend([_srow_names[i] for i in _expt_map])

            _sample_map_data = BiocFrame(
                {"primary": _primary, "colname": _colname, "assay": _assay}
            )

    mae = MultiAssayExperiment(
        experiments=_expts, column_data=_sample_data, sample_map=_sample_map_data
    )

    # Optional free-form metadata saved under "other_data".
    _meta_path = os.path.join(path, "other_data")
    if os.path.exists(_meta_path):
        _meta = dl.read_object(_meta_path)
        mae = mae.set_metadata(_meta.as_dict())

    return mae
import json
import os

import biocutils
import dolomite_base as dl
import h5py
from multiassayexperiment import MultiAssayExperiment


@dl.save_object.register
@dl.validate_saves
def save_multi_assay_experiment(
    x: MultiAssayExperiment,
    path: str,
    data_frame_args: dict = None,
    assay_args: dict = None,
    **kwargs,
):
    """Method for saving
    :py:class:`~multiassayexperiment.MultiAssayExperiment.MultiAssayExperiment`
    objects to their corresponding file representations, see
    :py:meth:`~dolomite_base.save_object.save_object` for details.

    Args:
        x:
            Object to be staged.

        path:
            Path to a directory in which to save ``x``.

        data_frame_args:
            Further arguments to pass to the ``save_object`` method for the
            row/column data.

        assay_args:
            Further arguments to pass to the ``save_object`` method for the
            assays.

        kwargs:
            Further arguments, ignored.

    Returns:
        ``x`` is saved to ``path``.

    Raises:
        RuntimeError:
            If an experiment cannot be staged, or the sample map is
            inconsistent with the column data / experiment column names.
    """
    os.mkdir(path)

    if data_frame_args is None:
        data_frame_args = {}

    if assay_args is None:
        assay_args = {}

    # On-disk type marker consumed by dolomite_base.read_object.
    with open(os.path.join(path, "OBJECT"), "w", encoding="utf-8") as handle:
        handle.write(
            '{ "type": "multi_sample_dataset", "multi_sample_dataset": { "version": "1.0" } }'
        )

    # sample/column data
    _sample_path = os.path.join(path, "sample_data")
    dl.save_object(x.get_column_data(), _sample_path, **data_frame_args)

    # Save each experiment into "experiments/<index>", recording the name
    # ordering in names.json so the reader can reconstruct the mapping.
    _expt_names = x.get_experiment_names()
    if len(_expt_names) > 0:
        _expt_path = os.path.join(path, "experiments")
        os.mkdir(_expt_path)

        with open(os.path.join(_expt_path, "names.json"), "w") as handle:
            json.dump(_expt_names, handle)

        for _aidx, _aname in enumerate(_expt_names):
            _expt_save_path = os.path.join(_expt_path, str(_aidx))
            try:
                dl.save_object(
                    x.experiment(_aname),
                    path=_expt_save_path,
                    data_frame_args=data_frame_args,
                    assay_args=assay_args,
                )
            except Exception as ex:
                # Chain the cause so the original traceback survives.
                raise RuntimeError(
                    f"failed to stage experiment '{_aname}' for {type(x)}; {ex}"
                ) from ex

        # For each experiment, write a u4 vector mapping its columns to row
        # indices of the column data ("sample_map.h5"/"multi_sample_dataset").
        with h5py.File(os.path.join(path, "sample_map.h5"), "w") as handle:
            ghandle = handle.create_group("multi_sample_dataset")

            _sample_map = x.get_sample_map()
            # Hoisted out of the loop; also avoid shadowing the parameter `x`
            # (the original used `x` as the comprehension variable).
            _assay_column = _sample_map.get_column("assay")

            for _aidx, _aname in enumerate(_expt_names):
                _indices_to_keep = [
                    idx
                    for idx, _entry in enumerate(_assay_column)
                    if _entry == _aname
                ]

                _colnames = biocutils.subset_sequence(
                    _sample_map.get_column("colname"), _indices_to_keep
                )
                _sample = biocutils.subset_sequence(
                    _sample_map.get_column("primary"), _indices_to_keep
                )

                i = biocutils.match(_sample, x.get_column_data().get_row_names())
                if (i == -1).any():
                    # NOTE: the original passed two arguments to RuntimeError
                    # (trailing comma), yielding a malformed tuple message.
                    raise RuntimeError(
                        "Samples in 'sample_map' not present in 'column_data' "
                        f"for experiment '{_aname}'."
                    )

                j = biocutils.match(x.experiment(_aname).get_column_names(), _colnames)
                if (j == -1).any():
                    raise RuntimeError(
                        f"Column names in experiment '{_aname}' not present in 'sample_map'."
                    )

                # Reorder sample indices to follow the experiment's own
                # column order.
                reorder = i[j.tolist()]

                dl.write_integer_vector_to_hdf5(
                    ghandle, name=str(_aidx), h5type="u4", x=reorder
                )

    # Optional free-form metadata, saved under "other_data".
    _meta = x.get_metadata()
    if _meta is not None and len(_meta) > 0:
        dl.save_object(_meta, path=os.path.join(path, "other_data"))
- - -def fib(n): - """Fibonacci example function - - Args: - n (int): integer - - Returns: - int: n-th Fibonacci number - """ - assert n > 0 - a, b = 1, 1 - for _i in range(n - 1): - a, b = b, a + b - return a - - -# ---- CLI ---- -# The functions defined in this section are wrappers around the main Python -# API allowing them to be called directly from the terminal as a CLI -# executable/script. - - -def parse_args(args): - """Parse command line parameters - - Args: - args (List[str]): command line parameters as list of strings - (for example ``["--help"]``). - - Returns: - :obj:`argparse.Namespace`: command line parameters namespace - """ - parser = argparse.ArgumentParser(description="Just a Fibonacci demonstration") - parser.add_argument( - "--version", - action="version", - version=f"dolomite-mae {__version__}", - ) - parser.add_argument(dest="n", help="n-th Fibonacci number", type=int, metavar="INT") - parser.add_argument( - "-v", - "--verbose", - dest="loglevel", - help="set loglevel to INFO", - action="store_const", - const=logging.INFO, - ) - parser.add_argument( - "-vv", - "--very-verbose", - dest="loglevel", - help="set loglevel to DEBUG", - action="store_const", - const=logging.DEBUG, - ) - return parser.parse_args(args) - - -def setup_logging(loglevel): - """Setup basic logging - - Args: - loglevel (int): minimum loglevel for emitting messages - """ - logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s" - logging.basicConfig( - level=loglevel, stream=sys.stdout, format=logformat, datefmt="%Y-%m-%d %H:%M:%S" - ) - - -def main(args): - """Wrapper allowing :func:`fib` to be called with string arguments in a CLI fashion - - Instead of returning the value from :func:`fib`, it prints the result to the - ``stdout`` in a nicely formatted message. - - Args: - args (List[str]): command line parameters as list of strings - (for example ``["--verbose", "42"]``). 
- """ - args = parse_args(args) - setup_logging(args.loglevel) - _logger.debug("Starting crazy calculations...") - print(f"The {args.n}-th Fibonacci number is {fib(args.n)}") - _logger.info("Script ends here") - - -def run(): - """Calls :func:`main` passing the CLI arguments extracted from :obj:`sys.argv` - - This function can be used as entry point to create console scripts with setuptools. - """ - main(sys.argv[1:]) - - -if __name__ == "__main__": - # ^ This is a guard statement that will prevent the following code from - # being executed in the case someone imports this file instead of - # executing it as a script. - # https://docs.python.org/3/library/__main__.html - - # After installing your project with pip, users can also run your Python - # modules as scripts via the ``-m`` flag, as defined in PEP 338:: - # - # python -m dolomite_mae.skeleton 42 - # - run() diff --git a/tests/test_mae.py b/tests/test_mae.py new file mode 100644 index 0000000..8e5424f --- /dev/null +++ b/tests/test_mae.py @@ -0,0 +1,152 @@ +import os +from random import random +from tempfile import mkdtemp + +import biocframe +import dolomite_mae +import dolomite_sce +import dolomite_se +import numpy +import pandas as pd +from dolomite_base import read_object, save_object +from genomicranges import GenomicRanges +from multiassayexperiment import MultiAssayExperiment +from singlecellexperiment import SingleCellExperiment +from summarizedexperiment import SummarizedExperiment + + +def test_stage_mae_basic(): + x = numpy.random.rand(1000, 200) + x2 = (numpy.random.rand(1000, 200) * 10).astype(numpy.int32) + + sce = SingleCellExperiment( + {"logcounts": x, "counts": x2}, + main_experiment_name="aaron's secret modality", + row_data=biocframe.BiocFrame( + {"foo": numpy.random.rand(1000), "bar": numpy.random.rand(1000)}, + row_names=["gene_sce_" + str(i) for i in range(1000)], + ), + column_data=biocframe.BiocFrame( + {"whee": numpy.random.rand(200), "stuff": numpy.random.rand(200)}, + 
row_names=["cell_sce" + str(i) for i in range(200)], + ), + ) + + se = SummarizedExperiment( + {"counts": numpy.random.rand(100, 200)}, + row_data=biocframe.BiocFrame( + {"foo": numpy.random.rand(100), "bar": numpy.random.rand(100)}, + row_names=["gene_se_" + str(i) for i in range(100)], + ), + column_data=biocframe.BiocFrame( + {"whee": numpy.random.rand(200), "stuff": numpy.random.rand(200)}, + row_names=["cell_se" + str(i) for i in range(200)], + ), + ) + + mae = MultiAssayExperiment(experiments={"jay_expt": sce, "aarons_expt": se}) + + dir = os.path.join(mkdtemp(), "mae_simple") + save_object(mae, dir) + + roundtrip = read_object(dir) + assert isinstance(roundtrip, MultiAssayExperiment) + assert roundtrip.experiment("jay_expt").shape == sce.shape + assert ( + roundtrip.experiment("aarons_expt").shape == mae.experiment("aarons_expt").shape + ) + assert len(mae.get_column_data()) == 2 + assert len(mae.get_sample_map()) == 400 + assert list(mae.get_column_data().get_row_names()) == [ + "unknown_sample_jay_expt", + "unknown_sample_aarons_expt", + ] + + +def test_stage_mae_complex(): + nrows = 200 + ncols = 6 + counts = numpy.random.rand(nrows, ncols) + df_gr = pd.DataFrame( + { + "seqnames": [ + "chr1", + "chr2", + "chr2", + "chr2", + "chr1", + "chr1", + "chr3", + "chr3", + "chr3", + "chr3", + ] + * 20, + "starts": range(100, 300), + "ends": range(110, 310), + "strand": ["-", "+", "+", "*", "*", "+", "+", "+", "-", "-"] * 20, + "score": range(0, 200), + "GC": [random() for _ in range(10)] * 20, + } + ) + + gr = GenomicRanges.from_pandas(df_gr) + + column_data_sce = pd.DataFrame( + { + "treatment": ["ChIP", "Input"] * 3, + }, + index=["sce"] * 6, + ) + column_data_se = pd.DataFrame( + { + "treatment": ["ChIP", "Input"] * 3, + }, + index=["se"] * 6, + ) + + sample_map = pd.DataFrame( + { + "assay": ["sce", "se"] * 6, + "primary": ["sample1", "sample2"] * 6, + "colname": ["sce", "se"] * 6, + } + ) + + sample_data = pd.DataFrame( + {"samples": ["sample1", "sample2"]}, 
index=["sample1", "sample2"] + ) + + tsce = SingleCellExperiment( + assays={"counts": counts}, + row_data=df_gr, + column_data=column_data_sce, + row_ranges=gr, + ) + + tse2 = SummarizedExperiment( + assays={"counts": counts.copy()}, + row_data=df_gr.copy(), + column_data=column_data_se.copy(), + ) + + mae = MultiAssayExperiment( + experiments={"sce": tsce, "se": tse2}, + column_data=sample_data, + sample_map=sample_map, + metadata={"could be": "anything"}, + ) + + dir = os.path.join(mkdtemp(), "mae_simple") + save_object(mae, dir) + + roundtrip = read_object(dir) + assert isinstance(roundtrip, MultiAssayExperiment) + assert roundtrip.experiment("sce").shape == mae.experiment("sce").shape + assert roundtrip.experiment("se").shape == mae.experiment("se").shape + assert len(mae.get_column_data()) == len(sample_data) + assert len(mae.get_sample_map()) == len(sample_map) + assert list(mae.get_column_data().get_row_names()) == [ + "sample1", + "sample2", + ] diff --git a/tests/test_skeleton.py b/tests/test_skeleton.py deleted file mode 100644 index 068147c..0000000 --- a/tests/test_skeleton.py +++ /dev/null @@ -1,25 +0,0 @@ -import pytest - -from dolomite_mae.skeleton import fib, main - -__author__ = "Jayaram Kancherla" -__copyright__ = "Jayaram Kancherla" -__license__ = "MIT" - - -def test_fib(): - """API Tests""" - assert fib(1) == 1 - assert fib(2) == 1 - assert fib(7) == 13 - with pytest.raises(AssertionError): - fib(-10) - - -def test_main(capsys): - """CLI Tests""" - # capsys is a pytest fixture that allows asserts against stdout/stderr - # https://docs.pytest.org/en/stable/capture.html - main(["7"]) - captured = capsys.readouterr() - assert "The 7-th Fibonacci number is 13" in captured.out