
Merge pull request #1279 from flatironinstitute/dev-fix_sphinx
Fix some sphinx issues
pgunn authored Feb 16, 2024
2 parents 1cf9d71 + fb3e752 commit 30aafba
Showing 11 changed files with 80 additions and 100 deletions.
25 changes: 13 additions & 12 deletions caiman/base/movies.py
@@ -2203,23 +2203,24 @@ def load_iter(file_name: Union[str, list[str]], subindices=None, var_name_hdf5:
         logging.error(f"File request:[{file_name}] not found!")
         raise Exception('File not found!')
 
-def get_file_size(file_name, var_name_hdf5='mov') -> tuple[tuple, Union[int, tuple]]:
-    """ Computes the dimensions of a file or a list of files without loading
+def get_file_size(file_name, var_name_hdf5:str='mov') -> tuple[tuple, Union[int, tuple]]:
+    """
+    Computes the dimensions of a file or a list of files without loading
     it/them in memory. An exception is thrown if the files have FOVs with
     different sizes
-        Args:
-            file_name: str/filePath or various list types
-                locations of file(s)
-            var_name_hdf5: 'str'
-                if loading from hdf5 name of the dataset to load
+    Args:
+        file_name:
+            locations of file(s)
+        var_name_hdf5:
+            if loading from hdf5 name of the dataset to load
-        Returns:
-            dims: tuple
-                dimensions of FOV
+    Returns:
+        dims: tuple
+            dimensions of FOV
-            T: int or tuple of int
-                number of timesteps in each file
+        T: int or tuple of int
+            number of timesteps in each file
     """
     # TODO There is a lot of redundant code between this, load(), and load_iter() that should be unified somehow
    if isinstance(file_name, pathlib.Path):
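
As a usage sketch (not part of the diff; the file names are hypothetical), the revised signature is called like this:

```python
from caiman.base.movies import get_file_size

# Single file: dims is the FOV shape, T the number of timesteps.
dims, T = get_file_size('example_movie.tif')
print(f"FOV: {dims}, timesteps: {T}")

# HDF5 input: var_name_hdf5 names the dataset to read ('mov' by default).
dims, T = get_file_size('example_movie.hdf5', var_name_hdf5='mov')
```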
24 changes: 15 additions & 9 deletions caiman/cluster.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 
-""" functions related to the creation and management of the "cluster",
+"""
+Functions related to the creation and management of the "cluster",
 meaning the framework for distributed computation.
 We put arrays on disk as raw bytes, extending along the first dimension.
@@ -182,32 +183,37 @@ def setup_cluster(backend:str = 'multiprocessing',
                   maxtasksperchild:int = None) -> tuple[Any, Any, Optional[int]]:
     """
     Setup and/or restart a parallel cluster.
     Args:
-        backend
+        backend:
             One of:
                 'multiprocessing' - Use multiprocessing library
                 'ipyparallel' - Use ipyparallel instead (better on Windows?)
                 'single' - Don't be parallel (good for debugging, slow)
             Most backends will try, by default, to stop a running cluster if
             it is running before setting up a new one, or throw an error if
             they find one.
-        n_processes
+        n_processes:
             Sets number of processes to use. If None, is set automatically.
-        single_thread
+        single_thread:
             Deprecated alias for the 'single' backend.
-        ignore_preexisting
+        ignore_preexisting:
             If True, ignores the existence of an already running multiprocessing
             pool (which usually indicates a previously-started CaImAn cluster)
-        maxtasksperchild
+        maxtasksperchild:
             Only used for multiprocessing, default None (number of tasks a worker process can
             complete before it will exit and be replaced with a fresh worker process).
     Returns:
-        c: ipyparallel.Client object; only used for ipyparallel backends, else None
-        dview: multicore processing engine that is used for parallel processing.
+        c:
+            ipyparallel.Client object; only used for ipyparallel backends, else None
+        dview:
+            multicore processing engine that is used for parallel processing.
             If backend is 'multiprocessing' then dview is Pool object.
             If backend is 'ipyparallel' then dview is a DirectView object.
-        n_processes: number of workers in dview. None means single core mode in use.
+        n_processes:
+            number of workers in dview. None means single core mode in use.
     """
 
     sys.stdout.flush() # XXX Unsure why we do this
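
As a usage sketch of the documented return triple (following the pattern the CaImAn demos use; not part of the diff):

```python
import caiman as cm

# c is an ipyparallel.Client only for the 'ipyparallel' backend, else None.
c, dview, n_processes = cm.cluster.setup_cluster(backend='multiprocessing',
                                                 n_processes=None)
try:
    pass  # run parallel CaImAn steps here, passing dview and n_processes
finally:
    cm.stop_server(dview=dview)  # shut the cluster down when finished
```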
2 changes: 1 addition & 1 deletion caiman/source_extraction/cnmf/deconvolution.py
@@ -677,7 +677,7 @@ def constrained_oasisAR2(y, g, sn, optimize_b=True, b_nonneg=True, optimize_g=0,
         s_min : float, optional, default 0
             Minimal non-zero activity within each bin (minimal 'spike size').
-            For negative values the threshold is |s_min| * sn * sqrt(1-decay_constant)
+            For negative values the threshold is abs(s_min) * sn * sqrt(1 - decay_constant)
             If 0 the threshold is determined automatically such that RSS <= sn^2 T
     Returns:
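
A minimal sketch of the thresholding rule this line documents (the helper name and values are illustrative, not library API):

```python
import numpy as np

def s_min_threshold(s_min: float, sn: float, decay_constant: float) -> float:
    # Negative s_min is interpreted as a multiple of the noise level sn,
    # scaled by sqrt(1 - decay_constant), as the docstring states.
    if s_min < 0:
        return abs(s_min) * sn * np.sqrt(1 - decay_constant)
    return s_min

print(s_min_threshold(-3.0, sn=0.5, decay_constant=0.95))  # ~0.335
```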
65 changes: 30 additions & 35 deletions caiman/source_extraction/cnmf/estimates.py
@@ -1071,52 +1071,44 @@ def evaluate_components(self, imgs, params, dview=None):
         self.idx_components = np.intersect1d(self.idx_components, idx_ecc)
         return self
 
-    def filter_components(self, imgs, params, new_dict={}, dview=None, select_mode='All'):
-        """Filters components based on given thresholds without re-computing
+    def filter_components(self, imgs, params, new_dict={}, dview=None, select_mode:str='All'):
+        """
+        Filters components based on given thresholds without re-computing
         the quality metrics. If the quality metrics are not present then it
         calls self.evaluate components.
         Args:
             imgs: np.array (possibly memory mapped, t,x,y[,z])
                 Imaging data
             params: params object
                 Parameters of the algorithm
-            new_dict: dict
-                New dictionary with parameters to be called. The dictionary's keys are
-                used to modify the params.quality subdictionary:
-                min_SNR: float
-                    trace SNR threshold
-                SNR_lowest: float
-                    minimum required trace SNR
-                rval_thr: float
-                    space correlation threshold
-                rval_lowest: float
-                    minimum required space correlation
-                use_cnn: bool
-                    flag for using the CNN classifier
-                min_cnn_thr: float
-                    CNN classifier threshold
-                cnn_lowest: float
-                    minimum required CNN threshold
-                gSig_range: list
-                    gSig scale values for CNN classifier
-            select_mode: str
+            select_mode:
                 Can be 'All' (no subselection is made, but quality filtering is performed),
                 'Accepted' (subselection of accepted components, a field named self.accepted_list must exist),
                 'Rejected' (subselection of rejected components, a field named self.rejected_list must exist),
                 'Unassigned' (both fields above need to exist)
+            new_dict: dict
+                New dictionary with parameters to be called. The dictionary
+                modifies the params.quality subdictionary in the following
+                entries:
+                    min_SNR: float
+                        trace SNR threshold
+                    SNR_lowest: float
+                        minimum required trace SNR
+                    rval_thr: float
+                        space correlation threshold
+                    rval_lowest: float
+                        minimum required space correlation
+                    use_cnn: bool
+                        flag for using the CNN classifier
+                    min_cnn_thr: float
+                        CNN classifier threshold
+                    cnn_lowest: float
+                        minimum required CNN threshold
+                    gSig_range: list
+                        gSig scale values for CNN classifier
         Returns:
             self: estimates object
             self.idx_components: np.array
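
A hedged sketch of how the documented arguments combine in a call (assuming a fitted CNMF object `cnm` and a loaded movie `images` from earlier pipeline steps):

```python
# Re-filter with stricter thresholds without recomputing quality metrics;
# the keys of new_dict patch the params.quality subdictionary.
cnm.estimates.filter_components(images, cnm.params,
                                new_dict={'min_SNR': 3.0, 'rval_thr': 0.85},
                                select_mode='All')
print(len(cnm.estimates.idx_components), 'components kept')
```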
@@ -1407,9 +1399,10 @@ def threshold_spatial_components(self, maxthr=0.25, dview=None):

     def remove_small_large_neurons(self, min_size_neuro, max_size_neuro,
                                    select_comp=False):
-        ''' remove neurons that are too large or too small
+        """
+        Remove neurons that are too large or too small
-            Args:
+        Args:
             min_size_neuro: int
                 min size in pixels
             max_size_neuro: int
@@ -1421,7 +1414,9 @@ def remove_small_large_neurons(self, min_size_neuro, max_size_neuro,
         Returns:
             neurons_to_keep: np.array
                 indices of components with size within the acceptable range
-        '''
+        """
+
         if self.A_thr is None:
             raise Exception('You need to compute thresholded components before calling remove_duplicates: use the threshold_components method')
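A short sketch of the call order the exception above enforces (size limits are illustrative; `cnm` and `dview` assumed from earlier steps):

```python
# Thresholded footprints (A_thr) must exist before size filtering.
cnm.estimates.threshold_spatial_components(maxthr=0.25, dview=dview)
neurons_to_keep = cnm.estimates.remove_small_large_neurons(min_size_neuro=20,
                                                           max_size_neuro=110,
                                                           select_comp=False)
```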

9 changes: 5 additions & 4 deletions caiman/source_extraction/cnmf/initialization.py
@@ -153,10 +153,11 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter
     Initialize components. This function initializes the spatial footprints, temporal components,
     and background which are then further refined by the CNMF iterations. There are four
     different initialization methods depending on the data you're processing:
-        'greedy_roi': GreedyROI method used in standard 2p processing (default)
-        'corr_pnr': GreedyCorr method used for processing 1p data
-        'sparse_nmf': Sparse NMF method suitable for dendritic/axonal imaging
-        'graph_nmf': Graph NMF method also suitable for dendritic/axonal imaging
+
+    greedy_roi: GreedyROI method used in standard 2p processing (default)
+    corr_pnr: GreedyCorr method used for processing 1p data
+    sparse_nmf: Sparse NMF method suitable for dendritic/axonal imaging
+    graph_nmf: Graph NMF method also suitable for dendritic/axonal imaging
 
     The GreedyROI method by default is not using the RollingGreedyROI method. This can
     be changed through the binary flag 'rolling_sum'.
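
In practice these method names are selected through the parameters object rather than by calling initialize_components directly; a hedged sketch (the 'method_init' key is assumed from standard CaImAn usage):

```python
from caiman.source_extraction.cnmf.params import CNMFParams

# greedy_roi (default, 2p), corr_pnr (1p),
# sparse_nmf / graph_nmf (dendritic/axonal imaging).
opts = CNMFParams(params_dict={'method_init': 'corr_pnr'})
```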
20 changes: 9 additions & 11 deletions caiman/utils/nn_models.py
@@ -487,35 +487,33 @@ def create_NL_model(Y=None, shape=(None, None, 1), n_channels=8, gSig=5, r_facto
 
 def fit_NL_model(model_NL, Y, patience=5, val_split=0.2, batch_size=32,
                  epochs=500, schedule=None):
-    """ Fit either the linear or the non-linear model. The model is fit for a
+    """
+    Fit either the linear or the non-linear model. The model is fit for a
     use specified maximum number of epochs and early stopping is used based on the
     validation loss. A Tensorboard compatible log is also created.
     Args:
         model_LN: Keras Ring-CNN model
             see create_LN_model and create_NL_model above
         patience: int, default: 5
             patience value for early stopping criterion
         val_split: float, default: 0.2
             fraction of data to keep for validation (value between 0 and 1)
         batch_size: int, default: 32
             batch size during training
         epochs: int, default: 500
             maximum number of epochs
         schedule: keras learning rate scheduler
     Returns:
-        model_NL: Keras Ring-CNN model
+        model_NL:
+            Keras Ring-CNN model
             trained model loaded with best weights according to validation loss
-        history_NL: contains data related to the training history
-        path_to_model: str
-            path to where the weights are stored.
+        history_NL:
+            contains data related to the training history
+        path_to_model:
+            path to where the weights are stored
     """
     if Y.ndim < 4:
         Y = np.expand_dims(Y, axis=-1)
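
A hedged usage sketch of the documented signature (a movie array `Y` is assumed to be loaded already; see create_NL_model above):

```python
from caiman.utils.nn_models import create_NL_model, fit_NL_model

# Build the Ring-CNN model, then fit with early stopping; the trained
# model, training history, and weights path come back as a triple.
model_NL = create_NL_model(Y)
model_NL, history_NL, path_to_model = fit_NL_model(
    model_NL, Y, patience=5, val_split=0.2, batch_size=32, epochs=500)
```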
6 changes: 2 additions & 4 deletions docs/source/Installation.rst
@@ -123,11 +123,9 @@ A couple of things to note:
    need to be removed. Use the Windows find-file utility
    (under the Start Menu) to look for ``vs2019_compiler_vars.bat`` under
    your home directory. If a copy shows up, delete the version that has
-   conda:raw-latex:`\envs`:raw-latex:`\caiman` as part of its location.
+   your ``caiman`` environment name as part of its location.
    You may then continue the installation.
 
-   .. code:: bash
-
    .. raw:: html
 
       </details>
@@ -171,7 +169,7 @@ and install the package file you will find in the folder that pops up
 
 
 Section 2: Set up demos with caimanmanager
------------------------
+------------------------------------------
 
 Once Caiman is installed, you will likely want to set up a working directory with code samples and datasets.
 The installation step in Section 1 produced a command ``caimanmanager`` that handles this. caimanmanager will
18 changes: 2 additions & 16 deletions docs/source/Overview.rst
@@ -11,7 +11,7 @@ CaImAn implements a set of essential methods required in the analysis pipeline o
 <CaImAn_features_and_references.html>`_.
 
 Companion paper
---------------
+---------------
 
 A paper explaining most of the implementation details and benchmarking can be found `here
 <https://elifesciences.org/articles/38173>`_.
@@ -28,22 +28,8 @@ A paper explaining most of the implementation details and benchmarking can be fo
         publisher={eLife Sciences Publications Limited}
     }
 
-
-Developers/Contributors
-------------
-
-CaImAn is being developed at the `Flatiron Institute <https://www.simonsfoundation.org/flatiron/>`_ with numerous contributions from the broader community. The main developers are
-
-* Eftychios A. Pnevmatikakis, Flatiron Institute
-* Andrea Giovannucci, University of North Carolina at Chapel Hill, previously at Flatiron Institute
-* Johannes Friedrich, Flatiron Institute
-* Pat Gunn, Flatiron Institute
-
-A complete list of contributors can be found `here <https://github.com/flatironinstitute/CaImAn/graphs/contributors>`_.
-
-
 Questions, comments, issues
------------------------------
+---------------------------
 
 Please use our `gitter chat room <https://gitter.im/agiovann/Constrained_NMF>`_ for questions and comments and create an issue on our `repo page <https://github.com/flatironinstitute/CaImAn>`_ for any bugs you might encounter.
 
2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -74,7 +74,7 @@
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = 'en'
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
8 changes: 2 additions & 6 deletions docs/source/core_functions.rst
@@ -53,8 +53,8 @@ Motion Correction
 .. automethod:: MotionCorrect.motion_correct_rigid
 .. automethod:: MotionCorrect.motion_correct_pwrigid
 .. automethod:: MotionCorrect.apply_shifts_movie
-.. automethod:: motion_correct_oneP_rigid
-.. automethod:: motion_correct_oneP_nonrigid
+.. autofunction:: motion_correct_oneP_rigid
+.. autofunction:: motion_correct_oneP_nonrigid
 
 
 Estimates
@@ -259,8 +259,4 @@ VolPy
 
 .. currentmodule:: caiman.source_extraction.volpy.spikepursuit
 .. autofunction:: volspike
-.. autofunction:: denoiseSpikes
-.. autofunction:: getThresh
-.. autofunction:: whitenedMathcedFilter
-.. autofunction:: highpassVideo

1 change: 0 additions & 1 deletion docs/source/index.rst
@@ -13,7 +13,6 @@ Contents:
 
    Overview
    Installation
-   news
    Getting_Started
    Handling_Movies
    CaImAn_Tips
