diff --git a/README.md b/README.md
index 1401b0e..de8d521 100644
--- a/README.md
+++ b/README.md
@@ -14,12 +14,6 @@ architectures for future work and research.
Join our community to create datasets and deep-learning models! Chat with us on [Gitter](https://gitter.im/EchoTorch/Lobby) and join the [Google Group](https://groups.google.com/forum/#!forum/echotorch/) to collaborate with us.
-
-
-
-
-
-
## Development status
![PyPI - Python Version](https://img.shields.io/pypi/pyversions/echotorch.svg?style=flat-square)
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000..39d493e
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+#
+# EchoTorch documentation build configuration file, created by
+# sphinx-quickstart on Thu Apr 6 11:30:46 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../..'))
+import echotorch
+#import sphinx_bootstrap_theme
+
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ['sphinx.ext.autodoc',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.mathjax',
+ 'sphinx.ext.githubpages']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'EchoTorch'
+copyright = '2017, Nils Schaetti'
+author = 'Nils Schaetti'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.1'
+# The full version, including alpha/beta/rc tags.
+release = '0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = []
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+#html_theme = 'bootstrap'
+#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+
+# -- Options for HTMLHelp output ------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'EchoTorchdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'EchoTorch.tex', 'EchoTorch Documentation',
+ 'Nils Schaetti', 'manual'),
+]
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'echotorch', 'EchoTorch Documentation',
+ [author], 1)
+]
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'EchoTorch', 'EchoTorch Documentation',
+ author, 'EchoTorch', 'Reservoir Computing and Echo State Networks with PyTorch.',
+ 'Miscellaneous'),
+]
+
+
+
diff --git a/docs/source/echotorch.datasets.rst b/docs/source/echotorch.datasets.rst
new file mode 100644
index 0000000..ec4877e
--- /dev/null
+++ b/docs/source/echotorch.datasets.rst
@@ -0,0 +1,38 @@
+echotorch\.datasets package
+===========================
+
+Submodules
+----------
+
+echotorch\.datasets\.MackeyGlassDataset module
+----------------------------------------------
+
+.. automodule:: echotorch.datasets.MackeyGlassDataset
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+echotorch\.datasets\.MemTestDataset module
+------------------------------------------
+
+.. automodule:: echotorch.datasets.MemTestDataset
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+echotorch\.datasets\.NARMADataset module
+----------------------------------------
+
+.. automodule:: echotorch.datasets.NARMADataset
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: echotorch.datasets
+ :members:
+ :undoc-members:
+ :show-inheritance:
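+
+Example
+-------
+
+A minimal sketch of loading one of these datasets with a PyTorch
+``DataLoader``; the constructor arguments shown here are illustrative
+and should be checked against the class documentation above:
+
+.. code-block:: python
+
+    from torch.utils.data import DataLoader
+    from echotorch.datasets import MackeyGlassDataset
+
+    # 10 Mackey-Glass series of 1000 time steps each (arguments illustrative)
+    dataset = MackeyGlassDataset(sample_len=1000, n_samples=10)
+    loader = DataLoader(dataset, batch_size=1, shuffle=False)
+
+    # The structure of each item depends on the dataset class
+    for data in loader:
+        print(data)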
diff --git a/docs/source/echotorch.nn.rst b/docs/source/echotorch.nn.rst
new file mode 100644
index 0000000..6be9d85
--- /dev/null
+++ b/docs/source/echotorch.nn.rst
@@ -0,0 +1,32 @@
+echotorch.nn
+============
+
+.. automodule:: echotorch.nn
+.. currentmodule:: echotorch.nn
+
+Echo State Layers
+-----------------
+
+ESNCell
+~~~~~~~
+
+.. autoclass:: ESNCell
+ :members:
+
+ESN
+~~~
+
+.. autoclass:: ESN
+ :members:
+
+LiESNCell
+~~~~~~~~~
+
+.. autoclass:: LiESNCell
+ :members:
+
+LiESN
+~~~~~
+
+.. autoclass:: LiESN
+ :members:
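+
+Example
+-------
+
+A minimal training sketch for a leaky-integrated ESN. The constructor
+arguments and the ``finalize()`` call are illustrative; see the class
+documentation above for the exact signatures:
+
+.. code-block:: python
+
+    import echotorch.nn as etnn
+
+    # Leaky-integrated ESN: 1 input channel, 100 reservoir units, 1 output
+    esn = etnn.LiESN(input_dim=1, hidden_dim=100, output_dim=1,
+                     spectral_radius=0.9, leaky_rate=0.5)
+
+    # Accumulate reservoir states over the training set (trainloader is a
+    # DataLoader yielding (inputs, targets) pairs), then solve the readout
+    for inputs, targets in trainloader:
+        esn(inputs, targets)
+    esn.finalize()
+
+    # Inference with the trained readout
+    predictions = esn(test_inputs)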
diff --git a/docs/source/echotorch.rst b/docs/source/echotorch.rst
new file mode 100644
index 0000000..aaed1d2
--- /dev/null
+++ b/docs/source/echotorch.rst
@@ -0,0 +1,19 @@
+echotorch package
+=================
+
+Subpackages
+-----------
+
+.. toctree::
+
+ echotorch.datasets
+ echotorch.nn
+ echotorch.utils
+
+Module contents
+---------------
+
+.. automodule:: echotorch
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/echotorch.utils.rst b/docs/source/echotorch.utils.rst
new file mode 100644
index 0000000..b41a8e1
--- /dev/null
+++ b/docs/source/echotorch.utils.rst
@@ -0,0 +1,30 @@
+echotorch\.utils package
+========================
+
+Submodules
+----------
+
+echotorch\.utils\.error\_measures module
+----------------------------------------
+
+.. automodule:: echotorch.utils.error_measures
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+echotorch\.utils\.utility\_functions module
+-------------------------------------------
+
+.. automodule:: echotorch.utils.utility_functions
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: echotorch.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
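+
+Example
+-------
+
+A short sketch of computing an error measure; the ``nrmse`` function
+name is used illustratively here (check the ``error_measures`` module
+above for the functions actually exposed):
+
+.. code-block:: python
+
+    import torch
+    import echotorch.utils
+
+    predicted = torch.randn(100, 1)
+    target = torch.randn(100, 1)
+
+    # Normalized root mean squared error between prediction and target
+    err = echotorch.utils.nrmse(predicted, target)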
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000..36bd1b2
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,32 @@
+.. EchoTorch documentation master file, created by
+ sphinx-quickstart on Thu Apr 6 11:30:46 2017.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+EchoTorch documentation
+=======================
+
+EchoTorch is a PyTorch-based library for Reservoir Computing and Echo State Networks, running on GPUs and CPUs.
+
+.. toctree::
+ :glob:
+ :maxdepth: 1
+ :caption: Notes
+
+ notes/*
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Package Reference
+
+ echotorch
+ echotorch.datasets
+ echotorch.nn
+ echotorch.utils
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
\ No newline at end of file
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
new file mode 100644
index 0000000..96cbe13
--- /dev/null
+++ b/docs/source/modules.rst
@@ -0,0 +1,7 @@
+echotorch
+=========
+
+.. toctree::
+ :maxdepth: 4
+
+ echotorch
diff --git a/docs/source/notes/esn_learning.rst b/docs/source/notes/esn_learning.rst
new file mode 100644
index 0000000..06add54
--- /dev/null
+++ b/docs/source/notes/esn_learning.rst
@@ -0,0 +1,19 @@
+Echo State Network learning mechanics
+=====================================
+
+This note presents an overview of how Echo State Networks work and of
+their learning mechanics. Understanding the complete learning phase is
+not mandatory, but we recommend understanding the difference between
+classical ESN learning and gradient descent, as it will help you choose
+which one to use in a given situation.
+
+.. _esn_model:
+
+The Echo State Network model
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
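+As a quick reference, a leaky-integrated ESN updates its reservoir
+state :math:`x(t)` from the input :math:`u(t)` using fixed random
+weights :math:`W^{in}` and :math:`W`; only the readout
+:math:`W^{out}` is learned:
+
+.. math::
+
+    x(t+1) = (1 - a) \, x(t) + a \tanh\left(W^{in} u(t+1) + W x(t)\right)
+
+.. math::
+
+    \hat{y}(t) = W^{out} x(t)
+
+where :math:`a \in (0, 1]` is the leaky rate; :math:`a = 1` recovers
+the classical (non-leaky) ESN update.
+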
+.. _esn_learning:
+
+Classical ESN learning
+~~~~~~~~~~~~~~~~~~~~~~
+
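+In the classical setting the readout is learned in closed form rather
+than by gradient descent. Collecting the reservoir states as columns of
+:math:`X` and the target outputs as columns of :math:`Y`, ridge
+regression gives
+
+.. math::
+
+    W^{out} = Y X^T \left(X X^T + \lambda I\right)^{-1}
+
+with regularization factor :math:`\lambda \geq 0`. This is a single
+linear solve over the whole training set; gradient descent instead
+updates the weights iteratively from the error signal, which is slower
+but allows training more than just the readout.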
diff --git a/echotorch/__init__.py b/echotorch/__init__.py
index 13b9409..d7f89c9 100644
--- a/echotorch/__init__.py
+++ b/echotorch/__init__.py
@@ -19,100 +19,14 @@
#
# Copyright Nils Schaetti, University of Neuchâtel
-
# Imports
-import pkg_resources
-from pkg_resources import parse_version
-
-# BaseTensors
-from .base_tensors import BaseTensor, CharBaseTensor, DoubleBaseTensor, ByteBaseTensor, FloatBaseTensor
-from .base_tensors import BFloat16Tensor, HalfBaseTensor
-
-# DataTensors
-from .data_tensors import DataTensor, DataIndexer
-
-# TimeTensors
-from .timetensors import TimeTensor, CharTimeTensor, DoubleTimeTensor, ByteTimeTensor, FloatTimeTensor
-from .timetensors import BFloat16Tensor, HalfTimeTensor
-
-# Base operations
-from .base_ops import timetensor, sparse_coo_timetensor, as_timetensor, as_strided, from_numpy, zeros, zeros_like
-from .base_ops import ones, ones_like, arange, linspace, logspace, empty, empty_like, empty_strided, full, full_like
-from .base_ops import quantize_per_timetensor, quantize_per_channel, dequantize, complex, polar
-from .base_ops import cat, tcat, rand
-from .base_ops import tindex_select, randn
-
-# Stat operations
-from .stat_ops import tmean, tstd, tvar, cov, cor
-
-# ACC operations
-from . import acf
-
-# Timeseries operations
-from .series_ops import diff
-
-# Nodes
-from .nodes import Node
-
-
-# Min Torch version
-MIN_TORCH_VERSION = '1.9.0'
-
-
-# Try import torch
-try:
- # pylint: disable=wrong-import-position
- import torch
-except ModuleNotFoundError:
- raise ModuleNotFoundError(
- "No module named 'torch', and echotorch depends on PyTorch "
- "(aka 'torch'). "
- "Visit https://pytorch.org/ for installation instructions."
- )
-# end try
-
-# Get Torch version
-torch_version = pkg_resources.get_distribution('torch').version
-
-# Torch version is too old
-if parse_version(torch_version) < parse_version(MIN_TORCH_VERSION):
- # Message
- msg = (
- 'echotorch depends on a newer version of PyTorch (at least {req}, not '
- '{installed}). Visit https://pytorch.org for installation details'
- )
-
- # Import warning
- raise ImportWarning(msg.format(req=MIN_TORCH_VERSION, installed=torch_version))
-# end if
+from . import datasets
+from . import evaluation
+from . import models
+from . import nn
+from . import transforms
+from . import utils
# All echotorch's modules
-__all__ = [
- # 'esn', 'datasets', 'evaluation', 'models', 'nn', 'transforms', 'utils', 'fit', 'eval',
- # 'cross_val_score', 'copytask', 'discrete_markov_chain', 'csv_file', 'henon',
- # 'delaytask', 'cross_eval', 'segment_series', 'cycle_with_jumps', 'matlab', 'normal', 'uniform',
- # 'cycle_with_jumps_generator', 'matlab_generator', 'normal_generator', 'uniform_generator', 'conceptor', 'cone',
- # 'czero', 'cidentity', 'OR', 'AND', 'NOT', 'PHI', 'conceptor_set', 'csim', 'csimilarity', 'autocorrelation_coefs',
- # 'cov', 'autocorrelation_function', 'acc',
- # Submodels
- # 'data', 'models', 'nn', 'skecho', 'transforms', 'utils', 'viz',
- # BaseTensors
- 'BaseTensor',
- 'ByteBaseTensor', 'CharBaseTensor', 'HalfBaseTensor', 'DoubleBaseTensor', 'FloatBaseTensor',
- # DataTensors
- 'DataTensor', 'DataIndexer',
- # TimeTensors and base ops
- 'TimeTensor', 'cat', 'tcat', 'tcat', 'tindex_select', 'rand', 'randn',
- 'ByteTimeTensor', 'CharTimeTensor', 'HalfTimeTensor', 'DoubleTimeTensor', 'FloatTimeTensor',
- # Creation ops
- 'timetensor', 'sparse_coo_timetensor', 'as_timetensor', 'as_strided', 'from_numpy', 'zeros', 'zeros_like',
- 'ones', 'ones_like', 'arange', 'linspace', 'logspace', 'empty', 'empty_like', 'empty_strided', 'full', 'full_like',
- 'quantize_per_timetensor', 'quantize_per_channel', 'dequantize', 'complex', 'polar',
- # Stats ops
- 'tmean', 'tstd', 'cov', 'cor',
- # Series ops
- 'diff',
- # ACC ops
- 'acf',
-]
+__all__ = ['datasets', 'evaluation', 'models', 'nn', 'transforms', 'utils']
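+
+# With the subpackages exposed above, a typical (illustrative) import
+# pattern is:
+#
+#   import echotorch.nn as etnn
+#   import echotorch.datasets as etds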
diff --git a/echotorch/acf.py b/echotorch/acf.py
deleted file mode 100644
index c8bbae8..0000000
--- a/echotorch/acf.py
+++ /dev/null
@@ -1,520 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/acf.py
-# Description : Auto-covariance/correlation coefficients operations on (Time/Data/*)Tensor-
-# Date : 24th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import Dict, List, Callable, Optional, Tuple
-import matplotlib.pyplot as plt
-import echotorch.viz
-
-# Local imports
-from .timetensors import TimeTensor
-from .stat_ops import cov
-from .base_ops import zeros
-
-
-# Autocovariance coefficients for a time series (timetensor)
-def autocovariance_coeffs(
- input: TimeTensor,
- k: int
-) -> TimeTensor:
- r"""Returns the auto-covariance coefficients of a time series as a timetensor.
-
- :param input: input 0-D :math:`(T)` or 1-D :math:`(T, p)` time series.
- :type input: ``TimeTensor``
- :param k: number of different lags.
- :type k: ``int``
- :return: The auto-covariance coefficients as timetensor of size :math:`(k, p)` or :math:`(k)`.
-
- Example:
-
- >>> x = echotorch.rand(5, time_length=100)
- >>> autocov_coefs = echotorch.autocovariance_coeffs(x, k=50)
- >>> plt.figure()
- >>> echotorch.viz.timeplot(autocov_coeffs, title="Auto-covariance coefficients")
- >>> plt.show()
- """
- # Check k
- assert k > 1, "The number of lags must be greated than 1 (here {})".format(k)
-
- # Time dim must be first
- assert input.time_dim == 0, "Time dimension must be the first dimension of " \
- "the timetensor (here {})".format(input.time_dim)
-
- # 0-D of 1-D time series
- assert input.cdim in [0, 1], "Expected 0-D or 1-D time series, found {}-D".format(input.cdim)
-
- # Difference with time length
- com_time_length = input.tlen - k
-
- # The time length for comparison must
- # be superior (or equal) to the number of lags required
- assert com_time_length >= k, "Time length for comparison must be superior (or equal) to the number of lags " \
- "required (series of length {}, {} lags, " \
- "comparison length of {})".format(input.tlen, k, com_time_length)
-
- # Compute auto-covariance coefficients
- def compute_autocov_coeffs(x, lags):
- # Coeffs
- coeffs = zeros(length=lags + 1)
-
- # Variance
- coeffs[0] = cov(x[:com_time_length], x[:com_time_length])
-
- # For each lag k
- for lag_i in range(1, lags + 1):
- coeffs[lag_i] = cov(
- x[:com_time_length],
- x[lag_i:lag_i + com_time_length]
- )
- # end for
-
- return coeffs
- # end compute_autocov_coeffs
-
- # Compute coeffs for each channel
- if input.cdim == 0:
- return compute_autocov_coeffs(input, k)
- else:
- # Store coefs
- autocov_coefs = zeros(input.csize()[0], length=k + 1)
-
- # Compute coeffs for each channel
- for chan_i in range(input.csize()[0]):
- autocov_coefs[:, chan_i] = compute_autocov_coeffs(input[:, chan_i], k)
- # end for
-
- return autocov_coefs
- # end if
-# end autocovariance_coeffs
-
-
-# Auto-correlation coefficients for a time series (timetensor)
-def acf(
- input: TimeTensor,
- k: int,
- coeffs_type: str = "covariance"
-) -> TimeTensor:
- r"""Returns auto-correlation coefficients for a time series as a :class:`TimeTensor`.
-
- :param input: input 0-D :math:`(T)` or 1-D :math:`(T, p)` time series.
- :type input: ``TimeTensor``
- :param k: number of different lags.
- :type k: ``int``
- :return: The auto-covariance coefficients as timetensor of size :math:`(k, p)` or :math:`(k)`.
- :param coeffs_type: Type of coefficient, "covariance" or "correlation".
- :type coeffs_type: ``str``
-
- Example:
-
- >>> x = echotorch.rand(5, time_length=100)
- >>> autocor_coefs = echotorch.autocorrelation_coeffs(x, k=50)
- >>> plt.figure()
- >>> echotorch.viz.timeplot(autocor_coefs, title="Auto-correlation coefficients")
- >>> plt.show()
- """
- # Check type
- assert coeffs_type in ["covariance", "correlation"], "Unknown type of coefficients given, should be 'covariance' " \
- "or 'correlation' ({} given)".format(coeffs_type)
-
- # Compute auto-covariance coefficients
- autocov_coeffs = autocovariance_coeffs(input, k)
-
- # Covariance
- if coeffs_type == "covariance":
- return autocov_coeffs
- elif coeffs_type == "correlation":
- # Normalize
- if autocov_coeffs.cdim == 0:
- return autocov_coeffs / autocov_coeffs[0]
- else:
- # For each channel
- for chan_i in range(autocov_coeffs.csize()[0]):
- autocov_coeffs[:, chan_i] /= autocov_coeffs[0, chan_i]
- # end for
-
- return autocov_coeffs
- # end if
- # end if
-# end acf
-
-
-# Cross-autocovariance Coefficients
-def ccf(
- x: TimeTensor,
- y: TimeTensor,
- k: int,
- coeffs_type: str = "covariance"
-) -> TimeTensor:
- r"""Returns cross auto-correlation coefficients (CCF) for two 0-D timeseries as a :class:`TimeTensor`.
-
- :param x:
- :type x:
- :param y:
- :type y:
- :param k:
- :type k:
- :param coeffs_type:
- :type coeffs_type:
-
- Example:
-
- >>>
- """
- # Check k
- assert k > 1, "The number of lags must be greated than 1 (here {})".format(k)
-
- # Check type
- assert coeffs_type in ["covariance", "correlation"], "Unknown type of coefficients given, should be 'covariance' " \
- "or 'correlation' ({} given)".format(coeffs_type)
-
- # Check time series lengths
- assert x.tlen == y.tlen, "Expected two timeseries with same length (here {} != {})".format(x.tlen, y.tlen)
-
- # Same dim same size
- assert x.time_dim == y.time_dim, ""
- assert x.cdim == y.cdim, ""
- assert x.bdim == y.bdim, ""
-
- # Difference with time length
- com_time_length = x.tlen - k
-
- # The time length for comparison must
- # be superior (or equal) to the number of lags required
- assert com_time_length >= k, "Time length for comparison must be superior (or equal) to the number of lags " \
- "required (series of length {}, {} lags, " \
- "comparison length of {})".format(x.tlen, k, com_time_length)
-
- # Compute auto-covariance coefficients
- def compute_cross_autocov_coeffs(x, y, lags):
- # Coeffs
- coeffs = zeros(length=lags + 1)
-
- # Covariance
- coeffs[0] = cov(x[:com_time_length], y[:com_time_length])
-
- # For each lag k
- for lag_i in range(1, lags + 1):
- coeffs[lag_i] = cov(
- x[:com_time_length],
- y[lag_i:lag_i + com_time_length]
- )
- # end for
-
- return coeffs
- # end compute_autocov_coeffs
-
- # Compute auto-covariance coefficients
- autocov_coeffs = compute_cross_autocov_coeffs(x, y, k)
-
- # Covariance
- if coeffs_type == "covariance":
- return autocov_coeffs
- elif coeffs_type == "correlation":
- return autocov_coeffs / autocov_coeffs[0]
- # end if
-# end ccf
-
-
-# Cross-correlation
-
-
-# Plot auto-covariance/correlation coefficients
-def acfplot(
- input: TimeTensor,
- k: int,
- coeffs_type: str = "covariance",
- labels: List[str] = None,
- figure_params: Dict = None,
- plot_params: Dict = None
-) -> None:
- r"""Plot auto-covariance or auto-correlation coefficients for a :class:`TimeTensor`.
-
- :param input: the input timetensor.
- :type input: :class:`TimeTensor`
- :param k: the number of lags.
- :type k: ``int``
- :param coeffs_type:
- :type coeffs_type:
- :param labels:
- :type labels:
- :param figure_params:
- :type figure_params:
- :param plot_params:
- :type plot_params:
-
- Example:
-
- >>> ...
- """
- # Only 0-D or 1-D timeseries
- assert input.cdim in [0, 1], "Expected 0-D or 1-D timeseries but {}-D given".format(input.cdim)
-
- # Compute coefficients
- acf_coeffs = acf(input, k, coeffs_type)
-
- # Labels
- if labels is None:
- labels = ["Series {}".format(i) for i in range(1, max(1, input.numelc() + 1))]
- else:
- assert len(labels) == input.numelc(), "The number of labels should be equal to the number of channels " \
- "(here {} label given, " \
- "{} channels)".format(len(labels), input.numelc())
- # end if
-
- # Init. params
- figure_params = {} if figure_params is None else figure_params
- plot_params = {} if plot_params is None else plot_params
-
- # Figure
- plt.figure(**figure_params)
-
- # Plot
- if input.cdim == 0:
- echotorch.viz.timeplot(acf_coeffs, label=labels[0], **plot_params)
- else:
- # For each channel
- for chan_i in range(input.numelc()):
- echotorch.viz.timeplot(acf_coeffs, label=labels[chan_i], **plot_params)
- # end for
- # end if
-
- # Show
- plt.show()
-# end acfplot
-
-
-# Alias for acfplot
-def correlogram(
- input: TimeTensor,
- k: int,
- coeffs_type: str = "covariance",
- labels: List[str] = None,
- figure_params: Dict = None,
- plot_params: Dict = None
-) -> None:
- r"""Alias for :func:`acfplot`
- """
- acfplot(input, k, coeffs_type, labels, figure_params, plot_params)
-# end correlogram
-
-
-# Plot cross auto-correlation coefficients
-def ccfplot(
- x: TimeTensor,
- y: TimeTensor,
- k: int,
- coeffs_type: str = "covariance",
- labels: List[str] = None,
- figure_params: Dict = None,
- plot_params: Dict = None,
-) -> None:
- r"""Plot cross auto-covariance or cross auto-correlation coefficients of two time series for a :class:`TimeTensor`.
-
- :param x: the input timetensor.
- :type x: :class:`TimeTensor`
- :param y: the input timetensor.
- :type y: :class:`TimeTensor`
- :param k: the number of lags.
- :type k: ``int``
- :param coeffs_type:
- :type coeffs_type:
- :param labels:
- :type labels:
- :param figure_params:
- :type figure_params:
- :param plot_params:
- :type plot_params:
-
- Example:
-
- >>> ...
- """
- # Only 0-D or 1-D timeseries
- assert x.cdim in [0, 1], "Expected 0-D or 1-D timeseries but {}-D given".format(x.cdim)
- assert y.cdim in [0, 1], "Expected 0-D or 1-D timeseries but {}-D given".format(y.cdim)
-
- # Compute coefficients
- ccf_coeffs = ccf(x, y, k, coeffs_type)
-
- # Labels
- if labels is None:
- labels = ["Series {}".format(i) for i in range(1, max(1, x.numelc() + 1))]
- else:
- assert len(labels) == x.numelc(), "The number of labels should be equal to the number of channels " \
- "(here {} label given, " \
- "{} channels)".format(len(labels), x.numelc())
- # end if
-
- # Init. params
- figure_params = {} if figure_params is None else figure_params
- plot_params = {} if plot_params is None else plot_params
-
- # Figure
- plt.figure(**figure_params)
-
- # Plot
- if x.cdim == 0:
- echotorch.viz.timeplot(ccf_coeffs, label=labels[0], **plot_params)
- else:
- # For each channel
- for chan_i in range(x.numelc()):
- echotorch.viz.timeplot(ccf_coeffs, label=labels[chan_i], **plot_params)
- # end for
- # end if
-
- # Show
- plt.show()
-# end ccfplot
-
-
-# Alias for acfplot
-def cross_correlogram(
- x: TimeTensor,
- y: TimeTensor,
- k: int,
- coeffs_type: str = "covariance",
- labels: List[str] = None,
- figure_params: Dict = None,
- plot_params: Dict = None,
-) -> None:
- r"""Alias for :func:`ccfplot`
- """
- ccfplot(x, y, k, coeffs_type, labels, figure_params, plot_params)
-# end cross_correlogram
-
-
-# Array of cross-autocorrelation
-def ccfpairs(
- x: TimeTensor,
- k: int,
- coeffs_type: str = "covariance",
- labels: List[str] = None,
- figsize: Optional[Tuple[int, int]] = None,
- tight_layout: Optional[Dict] = None,
- plot_pvalues: Optional[bool] = True
-) -> None:
- r"""Plot an array of plots with cross-autocorrelation for each pair of channel.
-
- :param x: the input timetensor.
- :type x: :class:`TimeTensor`
- :param k: the number of lags.
- :type k: ``int``
- :param coeffs_type:
- :type coeffs_type:
- :param labels:
- :type labels:
- :param figure_params:
- :type figure_params:
- :param plot_params:
- :type plot_params:
-
- Example:
-
- >>> ...
- """
- # Only 1D timeseries
- assert x.cdim == 1, "Expected 1-D timeseries but {}-D given".format(x.cdim)
-
- # Number of channels
- nc = x.numelc()
-
- # Labels
- if labels is None:
- labels = [str(i) for i in range(nc)]
- # end if
-
- # Figure
- fig, axs = plt.subplots(nc, nc, figsize=figsize)
-
- # For pair of channel
- for chan_i in range(nc):
- for chan_j in range(nc):
- # No x-axis label
- if chan_i != nc - 1:
- axs[chan_i, chan_j].get_xaxis().set_visible(False)
- else:
- axs[chan_i, chan_j].get_xaxis().set_visible(True)
- # end if
-
- # Y ticks only on the left
- if chan_j != 0:
- # Enable ticks
- axs[chan_i, chan_j].get_yaxis().set_visible(False)
- # end if
-
- # Different channel
- if chan_i != chan_j:
- # Compute cross auto-covariance
- ccf_coeffs = ccf(x[:, chan_i], x[:, chan_j], k=k, coeffs_type=coeffs_type)
-
- # Plot
- echotorch.viz.timeplot(
- ccf_coeffs,
- marker=".",
- axis=axs[chan_i, chan_j]
- )
-
- # Plot text
- if plot_pvalues:
- # Coef background color
- # back_color = 'white' if pvs[i, j] >= sign_level else 'green'
-
- # Show correlation coefficient
- axs[chan_i, chan_j].text(
- 0.04,
- 0.07,
- "0.54",
- fontsize=10,
- verticalalignment='bottom',
- horizontalalignment='left',
- bbox=dict(boxstyle='square', facecolor='green', alpha=0.75),
- transform=axs[chan_i, chan_j].transAxes
- )
- # end if
- else:
- # Compute auto-covariance
- acf_coeffs = acf(x[:, chan_i], k=k, coeffs_type=coeffs_type)
-
- # Show titles
- axs[chan_i, chan_j].set_title("{}".format(labels[chan_i]))
-
- # Plot
- echotorch.viz.timeplot(
- acf_coeffs,
- title=labels[chan_i],
- marker=".",
- axis=axs[chan_i, chan_j]
- )
- # end if
- # end for
- # end for
-
- # Tight layout
- if tight_layout is not None:
- fig.tight_layout(*tight_layout)
- else:
- fig.tight_layout()
- # end if
-
- # Show
- plt.show()
-# end ccfpairs
-
diff --git a/echotorch/base_ops.py b/echotorch/base_ops.py
deleted file mode 100644
index 9c03010..0000000
--- a/echotorch/base_ops.py
+++ /dev/null
@@ -1,1523 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/timetensors/timetensor_creation_ops.py
-# Description : TimeTensor creation helper functions
-# Date : 27th of July, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel
-# University of Geneva
-
-
-# Imports
-from typing import Optional, Tuple, Union, Any
-import numpy as np
-import torch
-
-# Import local
-from echotorch import TimeTensor
-
-
-# region CREATION_OPS
-
-
-# Constructs a timetensor with data.
-def timetensor(
- data: Any,
- time_dim: Optional[int] = 0,
- dtype: Optional[torch.dtype] = None,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False,
- pin_memory: Optional[bool] = False
-) -> 'TimeTensor':
- r"""Construct a :class:`TimeTensor` with given data as tensor, timetensor, list, etc.
-
- .. warning::
- Similarly to ``torch.tensor()``, :func:`echotorch.timetensor()` copies the data. For more information on how
- to avoid copy, check the `PyTorch documentation `__.
-
- .. warning::
- Like ``torch.tensor()``, :func:`echotorch.timetensor()` reads out data and construct a leaf variable. Check
- the `PyTorch documentation on torch.tensor() `__ for more information.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``tensor()`` for more informations.
-
- :param data: data for the wrapped :class:`torch.Tensor` as a tensor, timetensor, list or Numpy array.
- :type data: array_like
- :param time_dim: the index of the time dimension (default: 0).
- :type time_dim: ``int``, optional
- :param dtype: the desired data type of the wrapped tensor (default: None, infered from ``data``).
- :type dtype: :class:`torch.dtype`, optional
- :param device: the estination device of the wrapped tensor (default: None, current device, see ``torch.set_default_tensor_type()``).
- :type device: :class:`torch.device`, optional
- :param requires_grad: Should operations been recorded by autograd for this timetensor?
- :type requires_grad: `bool`, optional
- :param pin_memory: If set, returned timetensor would be allocated in the pinned memory. Works only for CPU timetensors (default: ``False``)
- :type pin_memory: `bool`, optional
-
- Example:
-
- >>> echotorch.timetensor([1, 2, 3, 4], device='cuda:0')
- timetensor([1, 2, 3, 4], device='cuda:0', time_dim: 0)
- """
- # Data
- if isinstance(data, torch.Tensor):
- src_data = data.clone().detach().requires_grad_(requires_grad)
- elif isinstance(data, TimeTensor):
- src_data = data.tensor.clone().detach().requires_grad_(requires_grad)
- else:
- src_data = torch.tensor(
- data,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad,
- pin_memory=pin_memory
- )
- # end if
-
- # Set parameters
- src_data = src_data.to(dtype=dtype, device=device)
- if pin_memory: src_data = src_data.pin_memory()
-
- # Create timetensor
- return TimeTensor.new_timetensor(
- src_data,
- time_dim=time_dim
- )
-# end timetensor
-
-
-# Convert data into a TimeTensor
-def as_timetensor(
- data: Any,
- time_dim: Optional[int] = 0,
- dtype: Optional[torch.dtype] = None,
- device: Optional[torch.device] = None
-) -> 'TimeTensor':
- r"""Convert data into a :class:`TimeTensor`. If a :class:`torch.Tensor` or a :class:`TimeTensor` is given as data,
- no copy will be made, otherwise a new :class:`torch.Tensor` will be wrapped with computational graph retained if
- the tensor has ``requires_grad`` set to ``True``. If the data comes frome Numpy (:class:`ndarray`) with the same
- *dtype* and is on the cpu, the no copy will be made. This behavior is similar to :func:`torch.as_tensor()`. See
- the `PyTorch documentation `__ for more information.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``as_tensor()`` for more informations.
-
- :param data: data to convert for the wrapper tensor as :class:`TimeTensor`, Tensor, List, scalar or Numpy array.
- :type data: array-like
- :param time_dim: the index of the time dimension.
- :type time_dim: ``int``, optional
- :param dtype: the desired data type of the wrapped tensor (default: None, infered from ``data``).
- :type dtype: :class:`torch.dtype`, optional
- :param device: the estination device of the wrapped tensor (default: None, current device, see ``torch.set_default_tensor_type()``).
- :type device: :class:`torch.device`, optional
-
- Example:
-
- >>> x = echotorch.as_timetensor([[0], [1], [2]], time_dim=0)
- >>> x
- timetensor([[0],
- [1],
- [2]], time_dim: 0)
- >>> x.csize()
- torch.Size([1])
- >>> x.bsize()
- torch.Size([])
- >>> x.tlen
- 3
- """
- return TimeTensor.new_timetensor(
- torch.as_tensor(
- data,
- dtype=dtype,
- device=device
- ),
- time_dim=time_dim
- )
-# end as_timetensor
-
-
-# Sparse COO timetensor
-def sparse_coo_timetensor(
- indices,
- values,
- time_dim: Optional[int] = 0,
- size=None,
- dtype=None,
- device=None,
- requires_grad=False
-) -> TimeTensor:
- r"""Construct a :class:`TimeTensor` with a wrapped `sparse *tensor* in COO(rdinate) format `__ with specified values at the given indices.
-
- .. note::
- The contained tensor is an uncoalesced tensor.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``sparse_coo_tensor()`` for more informations.
-
- :param indices: the data indices for the wrapped tensor as a list, tuple, Numpy ``ndarray``, scalar, etc. Indices will casted to ``torch.LongTensor``. Indices are the coordinates of data inside the matrix.
- :type indices: array_like
- :param values: the data values for the wrapped tensor as a list, tuple, Numpy ``ndarray``, scalar, etc.
- :type values: array_like
- :param time_dim: the index of the time dimension.
- :type time_dim: ``int``, optional
- :param size: size of the timetensor, if not give, size will be deduce from *indices*.
- :type size: list, tuple, or ``torch.Size``, optional
- :param dtype: the desired data type of the wrapped tensor (default: None, infered from ``data``).
- :type dtype: :class:`torch.dtype`, optional
- :param device: the estination device of the wrapped tensor (default: None, current device, see ``torch.set_default_tensor_type()``).
- :type device: :class:`torch.device`, optional
- :param requires_grad: Should operations been recorded by autograd for this timetensor?
- :type requires_grad: `bool`, optional
-
- Example:
-
- >>> echotorch.sparse_coo_timetensor(indices=torch.tensor([[0, 1, 1], [2, 0, 2]]), values=torch.tensor([3, 4, 5], dtype=torch.float32), size=[2, 4])
- timetensor(indices=tensor([[0, 1, 1],
- [2, 0, 2]]),
- values=tensor([3., 4., 5.]),
- size=(2, 4), nnz=3, layout=torch.sparse_coo, time_dim: 0)
- """
- # Create sparse tensor
- coo_tensor = torch.sparse_coo_tensor(
- indices=indices,
- values=values,
- size=size,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad
- )
-
- # Create TimeTensor
- return TimeTensor.new_timetensor(
- data=coo_tensor,
- time_dim=time_dim
- )
-# end sparse_coo_timetensor
-
-
-# As strided
-def as_strided(
- input,
- size,
- stride,
- length: int,
- time_stride: int,
- batch_size: Optional[Tuple[int]] = None,
- batch_stride: Optional[Tuple[int]] = None,
- storage_offset=0
-) -> TimeTensor:
- r"""Create a view of an existing :class:`TimeTensor` with specified ``size``, ``stride`` and ``storage_offset``.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``as_strided()`` for more informations.
-
- :param input: the input timetensor.
- :type input: :class:`TimeTensor`
- :param size: shape of the channel dimensions in the timeseries.
- :type size: ``tuple`` of ``int``
- :param stride: stride of the channels dimensions in the timeseries.
- :type stride: ``tuple`` or ints
- :param length: time length of the timeseries.
- :type length: ``int``
- :param time_stride: stride of the time dimension.
- :type time_stride: ``int``
- :param batch_size: shape of the batch dimensions of the timeseries, can be ``None`` if no batch dimensions are requiered.
- :type batch_size: ``tuple`` or ints, optional
- :param batch_stride: stride of the batch dimensions.
- :type batch_stride: ``tuple`` or ints, optional
- :param storage_offset: the offset of the underlying storage of the output *timetensor*.
- :type storage_offset: ``int``, optional
-
- Example:
-
- >>> ...
- """
- return TimeTensor.new_timetensor(
- torch.as_strided(
- input.tensor,
- list(batch_size) + [length] + list(size),
- list(batch_stride) + [time_stride] + list(stride),
- storage_offset
- ),
- time_dim=len(batch_size) if batch_size is not None else 0
- )
-# end as_strided
-
-
-# From Numpy
-def from_numpy(
- ndarray: np.ndarray,
- time_dim: Optional[int] = 0,
-) -> TimeTensor:
- r"""Creates a :class:`TimeTensor` from a ``numpy.ndarray``.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``from_numpy()`` for more informations.
-
- :param time_dim: Index of the time dimension.
- :type time_dim: Integer
- :param ndarray: The numpy array
- :type ndarray: ``numpy.array`` or ``numpay.ndarray``
-
- Examples::
- >>> x = echotorch.from_numpy(np.zeros((100, 2)), time_dim=0)
- >>> x.size()
- torch.Size([100, 2])
- >>> x.tlen
- 100
-
- """
- return TimeTensor.new_timetensor(
- torch.from_numpy(ndarray),
- time_dim=time_dim
- )
-# end from_numpy
-
-
-# Returns time tensor filled with zeros
-def zeros(
- *size,
- length: int,
- batch_size: Optional[Tuple[int]] = None,
- out: Optional[TimeTensor] = None,
- dtype: Optional[torch.dtype] = None,
- layout: Optional[torch.layout] = torch.strided,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False
-) -> 'TimeTensor':
- r"""Returns a :class:`TimeTensor` of size ``size`` filled with 0.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``zeros()`` for more informations.
-
- :param size: shape of the channel dimensions in the timeseries.
- :type size: ``tuple`` of ``int``
- :param length: Length of the timeseries
- :type length: int
- :param batch_size: shape of the batch dimensions of the timeseries, can be ``None`` if no batch dimensions are requiered.
- :type batch_size: ``tuple`` or ints, optional
- :param out: the output timetensor.
- :type out: :class:`TimeTensor`, optional
- :param dtype: :class:`TimeTensor` data type
- :type dtype: torch.dtype, optional
- :param layout: desired layout of wrapped tensor (default: ``torch.strided``)
- :type layout: torch.layout, optional
- :param device: Destination device
- :type device: torch.device, optional
- :param requires_grad: Activate gradient computation
- :type requires_grad: bool, optional
-
- Example::
- >>> x = echotorch.zeros(2, 2, length=100)
- >>> x.size()
- torch.Size([100, 2, 2])
- >>> x.tsize()
- torch.Size([2, 2])
- >>> x.tlen
- 100
- >>> echotorch.zeros((), length=5)
- timetensor([ 0., 0., 0., 0., 0.])
- """
- return TimeTensor.new_timetensor_with_func(
- *size,
- func=torch.zeros,
- length=length,
- batch_size=batch_size,
- out=out,
- dtype=dtype,
- layout=layout,
- device=device,
- requires_grad=requires_grad,
- )
-# end new_zeros
-
-
-# Zeros like
-def zeros_like(
- input,
- dtype: Optional[torch.dtype] = None,
- layout: Optional[torch.layout] = None,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False,
- memory_format: Optional[torch.memory_format] = torch.preserve_format
-) -> TimeTensor:
- r"""Returns a :class:`TimeTensor` filled with the scalar value 0, with the same size as ``input``.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``zeros_like()`` for more informations.
-
- :param input: the index of the time dimension and the size of ``input`` will be used to create the output timetensor.
- :type input: :class:`TimeTensor`
- :param dtype: :class:`TimeTensor` data type
- :type dtype: torch.dtype, optional
- :param layout: desired layout of wrapped tensor (default: ``torch.strided``)
- :type layout: torch.layout, optional
- :param device: Destination device
- :type device: torch.device, optional
- :param requires_grad: Activate gradient computation
- :type requires_grad: bool, optional
- :param memory_format: memory format of the new timetensor (default: ``torch.preserve_format``).
- :type memory_format: ``torch.memory_format``, optional
-
- Example:
-
- >>> echotorch.zeros_like()
- """
- # Data tensor
- data_tensor = torch.zeros_like(
- input=input.tensor,
- dtype=dtype,
- layout=layout,
- device=device,
- requires_grad=requires_grad,
- memory_format=memory_format
- )
-
- # New timetensor
- return TimeTensor(
- data=data_tensor,
- time_dim=input.time_dim
- )
-# end zeros_like
-
-
-# Returns time tensor filled with ones
-def ones(
- *size,
- length: int,
- batch_size: Optional[Tuple[int]] = None,
- out: Optional[TimeTensor] = None,
- dtype: Optional[torch.dtype] = None,
- layout: Optional[torch.layout] = torch.strided,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False
-) -> 'TimeTensor':
- r"""Returns a :class:`TimeTensor` of size ``size`` filled with 1.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``ones()`` for more informations.
-
- :param size: shape of the channel dimensions in the timeseries.
- :type size: ``tuple`` of ``int``
- :param length: time length of the timeseries.
- :type length: ``int``
- :param batch_size:
- :type batch_size: tuple of ``int``
- :param out: the output timetensor.
- :type out: :class:`TimeTensor`, optional
- :param dtype: :class:`TimeTensor` data type
- :type dtype: torch.dtype, optional
- :param layout: desired layout of wrapped tensor (default: ``torch.strided``)
- :type layout: torch.layout, optional
- :param device: Destination device
- :type device: torch.device, optional
- :param requires_grad: Activate gradient computation
- :type requires_grad: bool, optional
-
- Example::
- >>> x = echotorch.ones(2, 2, length=100)
- >>> x.size()
- torch.Size([100, 2, 2])
- >>> x.tsize()
- torch.Size([2, 2])
- >>> x.tlen
- 100
- >>> echotorch.ones((), length=5)
- timetensor([ 1., 1., 1., 1., 1.])
- """
- return TimeTensor.new_timetensor_with_func(
- *size,
- func=torch.ones,
- length=length,
- batch_size=batch_size,
- out=out,
- dtype=dtype,
- layout=layout,
- device=device,
- requires_grad=requires_grad,
- )
-# end ones
-
-
-# Ones like
-def ones_like(
- input,
- dtype: Optional[torch.dtype] = None,
- layout: Optional[torch.layout] = None,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False,
- memory_format: Optional[torch.memory_format] = torch.preserve_format
-) -> TimeTensor:
- r"""Returns a :class:`TimeTensor` filled with the scalar value 1, with the same size as ``input``.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``ones_like()`` for more informations.
-
- :param input: the index of the time dimension and the size of ``input`` will be used to create the output timetensor.
- :type input: :class:`TimeTensor`
- :param dtype: :class:`TimeTensor` data type
- :type dtype: torch.dtype, optional
- :param layout: desired layout of wrapped tensor (default: ``torch.strided``)
- :type layout: torch.layout, optional
- :param device: Destination device
- :type device: torch.device, optional
- :param requires_grad: Activate gradient computation
- :type requires_grad: bool, optional
- :param memory_format: memory format of the new timetensor (default: ``torch.preserve_format``).
- :type memory_format: ``torch.memory_format``, optional
-
- Examples:
-
- >>> ...
- """
- # Data tensor
- data_tensor = torch.ones_like(
- input=input.tensor,
- dtype=dtype,
- layout=layout,
- device=device,
- requires_grad=requires_grad,
- memory_format=memory_format
- )
-
- # New timetensor
- return TimeTensor(
- data=data_tensor,
- time_dim=input.time_dim
- )
-# end ones_like
-
-
-# Arange
-def arange(
- *args,
- out: TimeTensor = None,
- dtype: Optional[torch.dtype] = None,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False
-) -> TimeTensor:
- r"""Returns a 0-D :class:`TimeTensor` of length :math:`\ceil[\bigg]{\frac{end-start}{step}}` with values from the interval
- :math:`[start, end)` taken into common difference ``step`` beginning from *start*.
-
- .. note::
- **From PyTorch documentation**:
- Note that non-integer ``step`` is subject to floating point rounding errors when comparing against ``end``;
- to avoid inconsistency, we advise a small epsilon to ``end`` in such case.
-
- :math:`out_{i+1} = out_{i} + step`
-
- .. seealso::
- See the `PyTorch documentation `__ on ``arange()`` for more informations.
-
- :param start: the starting value of the time related set of points (default: 0).
- :type start: Number
- :param end: the ending value for the time related set of points.
- :type end: Number
- :param step: the gap between each pair of adjacent time points (default: 1).
- :type step: Number
- :param out: the output timetensor.
- :type out: :class:`TimeTensor`, optional
- :param device: Destination device
- :type device: torch.device, optional
- :param device: Destination device
- :type device: torch.device, optional
- :param requires_grad: Activate gradient computation
- :type requires_grad: bool, optional
-
- Examples:
-
- >>> echotorch.tarange(0, 5)
- timetensor(tensor([0, 1, 2, 3, 4]), time_dim: 0)
- >>> echotorch.tarange(1, 4)
- timetensor(tensor([1, 2, 3]), time_dim: 0)
- >>> echotorch.tarange(1, 2.5, 0.5)
- timetensor(tensor([1.0000, 1.5000, 2.0000]), time_dim: 0)
- """
- # Get start, end, step
- if len(args) == 1:
- start = 0
- end = args[0]
- step = 1
- elif len(args) == 2:
- start = args[0]
- end = args[1]
- step = 1
- elif len(args) > 2:
- start = args[0]
- end = args[1]
- step = args[2]
- else:
- raise ValueError("At least end must be given (here nothing)")
- # end if
-
- # arange tensor
- if out is not None:
- torch.arange(
- start,
- end,
- step,
- out=out.tensor,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad
- )
- return out
- else:
- arange_tensor = torch.arange(
- start,
- end,
- step,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad
- )
-
- # Create timetensor
- return TimeTensor.new_timetensor(
- data=arange_tensor,
- time_dim=0
- )
- # end if
-# end arange
-
-
-# linspace
-def linspace(
- start: int,
- end: int,
- steps: float,
- out: TimeTensor = None,
- dtype: Optional[torch.dtype] = None,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False
-) -> TimeTensor:
- r"""Create a 0-D timetensor of length ``steps`` whose values are evenly spaced from ``start`` to ``end``, inclusive.
- That is, values are:
-
- .. math::
- (start, start + \frac{end - start}{steps - 1}, \dots, start + (steps - 2) * \frac{end - start}{steps - 1}, end)
-
- .. seealso::
- See the `PyTorch documentation `__ on ``linspace()`` for more informations.
-
- :param start: the starting value of the time related set of points.
- :type start: float
- :param end: the ending value for the time related set of points.
- :type end: float
- :param steps: size of the constructed tensor.
- :type steps: int
- :param out: the output timetensor.
- :type out: :class:`TimeTensor`, optional
- :param dtype: the data type to perform the computation in. Default: if None, uses the global default dtype (see torch.get_default_dtype()) when both start and end are real, and corresponding complex dtype when either is complex.
- :type dtype: ``torch.dtype``, optional
- :param device: the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see ``torch.set_default_tensor_type()``). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.
- :type device: ``torch.device``, optional
- :param requires_grad: if autograd should record operations on the returned tensor (default: *False*).
- :type requires_grad: ``bool``
-
- Example:
-
- >>> echotorch.linspace(3, 10, steps=5)
- timetensor(tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]), time_dim: 0)
- >>> echotorch.linspace(-10, 10, steps=5)
- timetensor(tensor([-10., -5., 0., 5., 10.]), time_dim: 0)
- >>> echotorch.linspace(start=-10, end=10, steps=5)
- timetensor(tensor([-10., -5., 0., 5., 10.]), time_dim: 0)
- >>> echotorch.linspace(start=-10, end=10, steps=1)
- timetensor(tensor([-10.]), time_dim: 0)
- """
- # linspace tensor
- if out is None:
- ls_tensor = torch.linspace(
- start,
- end,
- steps,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad
- )
-
- # Create timetensor
- return TimeTensor.new_timetensor(
- data=ls_tensor,
- time_dim=0
- )
- else:
- torch.linspace(
- start,
- end,
- steps,
- out=out.tensor,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad
- )
- out.time_dim = 0
- return out
- # end if
-# end linspace
-
-
-# logspace
-def logspace(
- start: int,
- end: int,
- steps: float,
- base: float = 10,
- out: TimeTensor = None,
- dtype: Optional[torch.dtype] = None,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False
-) -> TimeTensor:
- r"""Create a 0-D timetensor of length ``steps`` whose values are evenly spaced from :math:`base^{start}` to :math:`base^{end}`,
- inclusive, on a logarithm scale with base ``base``. That is, the values are:
-
- .. math::
- (base^{start}, base^{\frac{end - start}{steps - 1}}, \dots, base^{start + (steps - 2) * \frac{end - start}{steps - 1}}, base^{end})
-
- .. seealso::
- See the `PyTorch documentation `__ on ``logspace()`` for more informations.
-
- :param start: the starting value of the time related set of points.
- :type start: ``float``
- :param end: the ending value for the time related set of points.
- :type end: ``float``
- :param steps: size of the constructed tensor.
- :type steps: ``int``
- :param base: base of the logarithm (default: 10)
- :type base: ``float``, optional
- :param out: the output timetensor.
- :type out: ``TimeTensor``, optional
- :param dtype: the data type to perform the computation in. Default: if None, uses the global default dtype (see torch.get_default_dtype()) when both start and end are real, and corresponding complex dtype when either is complex.
- :type dtype: ``torch.dtype``, optional
- :param device: the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see ``torch.set_default_tensor_type()``). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.
- :type device: ``torch.device``, optional
- :param requires_grad: if autograd should record operations on the returned tensor (default: *False*).
- :type requires_grad: ``bool``
-
- Example:
-
- >>> echotorch.logspace(start=-10, end=10, steps=5)
- timetensor(tensor([1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]), time_dim: 0)
- >>> echotorch.logspace(start=0.1, end=1.0, steps=5)
- timetensor(tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]), time_dim: 0)
- >>> echotorch.logspace(start=0.1, end=1.0, steps=1)
- timetensor(tensor([1.2589]), time_dim: 0)
- >>> echotorch.logspace(start=2, end=2, steps=1, base=2)
- timetensor(tensor([4.]), time_dim: 0)
- """
- if out is None:
- # logspace tensor
- ls_tensor = torch.logspace(
- start,
- end,
- steps,
- base,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad
- )
-
- # Create timetensor
- return TimeTensor.new_timetensor(
- data=ls_tensor,
- time_dim=0
- )
- else:
- # Logspace tensor
- torch.logspace(
- start,
- end,
- steps,
- base,
- out=out.tensor,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad
- )
- out.time_dim = 0
- return out
- # end if
-# end logspace
-
-
-# Returns empty tensor
-def empty(
- *size,
- length: int,
- batch_size: Optional[Tuple[int]] = None,
- out: TimeTensor = None,
- dtype: Optional[torch.dtype] = None,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False,
- pin_memory: bool = False,
- memory_format=torch.contiguous_format
-) -> 'TimeTensor':
- r"""Returns a :class:`TimeTensor` of size ``size`` and time length ``time_length`` filled with uninitialized data.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``empty()`` for more informations.
-
- :param size: shape of the channel dimensions in the timeseries.
- :type size: ``tuple`` of ``int``
- :param length: length of the timeseries
- :type length: int
- :param batch_size: shape of the batch dimensions of the timeseries, can be ``None`` if no batch dimensions are requiered.
- :type batch_size: ``tuple`` or ints, optional
- :param out: the output timetensor.
- :type out: :class:`TimeTensor`, optional
- :param dtype: the data type to perform the computation in. Default: if None, uses the global default dtype (see torch.get_default_dtype()) when both start and end are real, and corresponding complex dtype when either is complex.
- :type dtype: ``torch.dtype``, optional
- :param device: the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see ``torch.set_default_tensor_type()``). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.
- :type device: ``torch.device``, optional
- :param requires_grad: Should autograd record operations on the returned timetensor?
- :type requires_grad: bool, optional
- :param pin_memory: If *True*, the returned timetensor would be allocated in the pinned memory. Works only for CPU timetensors (default: ``False``).
- :type pin_memory: ``bool``, optional
- :param memory_format: memory format of the returned :class:`TimeTensor` (default: ``torch.contiguous_format``).
- :type memory_format: ``torch.memory_format``, optional
-
- Example:
-
- >>> echotorch.empty(2, 3, length=1, dtype=torch.int32, device = 'cuda')
- timetensor([[[1., 1., 1.],
- [1., 1., 1.]]], device='cuda:0', dtype=torch.int32)
- """
- return TimeTensor.new_timetensor_with_func(
- *size,
- func=torch.empty,
- length=length,
- batch_size=batch_size,
- out=out,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad,
- pin_memory=pin_memory,
- memory_format=memory_format
- )
-# end empty
-
-
-# Empty like
-def empty_like(
- input,
- dtype: Optional[torch.dtype] = None,
- layout: Optional[torch.layout] = None,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False,
- memory_format=torch.preserve_format
-) -> TimeTensor:
- r"""Returns an uninitialized :class:`TimeTensor` with the same channel size and time dimension as ``input``.
- ``echotorch.empty_like(input)`` is equivalent to ``echotorch.empty(*list(input.csize()), time_length: input.tlen, dtype=input.dtype, layout=input.layout, device=input.device)``.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``empty_like()`` for more informations.
-
- :param input: the parameters of ``input`` will determine the parameters of the output tensor.
- :type input: ``Tensor``
- :param dtype: the data type to perform the computation in. Default: if None, uses the global default dtype (see torch.get_default_dtype()) when both start and end are real, and corresponding complex dtype when either is complex.
- :type dtype: ``torch.dtype``, optional
- :param device: the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see ``torch.set_default_tensor_type()``). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.
- :type device: ``torch.device``, optional
- :param requires_grad: Should autograd record operations on the returned timetensor?
- :type requires_grad: bool, optional
- :param memory_format: memory format of the returned :class:`TimeTensor` (default: ``torch.contiguous_format``).
- :type memory_format: ``torch.memory_format``, optional
-
- Example:
-
- >>> x = echotorch.empty((2, 3), time_length=1, dtype=torch.int32, device = 'cuda')
- >>> echotorch.empty_like(x)
- timetensor([[[1., 1., 1.],
- [1., 1., 1.]]], device='cuda:0', dtype=torch.int32)
- """
- # Data tensor
- data_tensor = torch.empty_like(
- input=input.tensor,
- dtype=dtype,
- layout=layout,
- device=device,
- requires_grad=requires_grad,
- memory_format=memory_format
- )
-
- # New timetensor
- return TimeTensor(
- data=data_tensor,
- time_dim=input.time_dim
- )
-# end empty_like
-
-
-# Empty strided
-def empty_strided(
- size,
- stride,
- length: int,
- time_stride: int,
- batch_size: Optional[Tuple[int]] = None,
- batch_stride: Optional[Tuple[int]] = None,
- dtype: Optional[torch.dtype] = None,
- layout: torch.layout = torch.strided,
- device: torch.device = None,
- requires_grad: bool = False,
- pin_memory: bool = False
-) -> TimeTensor:
- r"""Returns a :class:`TimeTensor` filled with uninitialized data. The shape and strides of the wrapped tensor is
- defined by the argument ``size`` and ``stride``. ``echotorch.empty_strided(size, stride)`` is equivalent to
- ``echotorch.empty(*size).as_strided(size, stride)``.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``empty_strided()`` for more information.
-
- :param size: shape of the channel dimensions in the timeseries.
- :type size: ``tuple`` of ``int``
- :param stride: stride of the channel dimensions in the timeseries.
- :type stride: ``tuple`` of ``int``
- :param length: time length of the timeseries.
- :type length: ``int``
- :param time_stride: stride of the time dimension.
- :type time_stride: ``int``
- :param batch_size: shape of the batch dimensions of the timeseries, can be ``None`` if no batch dimensions are required.
- :type batch_size: ``tuple`` of ``int``, optional
- :param batch_stride: stride of the batch dimensions.
- :type batch_stride: ``tuple`` of ``int``, optional
- :param dtype: the desired data type of the wrapped tensor (default: ``None``, inferred from ``data``).
- :type dtype: :class:`torch.dtype`, optional
- :param layout: desired layout of the wrapped tensor (default: ``torch.strided``).
- :type layout: ``torch.layout``, optional
- :param device: the destination device of the wrapped tensor (default: ``None``, current device, see ``torch.set_default_tensor_type()``).
- :type device: :class:`torch.device`, optional
- :param requires_grad: If autograd should record operations on the returned timetensor (default: ``False``).
- :type requires_grad: ``bool``, optional
- :param pin_memory: If *True*, the returned timetensor will be allocated in pinned memory. Works only for CPU timetensors (default: ``False``).
- :type pin_memory: ``bool``, optional
-
- """
- # Default to empty batch shape and stride if no batch dimensions are given
- batch_size = tuple(batch_size) if batch_size is not None else ()
- batch_stride = tuple(batch_stride) if batch_stride is not None else ()
-
- # Data tensor
- data_tensor = torch.empty_strided(
- list(batch_size) + [length] + list(size),
- list(batch_stride) + [time_stride] + list(stride),
- dtype=dtype,
- layout=layout,
- device=device,
- requires_grad=requires_grad,
- pin_memory=pin_memory
- )
-
- # Create timetensor
- return TimeTensor.new_timetensor(
- data=data_tensor,
- time_dim=len(batch_size)
- )
-# end empty_strided
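-
-# Usage sketch for ``empty_strided`` (illustrative): the wrapped tensor gets
-# shape ``batch_size + (length,) + size`` and strides
-# ``batch_stride + (time_stride,) + stride``; the values below are assumed,
-# not taken from the original test suite.
-#
-#     >>> x = echotorch.empty_strided((2,), (1,), length=10, time_stride=2)
-#     >>> x.size()
-#     torch.Size([10, 2])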
-
-
-# Returns filled time tensor
-def full(
- *size,
- fill_value: Union[int, float],
- length: int,
- batch_size: Optional[Tuple[int]] = None,
- out: Optional[TimeTensor] = None,
- dtype: Optional[torch.dtype] = None,
- layout: Optional[torch.layout] = torch.strided,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False
-) -> 'TimeTensor':
- r"""Returns a TimeTensor of size size and time length time_length filled with fill_value.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``full()`` for more information.
-
- :param size: shape of the channel dimensions in the timeseries.
- :type size: ``tuple`` of ``int``
- :param fill_value: the value to fill the output timetensor with.
- :type fill_value: Scalar
- :param length: length of the timeseries
- :type length: ``int``
- :param batch_size: shape of the batch dimensions of the timeseries, can be ``None`` if no batch dimensions are required.
- :type batch_size: ``tuple`` of ``int``, optional
- :param out: the output timetensor.
- :type out: :class:`TimeTensor`, optional
- :param dtype: the desired data type of the returned timetensor. Default: if ``None``, inferred from ``fill_value``.
- :type dtype: ``torch.dtype``, optional
- :param layout: desired layout of wrapped tensor (default: ``torch.strided``)
- :type layout: torch.layout, optional
- :param device: the desired device of returned tensor. Default: if None, uses the current device for the default tensor type (see ``torch.set_default_tensor_type()``). device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.
- :type device: ``torch.device``, optional
- :param requires_grad: if autograd should record operations on the returned tensor (default: *False*).
- :type requires_grad: ``bool``, optional
-
- Example:
-
- >>> x = echotorch.full(2, 2, fill_value=1., length=100)
- >>> x.size()
- torch.Size([100, 2, 2])
- >>> x.tsize()
- torch.Size([2, 2])
- >>> x.tlen
- 100
- >>> echotorch.full(fill_value=1, length=5)
- timetensor([ 1., 1., 1., 1., 1.])
- """
- return TimeTensor.new_timetensor_with_func(
- *size,
- func=torch.full,
- length=length,
- batch_size=batch_size,
- out=out,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad,
- fill_value=fill_value,
- layout=layout
- )
-# end full
-
-
-# Full like
-def full_like(
- input,
- fill_value: Union[int, float],
- dtype: Optional[torch.dtype] = None,
- layout: Optional[torch.layout] = None,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False,
- memory_format=torch.preserve_format
-) -> TimeTensor:
- r"""Returns a :class:`TimeTensor` with the same time dimension index and size as ``input`` filled with
- ``fill_value``. ``echotorch.full_like(input, fill_value)`` is equivalent to
- ``echotorch.full(input.csize(), fill_value, length=input.tlen, batch_size=input.bsize(), dtype=input.dtype, layout=input.layout, device=input.device)``.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``full_like()`` for more information.
-
- :param input: the index of the time dimension and the size of ``input`` will be used to create the output timetensor.
- :type input: :class:`TimeTensor`
- :param fill_value: the number to fill the output timetensor with.
- :type fill_value: Scalar
- :param dtype: the desired data type of the wrapped tensor (default: ``None``, inferred from ``data``).
- :type dtype: :class:`torch.dtype`, optional
- :param device: the destination device of the wrapped tensor (default: ``None``, current device, see ``torch.set_default_tensor_type()``).
- :type device: :class:`torch.device`, optional
- :param requires_grad: Should operations be recorded by autograd for this timetensor? (default: ``False``)
- :type requires_grad: ``bool``, optional
- :param memory_format: memory format of the new timetensor (default: ``torch.preserve_format``).
- :type memory_format: ``torch.memory_format``, optional
-
- Example:
-
- >>> ...
- """
- # Data tensor
- data_tensor = torch.full_like(
- input=input.tensor,
- fill_value=fill_value,
- dtype=dtype,
- layout=layout,
- device=device,
- requires_grad=requires_grad,
- memory_format=memory_format
- )
-
- # New timetensor
- return TimeTensor(
- data=data_tensor,
- time_dim=input.time_dim
- )
-# end full_like
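-
-# Usage sketch for ``full_like`` (illustrative, repr assumed to follow the
-# format of the examples above): the result keeps the time dimension index
-# and sizes of ``input``.
-#
-#     >>> x = echotorch.randn(2, length=3)
-#     >>> echotorch.full_like(x, 5.)
-#     timetensor([[5., 5.],
-#                 [5., 5.],
-#                 [5., 5.]], time_dim: 0)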
-
-
-# Quantize per timetensor
-def quantize_per_timetensor(
- input: TimeTensor,
- scale: float,
- zero_point: int,
- dtype: Optional[torch.dtype] = None
-) -> TimeTensor:
- r"""Converts a float :class:`TimeTensor` to a quantized timetensor with given scale and zero point.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``quantize_per_tensor()`` for more information.
-
- :param input: the float :class:`TimeTensor` to quantize.
- :type input: :class:`TimeTensor`
- :param scale: scale to apply in the quantization formula.
- :type scale: ``float``
- :param zero_point: offset in integer value that maps to float zero.
- :type zero_point: ``int``
- :param dtype: the desired data type of the returned timetensor, which must be a quantized dtype (e.g. ``torch.quint8``).
- :type dtype: :class:`torch.dtype`, optional
-
- Example:
-
- >>> ...
- """
- # Data tensor
- data_tensor = torch.quantize_per_tensor(
- input=input.tensor,
- scale=scale,
- zero_point=zero_point,
- dtype=dtype,
- )
-
- # New timetensor
- return TimeTensor(
- data=data_tensor,
- time_dim=input.time_dim
- )
-# end quantize_per_timetensor
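-
-# Usage sketch (illustrative): quantization stores ``round(x / scale) +
-# zero_point`` in an integer dtype such as ``torch.quint8``, so a round trip
-# through ``dequantize`` (defined below) only approximates the input.
-#
-#     >>> x = echotorch.full(fill_value=0.5, length=4)
-#     >>> q = echotorch.quantize_per_timetensor(x, scale=0.1, zero_point=10, dtype=torch.quint8)
-#     >>> echotorch.dequantize(q) # values recovered up to quantization error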
-
-
-# Quantize per channel
-def quantize_per_channel(
- input: TimeTensor,
- scales: torch.Tensor,
- zero_points,
- axis: int,
- dtype: Optional[torch.dtype] = None,
-) -> TimeTensor:
- r"""Convert a float :class:`TimeTensor` to a per-channel quantized timetensor with given scales and zero points.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``quantize_per_channel()`` for more information.
-
- :param input: the float :class:`TimeTensor` to quantize.
- :type input: :class:`TimeTensor`
- :param scales: float 1D tensor of scales to use, whose size matches ``input.csize(axis)``.
- :type scales: ``Tensor``
- :param zero_points: integer 1D tensor of offsets to use, whose size matches ``input.csize(axis)``.
- :type zero_points: ``tensor`` of ``int``
- :param axis: the channel dimension on which per-channel quantization is applied.
- :type axis: ``int``
- :param dtype: the desired data type of the returned timetensor, which must be a quantized dtype (e.g. ``torch.quint8``).
- :type dtype: :class:`torch.dtype`, optional
-
- Example:
-
- >>> ...
- """
- # Data tensor
- data_tensor = torch.quantize_per_channel(
- input=input.tensor,
- scales=scales,
- zero_points=zero_points,
- axis=axis + input.time_dim + 1, # channel axes start right after the time dimension
- dtype=dtype
- )
-
- # New timetensor
- return TimeTensor(
- data=data_tensor,
- time_dim=input.time_dim
- )
-# end quantize_per_channel
-
-
-# Dequantize
-def dequantize(
- timetensor: TimeTensor
-) -> TimeTensor:
- r"""Returns an fp32 :class:`TimeTensor` by dequantizing a quantized :class:`TimeTensor`.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``dequantize()`` for more information.
-
- :param timetensor: A quantized :class:`TimeTensor`
- :type timetensor: :class:`TimeTensor`
-
- Example:
-
- >>> ...
- """
- # Data tensor
- data_tensor = torch.dequantize(
- timetensor.tensor
- )
-
- # New timetensor
- return TimeTensor(
- data=data_tensor,
- time_dim=timetensor.time_dim
- )
-# end dequantize
-
-
-# Complex
-def complex(
- real: TimeTensor,
- imag: TimeTensor
-) -> TimeTensor:
- r"""Returns a complex :class:`TimeTensor` with a ``real`` part and an imaginary ``imag`` part.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``complex()`` for more information.
-
- :param real: The real part of the complex *timetensor*. Must be *float* or *double*.
- :type real: :class:`TimeTensor`
- :param imag: The imaginary part of the complex *timetensor*, same *dtype* as ``real``.
- :type imag: :class:`TimeTensor`
-
- Example:
-
- >>> ...
- """
- # Real and imag must have the same time dim
- assert real.time_dim == imag.time_dim
-
- # Data tensor
- data_tensor = torch.complex(
- real=real.tensor,
- imag=imag.tensor
- )
-
- # New timetensor
- return TimeTensor(
- data=data_tensor,
- time_dim=real.time_dim
- )
-# end complex
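-
-# Usage sketch for ``complex`` (illustrative): real and imaginary parts must
-# share dtype and time dimension; float32 inputs give a complex64 result.
-#
-#     >>> re = echotorch.rand(length=5)
-#     >>> im = echotorch.rand(length=5)
-#     >>> echotorch.complex(re, im).dtype
-#     torch.complex64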
-
-
-# Polar
-def polar(
- abs: TimeTensor,
- angle: TimeTensor,
- out: Optional[TimeTensor] = None,
-) -> TimeTensor:
- r"""Returns a complex with Cartesian coordinates corresponding to polar coordinates represented by the absolute
- value ``abs`` and angle ``angle``.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``polar()`` for more information.
-
- :param abs: absolute values as *float* or *double*.
- :type abs: :class:`TimeTensor`
- :param angle: angle values as the same *dtype* as ``abs``.
- :type angle: :class:`TimeTensor`
- :param out: the output :class:`TimeTensor`. If ``abs`` and ``angle`` are ``torch.float32``, ``out`` must be ``torch.complex64``; if they are ``torch.float64``, ``out`` must be ``torch.complex128``.
- :type out: :class:`TimeTensor`
-
- Example:
-
- >>> ...
- """
- # Abs and angle must have the same time dim
- assert abs.time_dim == angle.time_dim
-
- # No out
- if out is None:
- # Create data
- data_tensor = torch.polar(
- abs=abs.tensor,
- angle=angle.tensor
- )
-
- # New timetensor
- return TimeTensor(
- data=data_tensor,
- time_dim=abs.time_dim
- )
- else:
- # Out
- torch.polar(
- abs=abs.tensor,
- angle=angle.tensor,
- out=out.tensor
- )
- return out
- # end if
-# end polar
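-
-# Usage sketch for ``polar`` (illustrative, assumes ``math`` is imported):
-# the result is ``abs * exp(i * angle)`` in Cartesian form, so unit magnitude
-# and angle pi/2 give approximately 0 + 1j at every time step.
-#
-#     >>> mag = echotorch.full(fill_value=1., length=4)
-#     >>> ang = echotorch.full(fill_value=math.pi / 2, length=4)
-#     >>> echotorch.polar(mag, ang)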
-
-
-# endregion CREATION_OPS
-
-
-# region DISTRIBUTION_OPS
-
-
-# Random time series (uniform)
-def rand(
- *size,
- length: int,
- batch_size: Optional[Tuple[int]] = None,
- out: Optional[TimeTensor] = None,
- dtype: Optional[torch.dtype] = None,
- layout: Optional[torch.layout] = torch.strided,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False
-) -> TimeTensor:
- r"""Returns a :class:`TimeTensor` filled with random numbers from a uniform distribution on the interval :math:`[0, 1)`.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``rand()`` for more information.
-
- :param size: a sequence of integers defining the shape of the output timeseries. Can be a variable number of arguments or a collection like a list or tuple.
- :type size: list of integers
- :param length: length of the timeseries.
- :type length: ``int``
- :param batch_size: shape of the batch dimensions of the timeseries, can be ``None`` if no batch dimensions are required.
- :type batch_size: ``tuple`` of ``int``, optional
- :param out: the output timetensor.
- :type out: :class:`TimeTensor`, optional
- :param dtype: the desired data type of the wrapped tensor (default: ``None``, inferred from ``data``).
- :type dtype: :class:`torch.dtype`, optional
- :param layout: the desired layout of the returned TimeTensor (default: ``torch.strided``).
- :type layout: ``torch.layout``, optional
- :param device: the destination device of the wrapped tensor (default: ``None``, current device, see ``torch.set_default_tensor_type()``).
- :type device: :class:`torch.device`, optional
- :param requires_grad: Should operations be recorded by autograd for this timetensor? (default: ``False``)
- :type requires_grad: ``bool``, optional
-
- Example:
-
- >>> echotorch.rand(2, length=10)
- timetensor([[0.5474, 0.7742],
- [0.8091, 0.3192],
- [0.6742, 0.3458],
- [0.6646, 0.5652],
- [0.4309, 0.5330],
- [0.4052, 0.5731],
- [0.2499, 0.1044],
- [0.9394, 0.0862],
- [0.2206, 0.9380],
- [0.1908, 0.0594]], time_dim: 0)
- """
- return TimeTensor.new_timetensor_with_func(
- *size,
- func=torch.rand,
- length=length,
- batch_size=batch_size,
- out=out,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad,
- layout=layout
- )
-# end rand
-
-
-# Random time series (normal)
-def randn(
- *size,
- length: int,
- batch_size: Optional[Tuple[int]] = None,
- out: Optional[TimeTensor] = None,
- dtype: Optional[torch.dtype] = None,
- layout: Optional[torch.layout] = torch.strided,
- device: Optional[torch.device] = None,
- requires_grad: Optional[bool] = False
-) -> TimeTensor:
- r"""Returns a :class:`TimeTensor` filled with random numbers from a normal distribution with mean :math:`\mu` 0 and
- a standard deviation :math:`\sigma` of 1 (standard normal distribution).
-
- .. math::
- out_i \sim \mathcal{N}(0, 1)
-
- The parameter *size* will determine the size of the *timetensor*.
-
- .. seealso::
- See the `PyTorch documentation `__ on ``randn()`` for more information.
-
- :param size: the shape of the timetensor as a sequence of integers (list, tuple, etc).
- :type size: list of ints
- :param length: length of the timeseries.
- :type length: ``int``
- :param batch_size: shape of the batch dimensions of the timeseries, can be ``None`` if no batch dimensions are required.
- :type batch_size: ``tuple`` of ``int``, optional
- :param out: the output tensor.
- :type out: ``TimeTensor``, optional
- :param dtype: the desired data type of the wrapped tensor (default: ``None``, inferred from ``data``).
- :type dtype: :class:`torch.dtype`, optional
- :param layout: desired layout of the wrapped tensor (default: ``torch.strided``).
- :type layout: ``torch.layout``, optional
- :param device: the destination device of the wrapped tensor (default: ``None``, current device, see ``torch.set_default_tensor_type()``).
- :type device: :class:`torch.device`, optional
- :param requires_grad: Should operations be recorded by autograd for this timetensor? (default: ``False``)
- :type requires_grad: ``bool``, optional
-
- Example:
-
- >>> x = echotorch.randn(length=10)
- >>> x
- timetensor([ 0.2610, 0.4589, 0.1833, -0.1209, -0.0103, 1.1757, 0.9236, -0.6117, 0.7906, -0.1704], time_dim: 0)
- >>> x.size()
- torch.Size([10])
- >>> x.tlen
- 10
- """
- return TimeTensor.new_timetensor_with_func(
- *size,
- func=torch.randn,
- length=length,
- batch_size=batch_size,
- out=out,
- dtype=dtype,
- device=device,
- requires_grad=requires_grad,
- layout=layout
- )
-# end randn
-
-
-# endregion DISTRIBUTION_OPS
-
-
-# region UTILITY_OPS
-
-
-# Concatenate on time dim
-def tcat(
- *tensors: Tuple[TimeTensor]
-) -> TimeTensor:
- r"""Concatenate a given list of ``n`` timetensors or tensor on the time dimension. All timetensors
- must have the same shape (except the time dimensions) and the same time dimension
- specified or be empty. If PyTorch tensors are in the sequence, they must have the same shape and they
- will be concatenated on the same dimension as specified as time dimension in the timetensors. The concatenation
- will fail if there is only PyTorch tensors in the sequence.
-
- ``echotorch.tcat()`` is the inverse of ``echotorch.tsplit()`` and ``echotorch.tchunk()``.
-
- :param tensors: A sequence of timetensors or tensors of the same type, same time dimension and same shape.
- :return: The timetensors/tensors concatenated in a single timetensor.
- :return: ``TimeTensor``
-
- Example::
-
- >>> x = echotorch.randn(2, length=20)
- >>> x
- timetensor([[....]])
- >>> echotorch.tcat(x, x, x)
- timetensor([[...]])
-
- """
- # None
- if len(tensors) == 0:
- return None
- # end if
-
- # First time dim and ndim
- time_dim = tensors[0].time_dim
- ndim = tensors[0].ndim
-
- # Check all tensor
- for tensor in tensors:
- if tensor.time_dim != time_dim or tensor.ndim != ndim:
- raise Exception(
- "All timetensors must have the same number of dimensions and the same time dimension (here {}/{} "
- "and {}/{})".format(ndim, time_dim, tensor.ndim, tensor.time_dim)
- )
- # end if
- # end if
-
- # Time tensor
- return torch.cat(tensors, dim=time_dim)
-# end tcat
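-
-# Usage sketch for ``tcat`` (illustrative): concatenating three copies of a
-# series of time length 20 yields time length 60; channel sizes are unchanged.
-#
-#     >>> x = echotorch.randn(2, length=20)
-#     >>> echotorch.tcat(x, x, x).tlen
-#     60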
-
-
-# Concatenate time-related dimension
-def cat(
- tensors: Tuple[TimeTensor],
- dim: int = 0
-) -> Union[TimeTensor, Any]:
- """Concatenate time-related dimensions
- """
- # None
- if len(tensors) == 0:
- return None
- # end if
-
- # First time dim and ndim
- time_dim = tensors[0].time_dim
- ndim = tensors[0].ndim
- tlen = tensors[0].tlen
-
- # Check all tensor
- for tensor in tensors:
- if tensor.time_dim != time_dim or tensor.ndim != ndim or tensor.tlen != tlen:
- raise Exception(
- "All timetensors must have the same number of dimensions, the same time dimension and the same "
- "time length (here {}/{}, {}/{} and {}/{})".format(
- ndim,
- tensor.ndim,
- time_dim,
- tensor.time_dim,
- tlen,
- tensor.tlen
- )
- )
- # end if
- # end if
-
- # Time tensor
- return torch.cat(tensors, dim=time_dim+1+dim)
-# end cat
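-
-# Usage sketch for ``cat`` (illustrative): with the time dimension at index 0,
-# ``dim=0`` concatenates on the first channel dimension (tensor dimension 1).
-#
-#     >>> x = echotorch.randn(2, length=10)
-#     >>> echotorch.cat((x, x), dim=0).size()
-#     torch.Size([10, 4])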
-
-
-# Select time index in tensor
-def tindex_select(
- input: TimeTensor,
- indices: Union[torch.IntTensor, torch.LongTensor]
-) -> TimeTensor:
- """
- Select time index in time tensor
- """
- return torch.index_select(
- input,
- input.time_dim,
- indices
- )
-# end tindex_select
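-
-# Usage sketch for ``tindex_select`` (illustrative): keeps only the requested
-# time steps, here the first and the last of a series of length 10.
-#
-#     >>> x = echotorch.randn(2, length=10)
-#     >>> echotorch.tindex_select(x, torch.LongTensor([0, 9])).size()
-#     torch.Size([2, 2])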
-
-
-# Is timetensor
-def is_timetensor(obj) -> bool:
- r"""Returns True if `obj` is an EchoTorch timetensor.
-
- Note that this function is simply doing ``isinstance(obj, TimeTensor)``.
- Using that ``isinstance`` check is better for typechecking with mypy,
- and more explicit - so it's recommended to use that instead of
- ``is_timetensor``.
-
- :param obj: The object to test
- :type obj: `object`
- :return: True if `obj` is an EchoTorch timetensor
- :rtype: bool
-
- Example::
-
- >>> x = echotorch.timetensor([1,2,3], time_dim=0)
- >>> echotorch.is_timetensor(x)
- True
-
- """
- return isinstance(obj, TimeTensor)
-# end is_timetensor
-
-
-# endregion UTILITY_OPS
-
diff --git a/echotorch/base_tensors.py b/echotorch/base_tensors.py
deleted file mode 100644
index 0fa897c..0000000
--- a/echotorch/base_tensors.py
+++ /dev/null
@@ -1,852 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/base_tensor.py
-# Description : An abstract base class for EchoTorch tensors
-# Date : 13th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import Tuple, Union, Callable
-import torch
-
-
-# region BASETENSOR
-
-class BaseTensor(object):
- r"""An abstract base class for EchoTorch tensors
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(self, data: Union[torch.Tensor, 'BaseTensor']) -> None:
- r"""BaseTensor constructor
-
- :param data: the data, as a torch tensor or another :class:`BaseTensor` to wrap.
- :type data: ``torch.Tensor`` or :class:`BaseTensor`
- """
- # Copy if already a basetensor,
- # wrap otherwise
- if isinstance(data, BaseTensor):
- tensor_data = data.tensor
- else:
- tensor_data = data
- # end if
-
- # Set wrapped tensor
- self._tensor = tensor_data
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
- # Get tensor
- @property
- def tensor(self) -> torch.Tensor:
- r"""Get the wrapped tensor.
-
- :return: The wrapped tensor.
- :rtype: :class:`torch.Tensor`
- """
- return self._tensor
- # end tensor
-
- # endregion PROPERTIES
-
- # region CAST
-
- # To float64 basetensor
- def double(self) -> 'BaseTensor':
- r"""To float64 :class:`BaseTensor` (no copy).
-
- :return: The :class:`BaseTensor` with data casted to float64.
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.double()
- return self
- # end double
-
- # To float32 basetensor
- def float(self) -> 'BaseTensor':
- r"""To float32 :class:`BaseTensor` (no copy).
-
- :return: The :class:`BaseTensor` with data casted to float32.
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.float()
- return self
- # end float
-
- # To float16 basetensor
- def half(self) -> 'BaseTensor':
- r"""To float16 :class:`BaseTensor` (no copy)
-
- :return: The :class:`BaseTensor` with data casted to float16.
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.half()
- return self
- # end half
-
- # To bfloat16 basetensor
- def bfloat16(self) -> 'BaseTensor':
- r"""To brain float16 :class:`BaseTensor` (no copy)
-
- :return: The :class:`BaseTensor` with data casted to bfloat16.
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.bfloat16()
- return self
- # end bfloat16
-
- # To boolean basetensor
- def bool(self) -> 'BaseTensor':
- r"""To boolean :class:`BaseTensor` (no copy).
-
- :return: The :class:`BaseTensor` with data casted to boolean.
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.bool()
- return self
- # end bool
-
- # To byte basetensor
- def byte(self) -> 'BaseTensor':
- r"""To byte :class:`BaseTensor` (no copy).
-
- :return: The :class:`BaseTensor` with data casted to bytes.
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.byte()
- return self
- # end byte
-
- # To char basetensor
- def char(self) -> 'BaseTensor':
- r"""To char :class:`BaseTensor` (no copy)
-
- :return: The :class:`BaseTensor` with data casted to char
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.char()
- return self
- # end char
-
- # To short basetensor
- def short(self) -> 'BaseTensor':
- r"""To short (int16) :class:`BaseTensor` (no copy)
-
- :return: The :class:`BaseTensor` with data casted to int16.
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.short()
- return self
- # end short
-
- # To int timetensor
- def int(self) -> 'BaseTensor':
- r"""To int :class:`BaseTensor` (no copy)
-
- :return: The :class:`BaseTensor` with data casted to int.
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.int()
- return self
- # end int
-
- # Long
- def long(self) -> 'BaseTensor':
- r"""To long :class:`BaseTensor` (no copy)
-
- :return: The :class:`BaseTensor` with data casted to long
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.long()
- return self
- # end long
-
- # To
- def to(self, *args, **kwargs) -> 'BaseTensor':
- r"""Performs BaseTensor dtype and/or device concersion. A ``torch.dtype`` and ``torch.device`` are inferred
- from the arguments of ``self.to(*args, **kwargs)
-
- .. note::
- From PyTorch documentation: if the ``self`` BaseTensor already has the correct ``torch.dtype`` and
- ``torch.device``, then ``self`` is returned. Otherwise, the returned basetensor is a copy of ``self``
- with the desired ``torch.dtype`` and ``torch.device``.
- """
- # New tensor
- ntensor = self._tensor.to(*args, **kwargs)
-
- # Same tensor? (torch returns the same tensor when no conversion is needed)
- if ntensor is self._tensor:
- return self
- else:
- return BaseTensor(ntensor)
- # end if
- # end to
-
- # To CUDA device
- def cuda(
- self,
- **kwargs
- ) -> 'BaseTensor':
- r"""To CUDA device.
-
- :return: BaseTensor transferred to GPU device.
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.cuda(**kwargs)
- return self
- # end cuda
-
- # To CPU device
- def cpu(
- self,
- **kwargs
- ) -> 'BaseTensor':
- r"""To CPU device.
-
- :return: BaseTensor transferred to CPU device.
- :rtype: :class:`BaseTensor`
- """
- self._tensor = self._tensor.cpu(**kwargs)
- return self
- # end cpu
-
- # region TORCH_FUNCTION
-
- # Transpose
- def t(self) -> 'BaseTensor':
- r"""Expects the basetensor to be <= 2-D tensor and transposes dimensions 0 and 1.
-
- 0-D and 1-D tensors are returned as is. When input is a 2-D tensor this is equivalent to ``transpose(input, 0, 1)``.
- """
- return BaseTensor(
- data=self._tensor.t()
- )
- # end t
-
- # Torch functions
- def __torch_function__(
- self,
- func,
- types,
- args=(),
- kwargs=None
- ):
- r"""Torch functions implementations.
- """
- # Dict if None
- if kwargs is None:
- kwargs = {}
-
- # end if
-
- # Convert timetensor to tensors
- def convert(args):
- if type(args) is BaseTensor:
- return args.tensor
- elif type(args) is tuple:
- return tuple([convert(a) for a in args])
- elif type(args) is list:
- return [convert(a) for a in args]
- else:
- return args
- # end if
-
- # end convert
-
- # Get the tensor in the arguments
- args = [convert(a) for a in args]
-
- # Execute function
- ret = func(*args, **kwargs)
-
- # Return a new base tensor
- return BaseTensor(ret)
- # end __torch_function__
-
- # endregion TORCH_FUNCTION
-
- # region OVERRIDE
-
- # Override attribute getter
- def __getattr__(self, item):
- r"""Override attribute getter and redirect unknown attributes to wrapper tensor.
- """
- if hasattr(self._tensor, item):
- return getattr(self._tensor, item)
- else:
- raise AttributeError(
- "Neither '{}' object nor its wrapped "
- "tensor has attribute '{}'".format(self.__class__.__name__, item)
- )
- # end if
- # end __getattr__
-
- # Get item
- def __getitem__(self, item) -> 'BaseTensor':
- r"""Get data in the :class:`BaseTensor`.
- """
- return BaseTensor(self._tensor[item])
- # end __getitem__
-
- # Set item
- def __setitem__(self, key, value) -> None:
- r"""Set data in the :class:`BaseTensor`.
- """
- self._tensor[key] = value
- # end __setitem__
-
- # Get representation
- def __repr__(self) -> str:
- r"""Get the :class:`BaseTensor` string representation
- """
- return "basetensor({})".format(self._tensor)
- # end __repr__
-
- # Are two time-tensors equivalent
- def __eq__(
- self,
- other: 'BaseTensor'
- ) -> bool:
- r"""Are two :class:`BaseTensor` equivalent?
-
- :param other: The other :class:`BaseTensor`.
- :return: ``True`` if the two :class:`BaseTensor` are equivalent, ``False`` otherwise.
- """
- return self.tensor.ndim == other.tensor.ndim and self.tensor.size() == other.tensor.size() and \
- torch.all(self.tensor == other.tensor)
- # end __eq__
-
- # Are two base-tensors not equal
- def __ne__(
- self,
- other: 'BaseTensor'
- ) -> bool:
- r"""Are two base-tensors not equal
- """
- return not (self.__eq__(self, other))
- # end __ne__
-
- # Object addition (in-place)
- def __iadd__(self, other):
- r"""In-place addition with base tensors.
-
- :param other: object to add
- :type other: ``BaseTensor``, ``torch.Tensor`` or scalar
- """
- self._tensor += other.tensor if isinstance(other, BaseTensor) else other
- return self
- # end __iadd__
-
- # Object subtraction (in-place)
- def __isub__(self, other):
- r"""In-place subtraction with base tensors.
-
- :param other: object to subtract
- :type other: ``BaseTensor``, ``torch.Tensor`` or scalar
- """
- self._tensor -= other.tensor if isinstance(other, BaseTensor) else other
- return self
- # end __isub__
-
- # Addition
- def __add__(self, other):
- r"""Addition with base tensors. Returns a new :class:`BaseTensor` and leaves ``self`` unchanged.
-
- :param other: object to add
- :type other: ``BaseTensor``, ``torch.Tensor`` or scalar
- """
- other = other.tensor if isinstance(other, BaseTensor) else other
- return BaseTensor(self._tensor + other)
- # end __add__
-
- # Addition (right)
- def __radd__(self, other):
- r"""Addition with base tensors (right operand).
-
- :param other: object to add
- :type other: ``BaseTensor``, ``torch.Tensor`` or scalar
- """
- other = other.tensor if isinstance(other, BaseTensor) else other
- return BaseTensor(other + self._tensor)
- # end __radd__
-
- # Subtraction
- def __sub__(self, other):
- r"""Subtraction with base tensors. Returns a new :class:`BaseTensor` and leaves ``self`` unchanged.
-
- :param other: object to subtract
- :type other: ``BaseTensor``, ``torch.Tensor`` or scalar
- """
- other = other.tensor if isinstance(other, BaseTensor) else other
- return BaseTensor(self._tensor - other)
- # end __sub__
-
- # Subtraction (right)
- def __rsub__(self, other):
- r"""Subtraction with base tensors (right operand).
-
- :param other: object to subtract from
- :type other: ``BaseTensor``, ``torch.Tensor`` or scalar
- """
- other = other.tensor if isinstance(other, BaseTensor) else other
- return BaseTensor(other - self._tensor)
- # end __rsub__
-
- # Multiplication
- def __mul__(self, other):
- r"""Multiplication with base tensors. Returns a new :class:`BaseTensor` and leaves ``self`` unchanged.
-
- :param other: multiplier
- :type other: ``BaseTensor``, ``torch.Tensor`` or scalar
- """
- other = other.tensor if isinstance(other, BaseTensor) else other
- return BaseTensor(self._tensor * other)
- # end __mul__
-
- # Multiplication (right)
- def __rmul__(self, other):
- r"""Multiplication with base tensors (right operand).
-
- :param other: multiplier
- :type other: ``BaseTensor``, ``torch.Tensor`` or scalar
- """
- other = other.tensor if isinstance(other, BaseTensor) else other
- return BaseTensor(other * self._tensor)
- # end __rmul__
-
- # Division
- def __truediv__(self, other):
- r"""Division with base tensors. Returns a new :class:`BaseTensor` and leaves ``self`` unchanged.
-
- :param other: divisor
- :type other: ``BaseTensor``, ``torch.Tensor`` or scalar
- """
- other = other.tensor if isinstance(other, BaseTensor) else other
- return BaseTensor(self._tensor / other)
- # end __truediv__
-
- # Less than operation with base tensors.
- def __lt__(self, other) -> 'BaseTensor':
- r"""Less than operation with base tensors.
- """
- return BaseTensor(
- data=self._tensor < other
- )
- # end __lt__
-
- # Less or equal than operation with base tensors.
- def __le__(self, other) -> 'BaseTensor':
- r"""Less than operation with base tensors.
- """
- return BaseTensor(
- data=self._tensor <= other
- )
- # end __le__
-
- # Greater than operation with base tensors.
- def __gt__(self, other) -> 'BaseTensor':
- r"""Greater than operation with base tensors.
- """
- return BaseTensor(
- data=self._tensor > other
- )
- # end __gt__
-
- # Greater or equal than operation with base tensors.
- def __ge__(self, other) -> 'BaseTensor':
- r"""Greater or equal than operation with base tensors.
- """
- return BaseTensor(
- data=self._tensor >= other
- )
- # end __ge__
-
- # endregion OVERRIDE
-
- # region STATIC
-
- # Returns a new BaseTensor with data as the tensor data.
- @classmethod
- def new_basetensor(
- cls,
- data: Union[torch.Tensor, 'BaseTensor']
- ) -> 'BaseTensor':
- r"""Returns a new :class:`BaseTensor` with data as the tensor data.
-
- :param data: data as a torch tensor or another :class:`BaseTensor`.
- :return: a new :class:`BaseTensor` with *data*.
- :rtype: :class:`BaseTensor`
- """
- return BaseTensor(
- data
- )
- # end new_basetensor
-
- # Returns new base tensor with a specific function
- @classmethod
- def new_basetensor_with_func(
- cls,
- size: Tuple[int],
- func: Callable,
- **kwargs
- ) -> 'BaseTensor':
- r"""Returns a new base tensor with a specific function to generate the data.
-
- :param func: a callable object used for creation.
- :param size: size of the :class:`BaseTensor` to be created.
- :return: a new :class:`BaseTensor` created with ``func`` of size ``size``.
- :rtype: :class:`BaseTensor`
- """
- # Create BaseTensor
- return BaseTensor(
- data=func(size, **kwargs)
- )
- # end new_basetensor_with_func
-
- # endregion STATIC
-
-# end BaseTensor
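-
-# Usage sketch for :class:`BaseTensor` (illustrative): cast methods mutate the
-# wrapped tensor in place and return ``self``, while arithmetic returns a new
-# instance and leaves the operands unchanged.
-#
-#     >>> b = BaseTensor(torch.ones(3))
-#     >>> b.int().tensor.dtype
-#     torch.int32
-#     >>> c = b + 1 # new BaseTensor; ``b`` keeps its values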
-
-# endregion BASETENSOR
-
-
-# region VARIANTS
-
-
-# Double base tensor
-class DoubleBaseTensor(BaseTensor):
- r"""Double (float64) :class:`BaseTensor`.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'BaseTensor']
- ) -> None:
- r"""Double :class:`BaseTensor` constructor.
-
- :param data: The data in a torch tensor to transform to :class:`BaseTensor`.
- """
- # Super call
- super(DoubleBaseTensor, self).__init__(
- data
- )
-
- # Cast data
- self.double()
- # end __init__
-
-# end DoubleBaseTensor
-
-
-# Float base tensor
-class FloatBaseTensor(BaseTensor):
- r"""Float (float32) :class:`BaseTensor`.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'BaseTensor'],
- ) -> None:
- r"""Float :class:`BaseTensor` constructor.
-
- :param data: The data in a torch tensor to transform to :class:`BaseTensor`.
- """
- # Super call
- super(FloatBaseTensor, self).__init__(
- data
- )
-
- # Cast data
- self.float()
- # end __init__
-
-# end FloatBaseTensor
-
-
-# Half base tensor
-class HalfBaseTensor(BaseTensor):
- r"""Half (float16) :class:`BaseTensor`.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'BaseTensor']
- ) -> None:
- r"""Half :class:`BaseTensor` constructor.
-
- :param data: The data in a torch tensor to transform to :class:`BaseTensor`.
- """
- # Super call
- super(HalfBaseTensor, self).__init__(
- data
- )
-
- # Cast data
- self.half()
- # end __init__
-
-# end HalfBaseTensor
-
-
-# 16-bit brain floating point base tensor
-class BFloat16Tensor(BaseTensor):
- r"""Brain floating point (bfloat16) :class:`BaseTensor`.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'BaseTensor']
- ) -> None:
- r"""bfloat16 :class:`BaseTensor` constructor.
-
- :param data: The data in a torch tensor to transform to :class:`BaseTensor`.
- """
- # Super call
- super(BFloat16Tensor, self).__init__(
- data
- )
-
- # Cast data
- self.bfloat16()
- # end __init__
-
-# end BFloat16Tensor
-
-
-# Boolean basetensor
-class BoolBaseTensor(BaseTensor):
- r"""Boolean :class:`BaseTensor`.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'BaseTensor']
- ) -> None:
- r"""Boolean :class:`BaseTensor` constructor.
-
- :param data: The data in a torch tensor to transform to :class:`BaseTensor`.
- """
- # Super call
- super(BoolBaseTensor, self).__init__(
- data
- )
-
- # Cast data
- self.bool()
- # end __init__
-
-# end BoolBaseTensor
-
-
-# 8-bit integer (unsigned) base tensor
-class ByteBaseTensor(BaseTensor):
- r"""8-bit integer (unsigned) :class:`BaseTensor`.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'BaseTensor']
- ) -> None:
- r"""8-bit integer (unsigned) :class:`BaseTensor` constructor.
-
- :param data: The data in a torch tensor to transform to :class:`BaseTensor`.
- """
- # Super call
- super(ByteBaseTensor, self).__init__(
- data
- )
-
- # Cast data
- self.byte()
- # end __init__
-
-# end ByteBaseTensor
-
-
-# 8-bit integer (signed) base tensor
-class CharBaseTensor(BaseTensor):
- r"""8-bit integer (signed) :class:`BaseTensor`.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'BaseTensor']
- ) -> None:
- r"""8-bit integer (signed) :class:`BaseTensor` constructor.
-
- :param data: The data in a torch tensor to transform to :class:`BaseTensor`.
- """
- # Super call
- super(CharBaseTensor, self).__init__(
- data
- )
-
- # Cast data
- self.char()
- # end __init__
-
-# end CharBaseTensor
-
-
-# 16-bit integer (signed) base tensor.
-class ShortBaseTensor(BaseTensor):
- r"""16-bit integer (signed) :class:`BaseTensor`.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'BaseTensor']
- ) -> None:
- r"""16-bit integer (signed) :class:`BaseTensor` constructor.
-
- :param data: The data in a torch tensor to transform to :class:`BaseTensor`.
- """
- # Super call
- super(ShortBaseTensor, self).__init__(
- data
- )
-
- # Cast data
- self.short()
- # end __init__
-
-# end ShortBaseTensor
-
-
-# 32-bit integer (signed) base tensor.
-class IntBaseTensor(BaseTensor):
- r"""32-bit integer (signed) :class:`BaseTensor`.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'BaseTensor']
- ) -> None:
- r"""32-bit integer (signed) :class:`BaseTensor` constructor.
-
- :param data: The data in a torch tensor to transform to :class:`BaseTensor`.
- """
- # Super call
- super(IntBaseTensor, self).__init__(
- data
- )
-
- # Cast data
- self.int()
- # end __init__
-
-# end IntBaseTensor
-
-
-# 64-bit integer (signed) base tensor.
-class LongBaseTensor(BaseTensor):
- r"""64-bit integer (signed) :class:`BaseTensor`.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'BaseTensor']
- ) -> None:
- r"""64-bit integer (signed) :class:`BaseTensor` constructor.
-
- :param data: The data in a torch tensor to transform to :class:`BaseTensor`.
- """
- # Super call
- super(LongBaseTensor, self).__init__(
- data
- )
-
- # Cast data
- self.long()
- # end __init__
-
-# end LongBaseTensor
-
-
-# endregion VARIANTS
-
-
diff --git a/echotorch/conceptors.py b/echotorch/conceptors.py
deleted file mode 100644
index ad104a3..0000000
--- a/echotorch/conceptors.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/conceptors.py
-# Description : EchoTorch conceptors creation and management utility functions.
-# Date : 9th of April, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel ,
-# University of Geneva
-
-
-# Imports
-import torch
-from typing import Union, List
-import echotorch.nn.conceptors
-from echotorch.nn.conceptors import Conceptor, ConceptorSet
-from echotorch.utils.utility_functions import generalized_squared_cosine
-
-
-# conceptor zero
-def czero(input_dim: int) -> Conceptor:
- """
- Create an empty Conceptor (zero) of specific size
- """
- return Conceptor.zero(input_dim)
-# end czero
-
-
-# conceptor one
-def cone(input_dim: int) -> Conceptor:
- """
- Create an unit Conceptor of specific size
- """
- return Conceptor.identity(input_dim)
-# end cone
-
-
-# conceptor one
-def cidentity(input_dim: int) -> Conceptor:
- """
- Create an unit Conceptor of specific size
- """
- return cone(input_dim)
-# end cidentity
-
-
-# conceptor similarity
-def csim(
- c1: Union[Conceptor, List[Conceptor], ConceptorSet],
- c2: Union[Conceptor, List[Conceptor], ConceptorSet],
- based_on: str = 'C',
- sim_func: callable = generalized_squared_cosine
-):
- """
- Conceptor similarity based on the generalized squared cosine
- """
- # Types
- c1_list = isinstance(c1, list)
- c2_list = isinstance(c2, list)
-
- if not c1_list and not c2_list:
- return c1.sim(c2, based_on, sim_func)
- elif not c1_list and c2_list:
- return c1.sim(c2, based_on, sim_func)
- elif c1_list and not c2_list:
- return c2.sim(c1, based_on, sim_func)
- else:
- sim_matrix = torch.zeros(len(c1), len(c2))
- for c1_i, c1_c in enumerate(c1):
- for c2_i, c2_c in enumerate(c2):
- sim_matrix[c1_i, c2_i] = echotorch.nn.Conceptor.similarity(
- c1_c,
- c2_c,
- based_on,
- sim_func
- )
- # end for
- # end for
- return sim_matrix
- # end if
-# end csim
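-
-# Usage sketch for ``csim`` (illustrative, assumes ``ca`` and ``cb`` are
-# trained Conceptor instances): with two lists the result is a
-# (len(c1), len(c2)) matrix of pairwise similarities.
-#
-#     >>> m = csim([ca, cb], [ca, cb])
-#     >>> m.size()
-#     torch.Size([2, 2])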
-
-
-# conceptor similarity
-def csimilarity(
- c1: Union[Conceptor, List[Conceptor], ConceptorSet],
- c2: Union[Conceptor, List[Conceptor], ConceptorSet],
- based_on: str = 'C',
- sim_func: callable = generalized_squared_cosine
-):
- """
- Conceptor similarity
- """
- if isinstance(c1, Conceptor) and isinstance(c2, Conceptor):
- return echotorch.nn.Conceptor.similarity(c1, c2, based_on, sim_func)
- elif isinstance(c1, ConceptorSet) and isinstance(c2, Conceptor) or \
- isinstance(c1, Conceptor) and isinstance(c2, list):
- return c1.sim(c2, based_on, sim_func)
- elif isinstance(c1, Conceptor) and isinstance(c2, ConceptorSet) or \
- isinstance(c1, list) and isinstance(c2, Conceptor):
- return c2.sim(c1, based_on, sim_func)
- elif isinstance(c1, list) and isinstance(c2, list):
- sim_matrix = torch.zeros(len(c1), len(c2))
- for c1_i, c1_c in enumerate(c1):
- for c2_i, c2_c in enumerate(c2):
- sim_matrix[c1_i, c2_i] = Conceptor.similarity(
- c1_c,
- c2_c,
- based_on,
- sim_func
- )
- # end for
- # end for
- return sim_matrix
- # end if
-# end csimilarity
-
-
-# OR operator
-def OR(c1: Conceptor, c2: Conceptor) -> Conceptor:
- """
- OR operator
- """
- return Conceptor.operator_OR(c1, c2)
-# end OR
-
-
-# AND operator
-def AND(c1: Conceptor, c2: Conceptor):
- """
- AND operator
- """
- return Conceptor.operator_AND(c1, c2)
-# end AND
-
-
-# NOT operator
-def NOT(c1: Conceptor):
- """
- NOT operator
- """
- return Conceptor.operator_NOT(c1)
-# end NOT
-
-
-# PHI operator
-def PHI(c, gamma):
- """
- PHI operator
- :param c:
- :param gamma:
- :return:
- """
- return Conceptor.operator_PHI(c, gamma)
-# end PHI
-
-
-# Conceptor constructor
-def conceptor(input_dim, aperture, *args, **kwargs):
- """
- Conceptor constructor
- """
- return Conceptor(
- input_dim,
- aperture,
- *args,
- **kwargs
- )
-# end conceptor
-
-
-# Conceptor set
-def conceptor_set(input_dim, *args, **kwargs):
- """
- Conceptor set
- :param input_dim:
- :param args:
- :param kwargs:
- :return:
- """
- return ConceptorSet(
- input_dim,
- *args,
- **kwargs
- )
-# end conctor_set
diff --git a/echotorch/data/__init__.py b/echotorch/data/__init__.py
deleted file mode 100644
index 9557e60..0000000
--- a/echotorch/data/__init__.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/data/__init__.py
-# Description : Dataset subpackages init file
-# Date : 3rd of March, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Import functions
-from .random_processes import random_walk, moving_average, autoregressive_process, autoregressive_moving_average
-from .random_processes import weighted_moving_average, cumulative_moving_average, exponential_moving_average
-from .random_processes import rw, unirw, ma, unima, wma, cma, ema, ar, arma
-from .chaotic import henon
-
-# ALL
-__all__ = [
- # Chaotic
- 'henon',
- # Random process
- 'random_walk', 'moving_average', 'weighted_moving_average', 'cumulative_moving_average',
- 'exponential_moving_average', 'autoregressive_process', 'autoregressive_moving_average',
- 'rw', 'unirw', 'ma', 'unima', 'wma', 'cma', 'ema', 'ar', 'arma'
-]
diff --git a/echotorch/data/arima.py b/echotorch/data/arima.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/data/basics.py b/echotorch/data/basics.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/data/chaotic.py b/echotorch/data/chaotic.py
deleted file mode 100644
index 3e4ead9..0000000
--- a/echotorch/data/chaotic.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/datasets/functional/chaotic.py
-# Description : Attractor and chaos-based timeseries generation.
-# Date : 10th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import Union, Tuple, List
-import torch
-import echotorch
-from random import shuffle
-
-
-# Henon attractor
-def henon(
- size: int,
- length: int,
- xy: Tuple[float, float],
- a: float = 1.4,
- b: float = 0.3,
- washout: int = 0
-) -> List[echotorch.TimeTensor]:
- """Generate a series with the Hénon map dynamical system.
-
- Definition
- From Wikipedia: the Hénon map, sometimes called the **Hénon-Pomeau attractor/map**, is a discrete-time dynamical
- system. It is one of the most studied examples of dynamical systems that exhibit chaotic behavior.
- The Hénon map takes a point :math:`(x_n, y_n)` in the plane and maps it to the new point
-
- .. math::
- :nowrap:
-
- \[
- \\begin{cases}
- x_{n+1} = 1 - a x_n^2 + y_n \\\\
- y_{n+1} = b x_n
- \\end{cases}
- \]
-
- The map depends on two parameters, **a** and **b**, which for the **classical Hénon map** have values of a = 1.4 and
- b = 0.3. For the classical values the Hénon map is chaotic. For other values of a and b the map may be
- chaotic, intermittent, or converge to a periodic orbit.
-
- :param size: How many samples to generate
- :type size: ``int``
- :param length: Length of samples (time)
- :type length: ``int``
- :param xy: Starting position in the xy-plane
- :type xy: Tuple of floats
- :param a: First system parameter (default: 1.4)
- :type a: Float
- :param b: Second system parameter (default: 0.3)
- :type b: Float
- :param washout: Time steps to remove at the beginning of samples
- :type washout: int (default: 0)
- :return: A ``list`` of ``TimeTensor`` with series generated from Hénon's equations
- :rtype: ``list`` of ``TimeTensor``
-
- Example:
- >>> x = echotorch.data.henon(1, 100, xy=(0, 0), a=1.4, b=0.3)
- >>> x
- timetensor(tensor([[ 1.0000, 0.0000],
- [-0.4000, 0.3000],
- [ 1.0760, -0.1200],
- [-0.7409, 0.3228],
- [ 0.5543, -0.2223],
- ...
- [ 0.9608, 0.1202],
- [-0.1721, 0.2882],
- [ 1.2468, -0.0516],
- [-1.2279, 0.3740]]), time_dim: 0, tlen: 100)
- >>> echotorch.utils.timepoints2d(x)
- """
- # Samples
- samples = list()
-
- # Henon functions
- def henon_func(x: float, y: float) -> torch.Tensor:
- x_dot = 1 - (a * (x * x)) + y
- y_dot = b * x
- return torch.Tensor([x_dot, y_dot])
- # end henon_func
-
- # Washout
- for t in range(washout):
- xy = henon_func(xy[0], xy[1])
- # end for
-
- # For each sample
- for n in range(size):
- # Tensor
- sample = echotorch.zeros((2,), length=length)
-
- # Timesteps
- for t in range(length):
- xy = henon_func(xy[0], xy[1])
- sample[t] = xy
- # end for
-
- # Add
- samples.append(sample)
- # end for
-
- # Shuffle
- shuffle(samples)
-
- return samples
-# end henon
diff --git a/echotorch/data/datasets.py b/echotorch/data/datasets.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/data/datasets/CopyTaskDataset.py b/echotorch/data/datasets/CopyTaskDataset.py
deleted file mode 100644
index 67261fd..0000000
--- a/echotorch/data/datasets/CopyTaskDataset.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/datasets/CopyTaskDataset.py
-# Description : Dataset for the copy task (Graves et al, 2016)
-# Date : 16th of July, 2020
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-import torch
-from typing import List, Tuple
-
-# Imports local
-from .EchoDataset import EchoDataset
-
-
-# Copy task dataset
-class CopyTaskDataset(EchoDataset):
- """
- Copy task dataset
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(self, n_samples, length_min, length_max, n_inputs, dtype=None):
- """
- Constructor
- @param n_samples: How many samples to generate
- @param length_min: Minimum length of the series
- @param length_max: Maximum length of the series
- @param n_inputs: How many inputs
- @param dtype: Data type
- """
- # Properties
- self.length_min = length_min
- self.length_max = length_max
- self.n_samples = n_samples
- self.n_inputs = n_inputs
- self.dtype = dtype
-
- # Generate data set
- self.samples = self.generate(
- self.n_samples,
- self.length_min,
- self.length_max,
- self.n_inputs,
- self.dtype
- )
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PUBLIC
-
- # Get the whole dataset
- @property
- def data(self) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
- """
- Get the whole dataset as a list
- @return: A tuple of lists (inputs, outputs)
- """
- return self.samples
- # end data
-
- # endregion PUBLIC
-
- # region OVERRIDE
-
- # Length
- def __len__(self):
- """
- Length
- :return:
- """
- return self.n_samples
- # end __len__
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- @param idx: Sample index
- @return: The (inputs, outputs) pair at position ``idx``
- """
- return self.samples[0][idx], self.samples[1][idx]
- # end __getitem__
-
- # Extra representation
- def extra_repr(self) -> str:
- """
- Extra representation
- @return: Dataset representation as a string
- """
- return "length_min={}, length_max={}, n_inputs={}, dtype={}".format(
- self.length_min,
- self.length_max,
- self.n_inputs,
- self.dtype
- )
- # end extra_repr
-
- # Function to generate a sample
- def datafunc(self, length_min, length_max, n_inputs, dtype=None) -> Tuple[torch.Tensor, torch.Tensor]:
- """
- Function to generate a sample
- @param length_min: Minimum length of the random pattern
- @param length_max: Maximum length of the random pattern
- @param n_inputs: Number of input channels (a marker channel is added)
- @param dtype: Data type
- @return: Input and target tensors for one copy-task sample
- """
- # Generate length
- sample_len = torch.randint(low=length_min, high=length_max, size=(1,)).item()
-
- # Create empty inputs and output
- sample_inputs = torch.zeros((sample_len * 2 + 1, n_inputs + 1), dtype=dtype)
- sample_outputs = torch.zeros((sample_len * 2 + 1, n_inputs + 1), dtype=dtype)
-
- # Generate a random pattern
- random_pattern = torch.randint(low=0, high=2, size=(sample_len, n_inputs))
-
- # Set in inputs and outputs
- sample_inputs[:sample_len, :n_inputs] = random_pattern
- sample_outputs[sample_len + 1:, :n_inputs] = random_pattern
- sample_inputs[sample_len, n_inputs] = 1.0
-
- return sample_inputs, sample_outputs
- # end datafunc
-
- # endregion OVERRIDE
-
- # region STATIC
-
- # Generate samples
- def generate(
- self,
- n_samples,
- length_min,
- length_max,
- n_inputs,
- dtype=None
- ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
- """
- Generate samples
- :param n_samples:
- :param length_min:
- :param length_max:
- :param n_inputs:
- :param dtype:
- """
- # List of samples
- samples_in = list()
- samples_out = list()
-
- # For each sample
- for i in range(n_samples):
- # Generate a sample
- sample_inputs, sample_outputs = self.datafunc(length_min, length_max, n_inputs, dtype)
-
- # Append
- samples_in.append(sample_inputs)
- samples_out.append(sample_outputs)
- # end for
-
- return samples_in, samples_out
- # end generate
-
- # endregion STATIC
-
-# end CopyTaskDataset
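-
-# Usage sketch (illustrative): each sample is an (inputs, outputs) pair of
-# shape (2 * L + 1, n_inputs + 1), where L is the drawn pattern length and the
-# extra channel and time step carry the copy marker.
-#
-#     >>> ds = CopyTaskDataset(n_samples=10, length_min=2, length_max=5, n_inputs=8)
-#     >>> x, y = ds[0]
-#     >>> x.size(1), y.size(1)
-#     (9, 9)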
diff --git a/echotorch/data/datasets/DelayDataset.py b/echotorch/data/datasets/DelayDataset.py
deleted file mode 100644
index 237faaa..0000000
--- a/echotorch/data/datasets/DelayDataset.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/datasets/DelayDataset.py
-# Description : Create a version of a dataset with delayed inputs.
-# Date : 17th of March, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel ,
-# University of Geneva
-
-
-# Imports
-import torch
-from typing import Union, List, Tuple
-
-# Local imports
-from .EchoDataset import EchoDataset
-
-
-# Create a version of a dataset with delayed inputs.
-class DelayDataset(EchoDataset):
- """
- Generates a series of input time series and delayed versions as outputs.
- Delay is given in number of time steps. Can be used to empirically measure the
- memory capacity of a system.
- """
-
- # region CONSTUCTORS
-
- # Constructor
- def __init__(self, root_dataset, n_delays=10, data_index=0, keep_indices=None):
- """
- Constructor
- :param root_dataset: Root dataset
- :param n_delays: Number of time steps to delay (positive: predict the future, negative: recall the past)
- :param data_index: Index (or list of indices) of the timeseries inside each root sample
- :param keep_indices: Indices of additional items of each root sample to pass through unchanged
- """
- # Properties
- self._root_dataset = root_dataset
- self._n_delays = n_delays
- self._data_index = data_index
- self._keep_indices = keep_indices
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region OVERRIDE
-
- # Get the whole dataset
- @property
- def data(self) -> Union[Tuple[List, List], Tuple[List, List, List]]:
- """
- Get the whole dataset (according to init parameters)
- @return: Lists of input and output timeseries (and kept items if requested)
- """
- # List of samples
- samples_in = list()
- samples_out = list()
- samples_index = list()
-
- # For each sample in the dataset
- for idx in range(len(self._root_dataset)):
- sample = self[idx]
- if self._keep_indices:
- samples_in.append(sample[0])
- samples_out.append(sample[1])
- samples_index.append(sample[2])
- else:
- samples_in.append(sample[0])
- samples_out.append(sample[1])
- # end if
- # end for
-
- # Return
- if self._keep_indices:
- return samples_in, samples_out, samples_index
- else:
- return samples_in, samples_out
- # end if
- # end data
-
- # Length
- def __len__(self):
- """
- Length
- :return:
- """
- return len(self._root_dataset)
- # end __len__
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- @param idx: Item index
- @return: List of input/output timeseries (plus kept additional entries)
- """
- # Same construction as datafunc, so delegate to it
- return self.datafunc(idx)
- # end __getitem__
-
- # Extra representation
- def extra_repr(self) -> str:
- """
- Extra representation
- """
- return "root_dataset={}, n_delays={}, data_index={}, keep_indices={}".format(
- self._root_dataset,
- self._n_delays,
- self._data_index,
- self._keep_indices
- )
- # end extra_repr
-
- # Function to generate data
- def datafunc(self, idx) -> List:
- """
- Function to generate data
- @param idx: Item index
- @return: Timeseries, delay timeseries
- """
- # Item list
- item_list = list()
-
- # Get sample from root dataset
- data = self._root_dataset[idx]
-
- # Data indices
- data_indices = [self._data_index] if type(self._data_index) is int else self._data_index
-
- # For each data index
- for data_index in data_indices:
- # Get data
- original_timeseries = data[data_index]
-
- # Future
- if self._n_delays > 0:
- # Prediction
- input_timeseries = original_timeseries[:-self._n_delays]
- output_timeseries = original_timeseries[self._n_delays:]
- elif self._n_delays < 0:
- # Memory: the output is the input delayed into the past
- input_timeseries = original_timeseries[-self._n_delays:]
- output_timeseries = original_timeseries[:self._n_delays]
- else:
- input_timeseries = original_timeseries
- output_timeseries = original_timeseries.clone()
- # end if
-
- # Add input and output
- item_list.append(input_timeseries)
- item_list.append(output_timeseries)
- # end for
-
- # Add all additional data
- if self._keep_indices is not None:
- for keep_index in self._keep_indices:
- item_list.append(data[keep_index])
- # end for
- # end if
-
- return item_list
- # end datafunc
-
- # endregion OVERRIDE
-
-# end DelayDataset
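-
-
-# A minimal usage sketch (added for illustration, not part of the original
-# module): wrap an existing dataset to build a 5-step memory task. The
-# `SinusoidalTimeseries` constructor arguments below are assumptions.
-#
-# from echotorch.datasets import SinusoidalTimeseries
-# root = SinusoidalTimeseries(sample_len=1000, n_samples=10)
-# delayed = DelayDataset(root, n_delays=-5)
-# u, y = delayed[0]  # y is u delayed by 5 time steps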
diff --git a/echotorch/data/datasets/EchoDataset.py b/echotorch/data/datasets/EchoDataset.py
deleted file mode 100644
index ad598c5..0000000
--- a/echotorch/data/datasets/EchoDataset.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/data/datasets/EchoDataset.py
-# Description : Base class for EchoTorch datasets
-# Date : 25th of January, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-from typing import Union, List
-import torch
-from torch.utils.data.dataset import Dataset
-
-
-# EchoDataset
-class EchoDataset(Dataset):
- r"""An abstract class for EchoTorch dataset objects
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(
- self,
- n: int,
- stream: bool
- ) -> None:
- r"""Constructors
-
- Args:
- n: The size of the data set (number of samples)
- stream: Do we generate samples on the fly?
- """
- # Properties
- self._n = n
- self._stream = stream
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region OVERRIDE
-
- # Length
- def __len__(self):
- r"""Get the length of the dataset
-
- Returns: The length of the dataset (Scalar)
-
- """
- return self._n
- # end __len__
-
- # Representation
- def __repr__(self):
- r"""Returns a displayable representation of the object
-
- Returns:
- A displayable representation of the object
-
- """
- return "{}({})".format(
- self.__class__.__name__,
- self.extra_repr()
- )
- # end __repr__
-
- # endregion OVERRIDE
-
- # region TO_IMPLEMENT
-
- # Get the whole dataset
- @property
- def data(self) -> Union[torch.Tensor, List]:
- """
- Get the whole dataset (according to init parameters)
- @return: The Torch Tensor
- """
- raise Exception("data not implemented")
- # end data
-
- # Extra representation
- def extra_repr(self) -> str:
- """
- Extra representation
- """
- raise Exception("extra_repr not implemented")
- # end extra_repr
-
- # Function to generate data
- def datafunc(self, *args, **kwargs):
- """
- Function to generate data
- :param args: Positional arguments
- :param kwargs: Arguments
- """
- raise Exception("datafunc not implemented")
- # end datafunc
-
- # endregion TO_IMPLEMENT
-
-# end EchoDataset
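-
-
-# A minimal subclass sketch (added for illustration, not part of the original
-# module), assuming the three methods above are the whole contract:
-#
-# class ConstantDataset(EchoDataset):
-#     def __init__(self, n):
-#         super(ConstantDataset, self).__init__(n, stream=False)
-#
-#     def __getitem__(self, idx):
-#         return self.datafunc()
-#
-#     def datafunc(self):
-#         return torch.ones(10, 1)
-#
-#     def extra_repr(self):
-#         return "n={}".format(self._n)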
diff --git a/echotorch/data/datasets/FuncDataset.py b/echotorch/data/datasets/FuncDataset.py
deleted file mode 100644
index 3441ef5..0000000
--- a/echotorch/data/datasets/FuncDataset.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/data/datasets/FuncDataset.py
-# Description : Generic dataset to transform a function into a PyTorch Dataset object.
-# Date : 9th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import Tuple, List, Callable
-import torch
-import csv
-
-# Local imports
-from .EchoDataset import EchoDataset
-
-
-# Generic dataset to transform a function into a PyTorch Dataset object.
-class FuncDataset(EchoDataset):
- r"""Generic dataset to transform a function into a PyTorch Dataset object.
- """
-
- # Constructor
- def __init__(
- self,
- n: int,
- stream: bool,
- data_func: Callable,
- *args,
- **kwargs
- ) -> None:
- """
- Constructor
-
- Args:
- n: The size of the dataset (the number of samples)
- stream: Do we generate samples on the fly?
- data_func: The callable object which will create the data.
- *args: Positional arguments for the data function.
- **kwargs: Key arguments for the data function.
- """
- # Super
- super(FuncDataset, self).__init__(n, stream)
-
- # Properties
- self._data_func = data_func
- self._args = args
- self._kwargs = kwargs
-
- # Generate all samples if not streaming
- self._data = self._generate_dataset() if not stream else None
- # end __init__
-
- # region PRIVATE
-
- # Create the dataset by generating all samples
- def _generate_dataset(self) -> List:
- r"""Create the dataset by generating all samples (not streaming)
-
- Returns: Dataset samples as a list of timetensors
- """
- return [self._data_func(*self._args, **self._kwargs) for _ in range(self._n)]
- # end _generate_dataset
-
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- :param idx: Sample index
- :return: Sample as torch tensor
- """
- # Generate on the fly if streaming, otherwise
- # return the pre-generated sample
- if self._stream:
- return self._data_func(*self._args, **self._kwargs)
- # end if
- return self._data[idx]
- # end __getitem__
-
- # endregion OVERRIDE
-
- # region STATIC
-
- # Generate data
- def datafunc(self, *args, **kwargs):
- r"""Generate samples from the data function.
-
- Args:
- *args: Positional arguments passed to the data function.
- **kwargs: Keyword arguments passed to the data function.
-
- Returns:
- Whatever the wrapped data function returns.
- """
- return self._data_func(*args, **kwargs)
- # end datafunc
-
- # endregion STATIC
-
-# end FuncDataset
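-
-
-# A usage sketch (added for illustration, not part of the original module):
-# turn any generating function into a dataset; `torch.randn` stands in here
-# for a real data function.
-#
-# dataset = FuncDataset(100, False, torch.randn, 50, 3)
-# sample = dataset[0]  # a pre-generated (50, 3) tensor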
diff --git a/echotorch/data/datasets/TimeseriesDataset.py b/echotorch/data/datasets/TimeseriesDataset.py
deleted file mode 100644
index 7e8d1bc..0000000
--- a/echotorch/data/datasets/TimeseriesDataset.py
+++ /dev/null
@@ -1,1144 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/data/datasets/TimeseriesDataset.py
-# Description : Load timeseries from a directory
-# Date : 23rd of February, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-import os
-import torch
-import json
-import numpy as np
-from typing import Union, List, Optional
-
-# echotorch imports
-from echotorch.transforms import Transformer
-
-# Imports local
-from .EchoDataset import EchoDataset
-
-
-# Filenames
-INFO_DATA_FILE_OUTPUT = "{:07d}TS.pth"
-PROPERTIES_FILE = "dataset_properties.json"
-INFO_METADATA_FILE_OUTPUT = "{:07d}TS.json"
-
-
-# A dataset to load time series from a directory with meta-data
-class TimeseriesDataset(EchoDataset):
- """
- A dataset to load time series from a directory with meta-data
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(
- self,
- root_directory: str,
- global_transform: Optional[Transformer] = None,
- transforms: Optional[List[Transformer]] = None,
- global_label_transform: Optional[Transformer] = None,
- label_transforms: Optional[List[Transformer]] = None,
- segments_transform: Optional[Transformer] = None,
- events_transform: Optional[Transformer] = None,
- timestep: Optional[float] = 1.0,
- range_value: Optional[float] = None,
- scale: float = 1.0,
- selected_columns: Optional[List[str]] = None,
- label_columns: Optional[List[str]] = None,
- segment_label_to_return: Optional[str] = None,
- in_memory: bool = False,
- return_segments: bool = True,
- return_events: bool = True,
- return_metadata: bool = True,
- return_labels: bool = True,
- dtype=torch.float64
- ):
- """
- Constructor
- :param root_directory: Base root directory
- :param global_transform: An EchoTorch transformer for the whole time series
- :param transforms: EchoTorch transformers for each segment label
- :param global_label_transform: An EchoTorch transformer for the whole label time series
- :param label_transforms: EchoTorch transformers for each segment label, applied to the labels
- :param segments_transform: Transformer applied to the segments tensor
- :param events_transform: Transformer applied to the events tensor
- :param timestep: The time step of time series (default: 1 second)
- :param range_value: Clip values to [-range_value, range_value] (None to disable)
- :param scale: Multiplicative scale applied to the input time series
- :param selected_columns: Names of the columns to return in the tensor
- :param label_columns: Names of the columns to return as label time series
- :param segment_label_to_return: Segment label to return (None for no selection)
- :param in_memory: Keep data in memory
- :param return_segments: Return segments as a tensor
- :param return_events: Return events as a tensor
- :param return_metadata: Return sample metadata as a dict
- :param return_labels: Return label time series as a tensor
- :param dtype: Data type
- """
- # Properties
- self._root_directory = root_directory
- self._timestep = timestep
- self._range_value = range_value
- self._scale = scale
- self._root_json_file = os.path.join(self._root_directory, PROPERTIES_FILE)
- self._in_memory = in_memory
- self._segment_label_to_return = segment_label_to_return
- self._return_segments = return_segments
- self._return_events = return_events
- self._return_metadata = return_metadata
- self._return_labels = return_labels
- self._dtype = dtype
-
- # Transforms
- self._global_transform = global_transform
- self._global_label_transform = global_label_transform
- self._segments_transform = segments_transform
- self._events_transform = events_transform
-
- # Transformers
- if transforms is None:
- self._transforms = dict()
- else:
- self._transforms = transforms
- # end if
-
- # Label transformers
- if label_transforms is None:
- self._label_transforms = dict()
- else:
- self._label_transforms = label_transforms
- # end if
-
- # Load JSON file
- self._dataset_properties = self._filter_dataset(
- self._load_properties_file(self._root_json_file)
- )
-
- # Check that segment_label_to_return is ok
- if segment_label_to_return is not None and segment_label_to_return not in self.segment_label_names:
- raise Exception(
- "Parameter segment_label_to_return should be None or in {}".format(list(self.segment_label_names.keys()))
- )
- # end if
-
- # Selected columns
- if selected_columns is None:
- self._selected_columns = self.columns
- else:
- self._selected_columns = selected_columns
- # end if
-
- # Alternative columns
- if label_columns is None:
- self._label_columns = []
- else:
- self._label_columns = label_columns
- # end if
-
- # Load in memory if necessary
- self._sample_in_memory = False
- if in_memory:
- self._loaded_samples = self._load_samples()
- # end if
-
- # Build mapping
- self._index_mapping = self._build_mapping()
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
- # Root directory (GET)
- @property
- def root_directory(self) -> str:
- """
- Root directory
- :return: Root directory
- """
- return self._root_directory
- # end root_directory
-
- # Global transform (GET)
- @property
- def global_transform(self) -> Transformer:
- """
- Global transform (GET)
- :return: A Transformer object
- """
- return self._global_transform
- # end global_transform
-
- # Global transform (SET)
- @global_transform.setter
- def global_transform(self, value: Transformer) -> None:
- """
- Global transform (SET)
- :param value: New transformer
- """
- self._global_transform = value
- # end global_transform
-
- # Global label transform (GET)
- @property
- def global_label_transform(self) -> Transformer:
- """
- Global transform (GET)
- :return: A Transformer object
- """
- return self._global_label_transform
-
- # end global_label_transform
-
- # Global transform (SET)
- @global_label_transform.setter
- def global_label_transform(self, value: Transformer) -> None:
- """
- Global transform (SET)
- :param value: New transformer
- """
- self._global_label_transform = value
- # end global_label_transform
-
- # Transforms (GET)
- @property
- def transforms(self) -> dict:
- """
- Transform
- :return: Transformer
- """
- return self._transforms
- # end transforms
-
- # Transform (SET)
- @transforms.setter
- def transforms(self, value: dict):
- """
- Transformer (SET)
- :param value: New transformers as a dict
- """
- self._transforms = value
- # end transforms
-
- # Label transforms (GET)
- @property
- def label_transforms(self) -> dict:
- """
- Transform
- :return: Transformer
- """
- return self._label_transforms
- # end label_transforms
-
- # Label transform (SET)
- @label_transforms.setter
- def label_transforms(self, value: dict):
- """
- Transformer (SET)
- :param value: New transformers as a dict
- """
- self._label_transforms = value
- # end label_transforms
-
- # Number of samples in the dataset
- @property
- def n_samples(self) -> int:
- """
- Number of samples in the dataset
- :return: Number of samples
- """
- return self._dataset_properties['n_samples']
- # end n_samples
-
- # Sample length
- @property
- def sample_length(self) -> int:
- """
- Accumulated lengths of the samples
- """
- return self._dataset_properties['sample_length']
- # end sample_length
-
- # Sample properties
- @property
- def sample_properties(self) -> list:
- """
- Sample properties
- """
- return self._dataset_properties['samples']
- # end sample_properties
-
- # Columns
- @property
- def columns(self):
- """
- Columns
- """
- return self._dataset_properties['column_names']
- # end columns
-
- # Columns properties
- @property
- def columns_properties(self):
- """
- Columns properties
- """
- return self._dataset_properties['columns']
- # end columns_properties
-
- # Labels
- @property
- def labels(self) -> list:
- """
- Labels
- """
- return self._dataset_properties['labels']
- # end labels
-
- # Metadata
- @property
- def metadata(self) -> dict:
- """
- Metadata
- """
- return self._dataset_properties['metadata']
- # end metadata
-
- # Event type indices
- @property
- def event_type_indices(self) -> dict:
- """
- Event type indices
- """
- return self._dataset_properties['event_type_indices']
- # end event_type_indices
-
- # Event type names
- @property
- def event_type_names(self) -> list:
- """
- Event type names
- """
- return list(self._dataset_properties['event_type_names'].keys())
- # end event_type_names
-
- # Event types
- @property
- def event_types(self) -> list:
- """
- Event types
- """
- return self._dataset_properties['event_types']
- # end event_types
-
- # Segment label names
- @property
- def segment_label_names(self) -> dict:
- """
- Segment label names
- :return: Mapping from segment label names to indices
- """
- return self._dataset_properties['segment_label_names']
- # end segment_label_names
-
- # Segment label indices
- @property
- def segment_label_indices(self) -> dict:
- """
- Segment label indices
- :return: Mapping from segment label indices (as strings) to names
- """
- return self._dataset_properties['segment_label_indices']
- # end segment_label_indices
-
- # endregion PROPERTIES
-
- # region PUBLIC
-
- # Label name to index
- def label_name_to_index(self, label_name: str) -> int:
- """
- Label name to index
- """
- return self._dataset_properties['label_names'][label_name]
- # end label_name_to_index
-
- # Label index to name
- def label_index_to_name(self, label_index: int) -> str:
- """
- Label index to name
- """
- return self._dataset_properties['label_indices'][str(label_index)]
- # end label_index_to_name
-
- # Label classes
- def label_classes(self, label_name: str) -> dict:
- """
- Get label classes
- """
- label_index = self.label_name_to_index(label_name)
- return self._dataset_properties['labels'][label_index]['classes']
- # end label_classes
-
- # Label class names
- def label_class_names(self, label_name: str):
- """
- Get label class names
- """
- label_index = self.label_name_to_index(label_name)
- return self._dataset_properties['labels'][label_index]['class_names']
- # end label_class_names
-
- # Number of samples for a class
- def label_class_n_samples(self, label_name, class_name):
- """
- Number of samples for a class
- @param label_name:
- @return:
- """
- label_index = self.label_name_to_index(label_name)
- class_index = self.class_name_to_index(label_index, class_name)
- return self._dataset_properties["label"][label_index]["classes"][str(class_index)]
- # end label_class_n_samples
-
- # Class name to index
- def class_name_to_index(self, label_index: int, class_name: str) -> Union[int, None]:
- """
- Class name to index
- """
- label_desc = self._dataset_properties["labels"][label_index]
- for class_i, class_desc in label_desc["classes"].items():
- if class_desc["name"] == class_name:
- return class_desc["id"]
- # end if
- # end for
- return None
- # end class_name_to_index
-
- # Event type properties
- def event_type_properties(self, event_name: str) -> dict:
- """
- Event type properties
- """
- event_index = self.event_name_to_index(event_name)
- return self._dataset_properties['event_types'][event_index]
- # end event_type_properties
-
- # Event name to index
- def event_name_to_index(self, event_name: str) -> int:
- """
- Event name to index
- """
- return self._dataset_properties['event_type_names'][event_name]
- # end event_name_to_index
-
- # Event name from index
- def event_index_to_name(self, event_index: int) -> str:
- """
- Event index to name
- """
- return self._dataset_properties['event_type_indices'][str(event_index)]
- # end event_index_to_name
-
- # Column properties
- def column_properties(self, column_name: str) -> dict:
- """
- Column properties
- :param column_name: Column name
- :return: Dictionary of properties
- """
- column_id = self._dataset_properties['column_names'][column_name]
- return self._dataset_properties['columns'][str(column_id)]
- # end column_properties
-
- # Column index by name
- def column_name_to_index(self, column_name: str) -> int:
- """
- Column index by name
- """
- return self._dataset_properties['column_names'][column_name]
- # end column_index_from_name
-
- # Column name from index
- def column_index_to_name(self, column_index: int) -> str:
- """
- Column name from index
- """
- return self._dataset_properties['columns'][str(column_index)]['id']
- # end column_name_from_index
-
- # Get sample by index
- def get_sample(self, sample_index: int) -> dict:
- """
- Get sample by index
- """
- if self._sample_in_memory:
- return self._loaded_samples[sample_index]
- else:
- # Data file name
- data_file_path = os.path.join(self._root_directory, INFO_DATA_FILE_OUTPUT.format(sample_index))
-
- # Load tensor data
- with open(data_file_path, 'rb') as data_file:
- return torch.load(data_file)
- # end with
- # end if
- # end get_sample
-
- # Get sample metadata
- def get_sample_metadata(self, sample_index: int) -> dict:
- """
- Get sample metadata
- """
- # Metadata file
- metadata_file_path = os.path.join(self._root_directory, INFO_METADATA_FILE_OUTPUT.format(sample_index))
-
- # Load JSON
- with open(metadata_file_path, 'r') as metadata_file:
- return json.load(metadata_file)
- # end with
- # end get_sample_metadata
-
- # Get sample labels
- def get_sample_labels(self, sample_index: int) -> list:
- """
- Get sample labels
- """
- return self._dataset_properties['samples'][sample_index]['labels']
- # end get_sample_labels
-
- # Get sample class
- def get_sample_class(self, sample_index: int, label_index: int) -> int:
- """
- Get sample class
- """
- for sample_label in self.get_sample_labels(sample_index):
- if sample_label['id'] == label_index:
- return sample_label['class']
- # end if
- # end for
- raise Exception("Unknown label index: {}".format(label_index))
- # end get_sample_class
-
- # Get sample class tensor
- def get_sample_class_tensor(self, sample_index: int, time_tensor=False, time_length=None) -> torch.Tensor:
- """
- Get sample class tensor
- """
- if time_tensor:
- if time_length is None:
- return self._create_class_time_tensor(
- self._dataset_properties['samples'][sample_index]['labels'],
- self.get_sample_length(sample_index)
- )
- else:
- return self._create_class_time_tensor(
- self._dataset_properties['samples'][sample_index]['labels'],
- time_length
- )
- # end if
- else:
- n_labels = len(self.labels)
- class_array = np.zeros(n_labels)
- for label in self.labels:
- class_array[label['id']] = self.get_sample_class(sample_index, label['id'])
- # end for
- return torch.tensor(class_array, dtype=self._dtype)
- # end if
- # end get_sample_class_tensor
-
- # Get sample length
- def get_sample_length(self, sample_index: int) -> int:
- """
- Get sample length
- """
- return self._dataset_properties['samples'][sample_index]['length']
- # end get_sample_length
-
- # Get sample properties
- def get_sample_properties(self, sample_index: int) -> dict:
- """
- Get sample properties
- """
- return self._dataset_properties['samples'][sample_index]
- # end get_sample_properties
-
- # Get sample number of segments
- def get_sample_segment_count(self, sample_index: int) -> int:
- """
- Get sample number of segments
- """
- return self._dataset_properties['samples'][sample_index]['n_segments']
- # end get_sample_segment_count
-
- # Get sample segments
- def get_sample_segments(self, sample_index: int) -> list:
- """
- Get sample segments
- """
- return self._dataset_properties['samples'][sample_index]['segments']
- # end get_sample_segments
-
- # Get sample events
- def get_sample_events(self, sample_index: int) -> list:
- """
- Get sample events
- """
- return self._dataset_properties['samples'][sample_index]['events']
- # end get_sample_events
-
- # From segment label index to name
- def segment_label_index_to_name(self, segment_label_index) -> str:
- """
- From segment label index to name
- """
- return self._dataset_properties['segment_label_indices'][segment_label_index]
- # end segment_label_index_to_name
-
- # From segment label name to index
- def segment_label_name_to_index(self, segment_label_name: str) -> int:
- """
- From segment label name to index
- """
- return self._dataset_properties['segment_label_names'][segment_label_name]
- # end segment_label_name_to_index
-
- # Get metadata entry
- def get_metadata(self, item: str):
- """
- Get metadata entry
- """
- return self._dataset_properties['metadata'][item]
- # end get_metadata
-
- # Get transformer for a gait
- def get_transform(self, segment_label_name: str) -> Transformer:
- """
- Get transformer for a gait
- :param segment_label_name: Segment label name
- :return: The transformer
- """
- return self._transforms[segment_label_name]
- # end get_transform
-
- # Set transformer for a gait
- def set_transform(self, segment_label_name: str, value: Transformer):
- """
- Set transformer for a gait
- :param segment_label_name: Segment label name
- :param value: New transformer
- """
- self._transforms[segment_label_name] = value
- # end set_transform
-
- # Get segment label stats
- def get_segment_label_stats(self, segment_label_name: str) -> dict:
- """
- Get segment label stats
- """
- segment_label_index = self.segment_label_name_to_index(segment_label_name)
- return self._dataset_properties["segment_labels"][segment_label_index]["stats"]
- # end get_segment_label_stats
-
- # endregion PUBLIC
-
- # region PRIVATE
-
- # Build mapping
- def _build_mapping(self):
- """
- Build mapping
- """
- return None
- # end _build_mapping
-
- # Apply transformers
- def _apply_transformers(
- self,
- global_transform: Transformer,
- transforms: dict,
- data_tensor: torch.Tensor,
- segments_tensor: torch.Tensor
- ) -> torch.Tensor:
- """
- Apply transformers
- :param data_tensor: Sample data tensors as a dict
- :param segments_tensor: Sample segments as a dict
- :return: Transformed data tensors as a dict
- """
- # Apply global transformers
- if global_transform is not None:
- data_tensor = global_transform(data_tensor)
- # end if
-
- # For each segment
- for segment_i in range(segments_tensor.size(0)):
- # Get segment info
- segment_start = segments_tensor[segment_i, 0]
- segment_end = segments_tensor[segment_i, 1]
- segment_label = self.segment_label_index_to_name(str(segments_tensor[segment_i, 2].item()))
-
- # There is a transformer for this?
- if segment_label in transforms.keys() and transforms[segment_label] is not None:
- data_tensor[segment_start:segment_end] = transforms[segment_label](
- data_tensor[segment_start:segment_end]
- )
- # end if
- # end for
-
- return data_tensor
- # end _apply_transformers
-
- # Apply scale
- def _apply_scale(self, x: torch.Tensor) -> torch.Tensor:
- """
- Apply scale
- :param x: Input time series
- @return: Scaled time series
- """
- return x * self._scale
- # end _apply_scale
-
- # Apply range
- def _apply_range(self, x: torch.Tensor) -> torch.Tensor:
- """
- Apply range
- @param x: Input time series
- @return: Time series ranged
- """
- # Clip values above/below +/- range_value (no-op if no range is set)
- if self._range_value is None:
- return x
- # end if
- x[x > self._range_value] = self._range_value
- x[x < -self._range_value] = -self._range_value
- return x
- # end _apply_range
-
- # Load JSON file
- def _load_properties_file(self, json_file: str) -> dict:
- """
- Load JSON file
- :param json_file: Path to JSON file
- """
- with open(json_file, 'r') as r:
- return json.load(r)
- # end with
- # end _load_properties_file
-
- # Load samples in memory
- def _load_samples(self):
- """
- Load samples in memory
- """
- # Samples
- loaded_samples = list()
-
- # For each sample
- for sample_i in range(self.n_samples):
- loaded_samples.append(
- self.get_sample(sample_i)
- )
- # end for
-
- # In memory
- self._sample_in_memory = True
-
- return loaded_samples
- # end _load_samples
-
- # Create time tensor for labels
- def _create_class_time_tensor(self, sample_labels, sample_len) -> torch.Tensor:
- """
- Create time tensor for labels
- """
- # Np array
- class_array = np.zeros(shape=(sample_len, len(sample_labels)), dtype=np.int64)
-
- # For each label
- for label in sample_labels:
- class_array[:, label['id']] = label['class']
- # end for
-
- return torch.tensor(class_array).long()
- # end _create_class_time_tensor
-
- # Create a tensor from the dictionary
- def _create_input_tensor(self, timeseries_dict: dict, sample_length: int) -> torch.Tensor:
- """
- Create a tensor from the dictionary
- :param timeseries_dict: Dictionary of tensors for each column
- :param sample_length: Length of the sample
- :return: The tensor with all columns
- """
- # Create an empty float tensor
- timeseries_input = torch.empty(sample_length, len(self._selected_columns), dtype=self._dtype)
-
- # For each data column
- for col_i, col_name in enumerate(self._selected_columns):
- if col_name in timeseries_dict.keys():
- timeseries_input[:, col_i] = timeseries_dict[col_name]
- # end if
- # end for
-
- return timeseries_input
- # end _create_input_tensor
-
- # Create label tensor
- def _create_label_tensor(self, timeseries_dict: dict, sample_length: int) -> torch.Tensor:
- """
- Create label tensor
- @param timeseries_dict: Dictionary of tensors for each column
- @param sample_length: Length of the sample
- @return: The label tensor
- """
- # Create an empty float tensor
- timeseries_label = torch.empty(sample_length, len(self._label_columns), dtype=self._dtype)
-
- # For each label column
- for col_i, col_name in enumerate(self._label_columns):
- if col_name in timeseries_dict.keys():
- timeseries_label[:, col_i] = timeseries_dict[col_name]
- # end if
- # end for
-
- return timeseries_label
- # end _create_label_tensor
-
- # Create a tensor for segments
- def _create_segments_tensor(
- self,
- sample_segments: list,
- time_length: int
- ) -> torch.Tensor:
- """
- Create a tensor for segments
- :param sample_segments: Sample segments (list of dict)
- :param time_length: Time length of the series
- :return: Segments position and end as a tensor
- """
- # List of segments
- gait_segments_list = list()
-
- # For each segment
- for seg_i, segment in enumerate(sample_segments):
- # Transform to integer if necessary
- if type(segment['label']) is int:
- segment_label = segment['label']
- elif type(segment['label']) is str:
- segment_label = self.segment_label_name_to_index(segment['label'])
- else:
- raise TypeError("Segment label must be an int or a str (here {})".format(type(segment['label'])))
- # end if
-
- # Add segment
- if seg_i == len(sample_segments) - 1:
- gait_segments_list.append([segment['start'], time_length, segment_label])
- else:
- gait_segments_list.append([segment['start'], segment['end'], segment_label])
- # end if
- # end for
-
- # Return segments tensor
- return torch.LongTensor(gait_segments_list)
- # end _create_segments_tensor
-
- # Time t in a segment of gait type?
- def _pos_in_segment_label(self, segments_tensor: torch.Tensor, event_tensor: torch.Tensor, segment_label_name: str):
- """
- Time t in a segment of gait type?
- :param segments_tensor:
- :param event_tensor:
- :param segment_label_name:
- :return:
- """
- # Name to index
- segment_label_index = self.segment_label_name_to_index(segment_label_name)
-
- # For each segment
- time_pos = 0
- for segment_i in range(segments_tensor.size(0)):
- # Segment info
- segment_start = segments_tensor[segment_i, 0]
- segment_end = segments_tensor[segment_i, 1]
- segment_label = segments_tensor[segment_i, 2]
- segment_length = segment_end - segment_start
- if segment_label == segment_label_index:
- if segment_start <= event_tensor[0] <= segment_end and segment_start <= event_tensor[1] <= segment_end:
- event_tensor[0] = time_pos + (event_tensor[0] - segment_start)
- event_tensor[1] = time_pos + (event_tensor[1] - segment_start)
- return event_tensor
- # end if
- time_pos += segment_length
- # end if
- # end for
-
- # Tag is not found
- event_tensor[2] = -1
-
- return event_tensor
- # end _pos_in_segment_label
-
- # Create a tensor for events (jumps)
- def _create_events_tensor(self, sample_events: list) -> torch.Tensor:
- """
- Create a tensor for events (jumps)
- :param sample_events:
- :return:
- """
- if len(sample_events) > 0:
- # Create an empty tensor for events data
- events_list = list()
-
- # For each events
- for event in sample_events:
- events_list.append([int(event['start']), int(event['end']), int(event['type'])])
- # end for
-
- return torch.LongTensor(events_list)
- else:
- return torch.zeros(0, 0).long()
- # end if
- # end _create_events_tensor
-
- # Filter segment tensor
- def _filter_segment_tensor(self, segments_tensor: torch.Tensor, segment_label_name: str):
- """
- Filter segment tensor
- :param segments_tensor:
- :param segment_label_name:
- """
- if segment_label_name is not None and segments_tensor.size(0) > 0:
- # Filter tensor
- filtered_tensor = segments_tensor[segments_tensor[:, 2] == self.segment_label_name_to_index(segment_label_name)]
-
- # Change time position
- time_pos = 0
- for segment_i in range(filtered_tensor.size(0)):
- # Segment info
- segment_start = filtered_tensor[segment_i, 0]
- segment_end = filtered_tensor[segment_i, 1]
- segment_length = segment_end - segment_start
- filtered_tensor[segment_i, 0] = time_pos
- filtered_tensor[segment_i, 1] = time_pos + segment_length
- time_pos = time_pos + segment_length
- # end for
-
- return filtered_tensor
- else:
- return segments_tensor
- # end if
- # end _filter_segment_tensor
-
- # Filter event tensor
- def _filter_event_tensor(self, events_tensor: torch.Tensor, segments_tensor: torch.Tensor, segment_label_name: str):
- """
- Filter segment tensor
- :param events_tensor:
- :param segment_label_name:
- """
- if segment_label_name is not None and events_tensor.size(0) > 0:
- # Change time position
- for event_i in range(events_tensor.size(0)):
- # Is event in a target segment ?
- events_tensor[event_i] = self._pos_in_segment_label(
- segments_tensor,
- events_tensor[event_i],
- segment_label_name
- )
- # end for
-
- return events_tensor[events_tensor[:, 2] != -1]
- else:
- return events_tensor
- # end if
- # end _filter_event_tensor
-
- # Filter a time series as tensor for gait type
- def _filter_ts_segment_label_name(
- self,
- timeseries_input: torch.Tensor,
- segments_tensor: torch.Tensor,
- segment_label_name: str
- ) -> torch.Tensor:
- """
- Filter a time series as tensor for gait type
- :param timeseries_input:
- :param segments_tensor:
- :param segment_label_name:
- :return:
- """
- if segment_label_name is not None:
- # List of indices in the temporal axis
- indices_list = []
-
- # For each segment
- for segment_i in range(segments_tensor.size(0)):
- # Get segment info
- segment_start = segments_tensor[segment_i, 0]
- segment_end = segments_tensor[segment_i, 1]
-
- segment_label = self.segment_label_index_to_name(str(segments_tensor[segment_i, 2].item()))
-
- # Add to the list of indices in the selected segment
- if segment_label == segment_label_name:
- indices_list += list(range(segment_start, segment_end))
- # end if
- # end for
-
- return torch.index_select(timeseries_input, dim=0, index=torch.LongTensor(indices_list))
- else:
- return timeseries_input
- # end if
- # end _filter_ts_segment_label_name
-
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Representation
- def extra_repr(self):
- """
- Representation
- """
- s = '{}, {}'
- return s.format(self._root_directory, self._root_json_file)
- # end extra_repr
-
- # Length of the dataset
- def __len__(self):
- """
- Length of the dataset
- """
- return self.n_samples
- # end __len__
-
- # Get an item
- def __getitem__(self, item: int) -> list:
- """
- Get an item
- """
- # Return list
- return_list = []
-
- # Remapping
- if self._index_mapping is not None:
- item = self._index_mapping[item]
- # end if
-
- # Get sample from Timeseries dataset
- timeseries_dict = self.get_sample(item)
-
- # Time length
- time_length = timeseries_dict['gait'].size(0)
-
- class_time_tensor = self.get_sample_class_tensor(sample_index=item, time_tensor=True)
- class_tensor = self.get_sample_class_tensor(sample_index=item, time_tensor=False)
-
- # Create segment tensor
- segments_tensor = self._create_segments_tensor(self.get_sample_segments(item), time_length)
- segments_tensor = self._segments_transform(segments_tensor) if self._segments_transform is not None else segments_tensor
-
- # Create jump segment tensor
- events_tensor = self._create_events_tensor(self.get_sample_events(item))
- events_tensor = self._events_transform(events_tensor) if self._events_transform is not None else events_tensor
-
- # Create a tensor from the dictionary
- timeseries_input = self._create_input_tensor(timeseries_dict, self.get_sample_length(item))
-
- # Create a tensor with label timeseries
- timeseries_labels = self._create_label_tensor(timeseries_dict, self.get_sample_length(item))
-
- # Apply transforms to input time series
- timeseries_input = self._apply_transformers(
- self._global_transform,
- self._transforms,
- timeseries_input,
- segments_tensor
- )
-
- # Apply label transforms to the class label time series
- class_time_tensor = self._apply_transformers(
- self._global_label_transform,
- self._label_transforms,
- class_time_tensor,
- segments_tensor
- )
-
- # Filter input data for targeted segment label
- timeseries_input = self._filter_ts_segment_label_name(
- timeseries_input,
- segments_tensor,
- self._segment_label_to_return
- )
-
- # Apply scale and range
- timeseries_input = self._apply_scale(timeseries_input)
- timeseries_input = self._apply_range(timeseries_input)
-
- # Filter time-related ground truth for targeted segment label
- class_time_tensor = self._filter_ts_segment_label_name(
- class_time_tensor,
- segments_tensor,
- self._segment_label_to_return
- )
-
- # Filter events tensor
- events_tensor = self._filter_event_tensor(events_tensor, segments_tensor, self._segment_label_to_return)
-
- # Filter segments tensor
- segments_tensor = self._filter_segment_tensor(segments_tensor, self._segment_label_to_return)
-
- # Add to returns
- return_list += [timeseries_input, class_time_tensor, class_tensor]
-
- # Create the tensor for segments (if needed)
- if self._return_segments:
- return_list.append(segments_tensor)
- # end if
-
- # Create the tensor for events (jumps) (if needed)
- if self._return_events:
- return_list.append(events_tensor)
- # end if
-
- # Return sample metadata
- if self._return_metadata:
- metadata_dict = self.get_sample_properties(item)
- return_list.append(metadata_dict)
- # end if
-
- # Return labeling tensor
- if self._return_labels:
- return_list.append(timeseries_labels)
- # end if
-
- return return_list
- # end __getitem__
-
- # endregion OVERRIDE
-
- # region TO_OVERRIDE
-
- # Filter dataset
- def _filter_dataset(self, dataset_desc: dict) -> dict:
- """
- Filter dataset
- """
- return dataset_desc
- # end _filter_dataset
-
- # endregion TO_OVERRIDE
-
-# end TimeseriesDataset
-
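-# A construction sketch (added for illustration, not part of the original
-# module). The root directory is expected to contain dataset_properties.json
-# plus one <index>TS.pth / <index>TS.json pair per sample; the column and
-# segment label names below are hypothetical.
-#
-# dataset = TimeseriesDataset(
-#     root_directory="./my_dataset",
-#     selected_columns=["acc_x", "acc_y"],
-#     segment_label_to_return="walk",
-#     return_events=False
-# )
-# inputs, class_time, classes, segments = dataset[0][:4]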
diff --git a/echotorch/data/datasets/__init__.py b/echotorch/data/datasets/__init__.py
deleted file mode 100644
index fd3b1bf..0000000
--- a/echotorch/data/datasets/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/data/datasets/__init__.py
-# Description : Dataset subpackages init file
-# Date : 3rd of March, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports datasets
-from echotorch.data.datasets.LogisticMapDataset import LogisticMapDataset
-
-__all__ = [
- # Datasets
- 'CopyTaskDataset', 'DatasetComposer', 'DiscreteMarkovChainDataset', 'FromCSVDataset', 'HenonAttractor',
- 'LambdaDataset', 'LatchTaskDataset', 'LogisticMapDataset', 'LorenzAttractor', 'MackeyGlassDataset', 'MemTestDataset',
- 'NARMADataset', 'RosslerAttractor', 'SinusoidalTimeseries', 'PeriodicSignalDataset', 'RandomSymbolDataset',
- 'ImageToTimeseries', 'MarkovChainDataset', 'MixedSinesDataset', 'RepeatTaskDataset',
- 'TimeseriesBatchSequencesDataset', 'TransformDataset', 'TripletBatching', 'DelayDataset', 'EchoDataset',
- 'MackeyGlass2DDataset'
-]
diff --git a/echotorch/data/discrete.py b/echotorch/data/discrete.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/data/from_file.py b/echotorch/data/from_file.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/data/random_processes.py b/echotorch/data/random_processes.py
deleted file mode 100644
index 952a1f8..0000000
--- a/echotorch/data/random_processes.py
+++ /dev/null
@@ -1,648 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/data/random_processes.py
-# Description : Examples of time series generation based on random processes
-# Date : 12th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-from typing import Any, List, Optional, Tuple, Union, Callable
-import torch
-import echotorch
-
-
-# Random walk
-def random_walk(
- size: int,
- length: int,
- shape: Optional[Union[List, Tuple]] = None,
- noise_mean: Optional[float] = 0.0,
- noise_std: Optional[float] = 1.0
-) -> List[echotorch.TimeTensor]:
- r"""Generate time series based on a random walk process.
-
- Definition
- (From Wikipedia) In mathematics, a **random walk** is a mathematical object, known as a stochastic or random
- process, that describes a path that consists of a succession of random steps on some mathematical space such
- as the integers.
-
- If :math:`x(t)` is the generated random walk at time *t* and :math:`z(t)` a white noise with mean
- :math:`\mu` (noise_mean) and a standard deviation :math:`\sigma` (noise_std), the :math:`x(t)` is described as
-
- .. math::
- x(t) = x({t-1}) + z(t)
-
- `Article on Wikipedia <https://en.wikipedia.org/wiki/Random_walk>`__
-
- :param size: how many samples to generate.
- :type size: ``int``
- :param length: length of generated time series.
- :type length: ``int``
- :param shape: shape of time series.
- :type shape: ``torch.Size``, ``list`` or ``tuple`` of ``int``
- :param noise_mean: mean :math:`\mu` of the white noise.
- :type noise_mean: ``float``
- :param noise_std: standard deviation :math:`\sigma` of the white noise.
- :type noise_std: ``float``
- :return: a list of :class:`TimeTensor` with series generated from random walk.
- :rtype: list of :class:`TimeTensor`
-
- Example:
-
- >>> echotorch.data.random_walk(1, length=10000, shape=(2, 2))
- timetensor(tensor([[[-2.1806e-01, 1.5221e-01],
- [ 1.6733e-01, -9.5691e-01]],
- [[ 6.9345e-01, 4.2999e-01],
- [-1.8667e-01, -2.5323e-01]],
- [[ 4.9236e-01, -2.5215e+00],
- [-1.5146e-01, 1.5272e+00]],
- ...,
- [[ 1.6925e+02, -9.9522e+01],
- [ 9.7266e-01, 2.2402e+01]],
- [[ 1.7010e+02, -1.0009e+02],
- [ 1.0102e+00, 2.3406e+01]],
- [[ 1.7160e+02, -1.0048e+02],
- [ 7.4558e-01, 2.4151e+01]]]), time_dim: 0)
- """
- # Samples
- samples = list()
-
- # Shape
- shape = () if shape is None else shape
-
- # For each sample
- for n in range(size):
- # Generate noise Zt
- zt_noise = echotorch.randn(*shape, time_length=length+1) * noise_std + noise_mean
-
- # Space for xt
- xt = echotorch.zeros(*shape, time_length=length)
-
- # x(0)
- xt[0] = zt_noise[0]
-
- # For each timestep
- for t in range(1, length):
- xt[t] = xt[t-1] + zt_noise[t]
- # end for
-
- # Add
- samples.append(xt)
- # end for
-
- return samples
-# end random_walk
-
-
-# Random walk
-def rw(
- size: int,
- length: int,
- shape: Union[torch.Size, List, Tuple],
- noise_mean: Optional[float] = 0.0,
- noise_std: Optional[float] = 1.0
-) -> List[echotorch.TimeTensor]:
- r"""Alias for :func:`echotorch.data.random_walk`.
- """
- return random_walk(
- size=size,
- length=length,
- shape=shape,
- noise_mean=noise_mean,
- noise_std=noise_std
- )
-# end rw
-
-
-# Univariate Random Walk
-def unirw(
- size: int,
- length: int,
- noise_mean: Optional[float] = 0.0,
- noise_std: Optional[float] = 1.0
-) -> List[echotorch.TimeTensor]:
- r"""Generate a univariate time series based on a random walk process.
-
- See :func:`echotorch.data.random_walk` for mathematical details.
-
- :param size: how many samples to generate.
- :type size: ``int``
- :param length: length of generated time series.
- :type length: ``int``
- :param noise_mean: mean :math:`\mu` of the white noise.
- :type noise_mean: ``float``
- :param noise_std: standard deviation :math:`\sigma` of the white noise.
- :type noise_std: ``float``
- :return: a list of :class:`TimeTensor` with series generated from random walk.
- :rtype: list of :class:`TimeTensor`
-
- Example:
-
- >>> echotorch.data.unirw(1, length=10000)
- timetensor(tensor([ -1.5256, -2.2758, -2.9298, ..., -37.9416, -36.9469, -38.1765]), time_dim: 0)
- """
- return random_walk(
- size=size,
- length=length,
- shape=(),
- noise_mean=noise_mean,
- noise_std=noise_std
- )
-# end unirw
-
-
-# Multivariate Moving average
-def moving_average(
- samples: int,
- length: int,
- order: Optional[int] = None,
- size: Optional[int] = None,
- theta: Optional[torch.Tensor] = None,
- noise_mean: Optional[float] = 0.0,
- noise_std: Optional[float] = 1.0,
- noise_func: Optional[Callable] = echotorch.randn,
- parameters_func: Optional[Callable] = torch.rand
-) -> List[echotorch.TimeTensor]:
- r"""Create multivariate time series based on the moving average model (MA) or
- vector moving average process (VMA).
-
- The multivariate form of the Moving Average model MA(q) of order :math:`q` is of
- the form
-
- .. math::
- x(t) = z(t) + \Theta_1 z(t-1) + \dots + \Theta_q z(t-q)
-
- :math:`q` is the number of last entries used for the average. :math:`x(t)` is the moving average output at time
- *t* and :math:`z(t)` a noise with mean :math:`\mu` (*noise_mean*) and standard deviation :math:`\sigma` (*noise_std*).
- This function implements the general MA(q) model, where past noise terms are weighted by the matrices :math:`\Theta_k`.
-
- For Weighed Moving Average (WMA) see :func:`echotorch.data.weighted_moving_average`.
-
- For Cumulative Moving Average (CMA) see :func:`echotorch.data.cumulative_moving_average`.
-
- For Exponential Moving Average (EMA) see :func:`echotorch.data.exponential_moving_average`.
-
- `Article on Wikipedia <https://en.wikipedia.org/wiki/Moving_average>`__
-
- :param samples: how many samples to generate.
- :type samples: ``int``
- :param length: length of the time series to generate.
- :type length: ``int``
- :param order: value of :math:`q`, the order of the moving average :math:`MA(q)`.
- :type order: ``int``
- :param size: number of variables in the output time series.
- :type size: ``int``
- :param theta: a tensor of size (order, size, size) containing a parameter matrix for each lag.
- :type theta: ``torch.Tensor``
- :param noise_mean: mean :math:`\mu` of the white noise
- :type noise_mean: ``float``
- :param noise_std: standard deviation :math:`\sigma` of the white noise
- :type noise_std: ``float``
- :param noise_func: callable object to generate noise, compatible with the echotorch creation operator interface.
- :type noise_func: ``callable``
-
- Example:
-
- >>> moving_average = echotorch.data.moving_average(1, length=200, order=30, size=1)
- >>> plt.figure()
- >>> echotorch.viz.timeplot(moving_average[0], title="Multivariate Moving Average MA(q)")
- >>> plt.show()
-
- """
- # Check that parameters or theta are given
- if (order is None or size is None) and theta is None:
- raise ValueError(
- "Order and size, or theta must at least be given (here {}, {} and {}".format(order, size, theta)
- )
- # end if
-
- # Check theta size if given
- if theta is not None:
- # 3D tensor
- if theta.ndim != 3:
- raise ValueError(
- "Expected 3D tensor for theta with size (order, size, size), but {}D given".format(theta.ndim)
- )
- # end if
-
- # First two dim are square
- if theta.size()[1] != theta.size()[2]:
- raise ValueError(
- "Expected 3D tensor with first two dimension squared (order, size, size), "
- "but tensor of shape {} given".format(theta.size())
- )
- # end if
- # end if
-
- # Order, number of variables
- s = samples
- q = theta.size()[0] if theta is not None else order
- n = theta.size()[1] if theta is not None else size
-
- # If theta null, generate parameters
- if theta is None: theta = parameters_func(q, n, n)
-
- # Add identity for t
- theta = torch.cat((torch.unsqueeze(torch.eye(n), 0), theta), dim=0)
-
- # Samples
- samples = list()
-
- # For each sample
- for s_i in range(s):
- # Generate noise Zt
- zt = noise_func(n, time_length=length + q) * noise_std + noise_mean
-
- # Space for output
- xt = echotorch.zeros(n, time_length=length)
-
- # For each timestep
- for t in range(length):
- xt[t] = sum([torch.mv(theta[k], zt[t+q-k]) for k in range(0, q+1)])
- # end for
-
- # Add
- samples.append(xt)
- # end for
-
- return samples
-# end moving_average
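-
-
-# A short sketch (added for illustration, not part of the original module) of
-# passing explicit parameters: theta must have shape (order, size, size); here
-# an MA(2) process over 3 variables.
-#
-# theta = torch.rand(2, 3, 3)
-# series = moving_average(5, length=1000, theta=theta)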
-
-
-# Multivariate Moving average
-def ma(
- samples: int,
- length: int,
- order: Optional[int] = None,
- size: Optional[int] = None,
- theta: Optional[torch.Tensor] = None,
- noise_mean: Optional[float] = 0.0,
- noise_std: Optional[float] = 1.0,
- noise_func: Optional[Callable] = echotorch.randn,
- parameters_func: Optional[Callable] = torch.rand
-) -> List[echotorch.TimeTensor]:
- r"""Alias for :func:`echotorch.data.moving_average`.
- """
- return moving_average(
- samples=samples,
- length=length,
- order=order,
- size=size,
- theta=theta,
- noise_mean=noise_mean,
- noise_std=noise_std,
- noise_func=noise_func,
- parameters_func=parameters_func
- )
-# end ma
-
-
-# Univariate Moving Average
-def unima(
- samples: int,
- length: int,
- order: Optional[int] = None,
- noise_mean: Optional[float] = 0.0,
- noise_std: Optional[float] = 1.0,
- noise_func: Optional[Callable] = echotorch.randn,
- parameters_func: Optional[Callable] = torch.rand
-) -> List[echotorch.TimeTensor]:
- r"""Returns a univariate time series based on the moving average model (MA).
-
- Takes the same arguments as :func:`echotorch.data.moving_average`, with the
- number of variables (``size``) fixed to 1.
-
- See :func:`echotorch.data.moving_average` for more details.
-
- Example:
-
- >>> ...
- """
- # Generate series
- ma_series = moving_average(
- samples=samples,
- length=length,
- order=order,
- size=1,
- noise_mean=noise_mean,
- noise_std=noise_std,
- noise_func=noise_func,
- parameters_func=parameters_func
- )
-
- # To 0-D
- for i in range(len(ma_series)):
- ma_series[i] = ma_series[i][:, 0]
- # end for
-
- return ma_series
-# end unima
-
-
-# Multivariate Weighed Moving Average (WMA)
-def weighted_moving_average() -> List[echotorch.TimeTensor]:
- r"""Create multivariate time series based on the weighted moving average model (WMA).
- """
- pass
-# end weighted_moving_average
-
-
-# Alias for weighted_moving_average
-def wma() -> List[echotorch.TimeTensor]:
- r"""Alias for :func:`echotorch.data.weighted_moving_average`.
- """
- pass
-# end wma
-
-
-# Multivariate Cumulative Average (CMA)
-def cumulative_moving_average() -> List[echotorch.TimeTensor]:
- r"""Create multivariate time series based on the cumulative moving average model (CMA).
- """
- pass
-# end cumulative_moving_average
-
-
-# Alias for cumulative_moving_average
-def cma() -> List[echotorch.TimeTensor]:
- r"""Alias for :func:`echotorch.data.cumulative_moving_average`.
- """
- pass
-# end cma
-
-
-# Exponential Moving Average (EMA)
-def exponential_moving_average() -> List[echotorch.TimeTensor]:
- r"""Create multivariate time series based on the exponential moving average model (EMA).
- """
- pass
-# end exponential_moving_average
-
-
-# Alias for exponential_moving_average
-def ema() -> List[echotorch.TimeTensor]:
- r"""Alias for :func:`echotorch.data.exponential_moving_average`.
- """
- pass
-# end ema
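-
-
-# The three models above are unimplemented stubs. As a reference for the
-# intended recursion, a plain-tensor EMA sketch (an assumption, not this
-# module's API): s(t) = alpha * x(t) + (1 - alpha) * s(t - 1).
-#
-# def ema_sketch(x: torch.Tensor, alpha: float = 0.1) -> torch.Tensor:
-#     s = torch.empty_like(x)
-#     s[0] = x[0]
-#     for t in range(1, x.size(0)):
-#         s[t] = alpha * x[t] + (1.0 - alpha) * s[t - 1]
-#     return s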
-
-
-# Multivariate Autoregressive process
-def ar(
- samples: int,
- length: int,
- order: Optional[int] = None,
- size: Optional[int] = None,
- phi: Optional[torch.Tensor] = None,
- noise_mean: Optional[float] = 0.0,
- noise_std: Optional[float] = 1.0,
- noise_func: Optional[Callable] = echotorch.randn,
- parameters_func: Optional[Callable] = torch.rand
-) -> List[echotorch.TimeTensor]:
- r"""Alias for :func:`echotorch.data.autoregressive_process`.
- """
- return autoregressive_process(
- samples=samples,
- length=length,
- order=order,
- size=size,
- phi=phi,
- noise_mean=noise_mean,
- noise_std=noise_std,
- noise_func=noise_func,
- parameters_func=parameters_func
- )
-# end ar
-
-
-# Multivariate Auto-regressive process
-def autoregressive_process(
- samples: int,
- length: int,
- order: Optional[int] = None,
- size: Optional[int] = None,
- phi: Optional[torch.Tensor] = None,
- noise_mean: Optional[float] = 0.0,
- noise_std: Optional[float] = 1.0,
- noise_func: Optional[Callable] = echotorch.randn,
- parameters_func: Optional[Callable] = torch.rand
-) -> List[echotorch.TimeTensor]:
- r"""Create uni or multivariate time series based on autoregressive process (AR) or
- vector autoregressive model (VAR).
- """
- # Check that parameters or phi are given
- if (order is None or size is None) and phi is None:
- raise ValueError(
- "Order and size, or phi must at least be given (here {}, {} and {})".format(order, size, phi)
- )
- # end if
-
- # Check phi size if given
- if phi is not None:
- # 3D tensor
- if phi.ndim != 3:
- raise ValueError(
- "Expected 3D tensor for theta with size (order, size, size), but {}D given".format(phi.ndim)
- )
- # end if
-
- # First two dim are square
- if phi.size()[1] != phi.size()[2]:
- raise ValueError(
- "Expected 3D tensor with first two dimension squared (order, size, size), "
- "but tensor of shape {} given".format(phi.size())
- )
- # end if
- # end if
-
- # Order, number of variables
- s = samples
- p = phi.size()[0] if phi is not None else order
- n = phi.size()[1] if phi is not None else size
-
- # If phi is None, generate parameters
- if phi is None:
- phi = parameters_func(p, n, n)
- phi /= torch.sum(phi, dim=0)
- # end if
-
- # Add identity for t
- # phi = torch.cat((torch.unsqueeze(torch.eye(n), 0), phi), dim=0)
-
- # Samples
- samples = list()
-
- # For each sample
- for s_i in range(s):
- # Generate noise Zt
- zt = noise_func(n, time_length=length) * noise_std + noise_mean
-
- # Space for output
- xt = echotorch.zeros(n, time_length=length)
-
- # For each timestep
- for t in range(length):
- xt[t] = zt[t]
- # AR part: use lags 1..p of the output, not the current value
- xt[t] += sum([torch.mv(phi[k], xt[t - k - 1]) for k in range(p) if t - k - 1 >= 0])
- # end for
-
- # Add
- samples.append(xt)
- # end for
-
- return samples
-# end autoregressive_process
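-
-
-# A usage sketch (added for illustration, not part of the original module):
-# an AR(2) process over 2 variables with an explicit phi of shape
-# (order, size, size), normalised across lags as the function itself does
-# for randomly generated parameters.
-#
-# phi = torch.rand(2, 2, 2)
-# phi /= torch.sum(phi, dim=0)
-# series = autoregressive_process(1, length=500, phi=phi)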
-
-
-# Alias to autoregressive_moving_average
-def arma(
- samples: int,
- length: int,
- regressive_order: Optional[int] = None,
- moving_average_order: Optional[int] = None,
- size: Optional[int] = None,
- theta: Optional[torch.Tensor] = None,
- phi: Optional[torch.Tensor] = None,
- noise_mean: Optional[float] = 0.0,
- noise_std: Optional[float] = 1.0,
- noise_func: Optional[Callable] = echotorch.randn,
- parameters_func: Optional[Callable] = torch.rand
-) -> List[echotorch.TimeTensor]:
- r"""Alias to :func:`echotorch.data.autoregressive_moving_average`.
- """
- return autoregressive_moving_average(
- samples=samples,
- length=length,
- regressive_order=regressive_order,
- moving_average_order=moving_average_order,
- size=size,
- theta=theta,
- phi=phi,
- noise_mean=noise_mean,
- noise_std=noise_std,
- noise_func=noise_func,
- parameters_func=parameters_func
- )
-# end arma
-
-
-# Multivariate AutoRegressive Moving Average process (ARMA)
-def autoregressive_moving_average(
- samples: int,
- length: int,
- regressive_order: Optional[int] = None,
- moving_average_order: Optional[int] = None,
- size: Optional[int] = None,
- theta: Optional[torch.Tensor] = None,
- phi: Optional[torch.Tensor] = None,
- noise_mean: Optional[float] = 0.0,
- noise_std: Optional[float] = 1.0,
- noise_func: Optional[Callable] = echotorch.randn,
- parameters_func: Optional[Callable] = torch.rand
-) -> List[echotorch.TimeTensor]:
- r"""Create uni or multivariate time series based on AutoRegressive Moving Average process (ARMA) or
- Vector ARMA (ARMAV).
-
- :param samples: How many samples to generate.
- :type samples: ``ìnt``
- :param length: Length of the time series to generate.
- :type length: ``ìnt``
- :param order: Value of of :math:`q`, the order of the moving average :math:`MA(q)`.
- :type order: ``ìnt``
- :param size: Number of variables in the output time series.
- :type size: ``ìnt``
- :param theta: A tensor of size (order, size, size) containing parameter for each timestep as a matrix.
- :type theta: ``torch.Tensor``
- :param noise_mean: Mean :math:`\mu` of the white noise
- :type noise_mean: ``float``
- :param noise_std: Standard deviation :math:`\Sigma` of the white noise
- :type noise_std: ``float``
- :param noise_func: Callable object to generate noise compatible with echotorch creation operator interace.
- :type noise_func: ``callable``
-
- """
- # Check that orders and size, or phi and theta, are given
- if (regressive_order is None or moving_average_order is None or size is None) \
- and phi is None and theta is None:
- raise ValueError(
- "Regressive order, moving average order and size, or phi and theta, must be "
- "given (here {}, {}, {}, {} and {})".format(regressive_order, moving_average_order, size, phi, theta)
- )
- # end if
-
- # Check phi and theta sizes if both given
- if phi is not None and theta is not None:
- # 3D tensors
- if phi.ndim != 3 or theta.ndim != 3:
- raise ValueError(
- "Expected 3D tensors for phi and theta with size (order, size, size), but {}D and {}D given".format(phi.ndim, theta.ndim)
- )
- # end if
-
- # Last two dims must be square and match across tensors
- if phi.size()[1] != phi.size()[2] or theta.size()[1] != theta.size()[2] or theta.size()[1] != phi.size()[1]:
- raise ValueError(
- "Expected 3D tensors with their last two dimensions equal (order, size, size) and matching, "
- "but tensors of shape {} and {} given".format(phi.size(), theta.size())
- )
- # end if
- # end if
-
- # Order, number of variables
- s = samples
- p = phi.size()[0] if phi is not None else regressive_order
- q = theta.size()[0] if theta is not None else moving_average_order
- n = phi.size()[1] if phi is not None else size
-
- # If phi is None, generate parameters randomly
- if phi is None:
- phi = parameters_func(p, n, n)
- phi /= torch.sum(phi, dim=0)
- # end if
-
- # If theta is None, generate parameters randomly
- if theta is None:
- theta = parameters_func(q, n, n)
- # end if
-
-
- # Samples
- samples = list()
-
- # For each sample
- for s_i in range(s):
- # Generate noise Zt
- zt = noise_func(n, time_length=length + q) * noise_std + noise_mean
-
- # Space for output
- xt = echotorch.zeros(n, time_length=length)
-
- # For each timestep
- for t in range(length):
- # Current noise sits at offset q so the q lagged noise terms below stay distinct
- xt[t] = zt[t + q]
- # AR part: lags run from 1 to p; phi[k - 1] holds the lag-k matrix
- xt[t] += sum([torch.mv(phi[k - 1], xt[t - k]) for k in range(1, p + 1) if t - k >= 0])
- # MA part: lags 1..q over the padded noise
- xt[t] += sum([torch.mv(theta[k - 1], zt[t + q - k]) for k in range(1, q + 1)])
- # end for
-
- # Add
- samples.append(xt)
- # end for
-
- return samples
-# end autoregressive_moving_average
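
# Usage sketch for autoregressive_moving_average (again assuming the
# echotorch.data export this patch removes): a univariate ARMA(1, 1)
# process with explicit (order, size, size)-shaped parameter tensors.
import torch
import echotorch.data

phi = torch.tensor([[[0.5]]])    # AR lag-1 coefficient matrix (1x1)
theta = torch.tensor([[[0.3]]])  # MA lag-1 coefficient matrix (1x1)
series = echotorch.data.autoregressive_moving_average(
    samples=1, length=200, phi=phi, theta=theta
)
print(series[0].size())  # one TimeTensor of length 200
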
diff --git a/echotorch/data/sarima.py b/echotorch/data/sarima.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/data_tensors.py b/echotorch/data_tensors.py
deleted file mode 100644
index 2de501c..0000000
--- a/echotorch/data_tensors.py
+++ /dev/null
@@ -1,529 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/data_tensor.py
-# Description : A special tensor with a key-to-index information
-# Date : 13th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-from typing import Optional, Tuple, Union, List, Callable, Any, Dict
-import torch
-import numpy as np
-
-# Local imports
-from .base_tensors import BaseTensor
-
-
-# region DataIndexer
-class DataIndexer(object):
- r"""Make the one-one relation between keys and indices.
- """
-
- # Constructor
- def __init__(self, keys: List[Any]) -> None:
- r"""Create a data indexer from a dictionary
-
- :param keys: List of keys that will be assigned to row/column in a dimension.
- :type keys: ``list`
- """
- # Check keys (not ints)
- if not self._check_keys(keys):
- raise ValueError("Int cannot be used as key")
- # end if
-
- # Check for duplicates
- if len(keys) != len(set(keys)):
- raise ValueError("Key indexing of a tensor cannot accept duplicates")
- # end if
-
- # Properties
- self._size = len(keys)
- self._keys_to_indices = self._create_keys_to_indices(keys)
- self._indices_to_keys = self._create_indices_to_keys(keys)
- # end __init__
-
- # region PROPERTIES
-
- # Keys
- @property
- def keys(self) -> List[Any]:
- r"""List of keys
- """
- return list(self._keys_to_indices.keys())
- # end keys
-
- # Indices
- @property
- def indices(self) -> List[int]:
- r"""List of indices
- """
- return list(self._indices_to_keys.keys())
- # end indices
-
- # endregion PROPERTIES
-
- # region PUBLIC
-
- # To index
- def to_index(self, key: Union[List[Any], slice, Any]) -> Union[List[int], int, Dict, slice]:
- r"""Transform a key to an index (int).
-
- :param key: A key, a list of keys, a dict, or a slice
- :type key: A ``list``, a ``slice`` or any key value.
- :return: The input transformed to an index.
- :rtype: A ``list`` of ``int``, a ``int``, a ``dict`` or a ``slice``
- """
- if isinstance(key, list):
- return [self.to_index(el) for el in key]
- elif isinstance(key, slice):
- if key.step is None:
- return slice(
- self.to_index(key.start),
- self.to_index(key.stop)
- )
- else:
- return slice(
- self.to_index(key.start),
- self.to_index(key.stop),
- key.step
- )
- # end if
- elif isinstance(key, dict):
- return {k: self.to_index(v) for k, v in key.items()}
- else:
- if type(key) is int:
- # It is not a key
- return key
- else:
- # Transform the key to an index
- return self._keys_to_indices[key]
- # end if
- # end if
- # end to_index
-
- # To keys
- def to_keys(self, index: Union[List[int], int, Dict[Any, int]]) -> Union[List[Any], Any]:
- r"""Transform an index to a key.
- """
- if isinstance(index, list):
- return [self.to_keys(el) for el in index]
- elif isinstance(index, dict):
- return {k: self.to_keys(v) for k, v in index.items()}
- else:
- return self._indices_to_keys[index]
- # end if
- # end to_keys
-
- # Slice keys
- def slice_keys(self, slice_item):
- r"""Slice keys
- """
- # Indices
- if slice_item.step is None:
- return self.to_keys(
- list(
- range(
- self.to_index(slice_item.start),
- self.to_index(slice_item.stop)
- )
- )
- )
- else:
- return self.to_keys(
- list(
- range(
- self.to_index(slice_item.start),
- self.to_index(slice_item.stop),
- slice_item.step
- )
- )
- )
- # end if
- # end slice_keys
-
- # Filter items
- def filter_items(self, item) -> 'DataIndexer':
- r"""Create a new indexer with item filtered.
- """
- if isinstance(item, list):
- return DataIndexer(item)
- elif isinstance(item, slice):
- return DataIndexer(self.slice_keys(item))
- else:
- # Get index
- return DataIndexer([item])
- # end if
- # end filter_items
-
- # endregion PUBLIC
-
- # region PRIVATE
-
- # Check keys
- def _check_keys(self, keys: List[Any]) -> bool:
- r"""Check that there is not ints as keys
- """
- for key in keys:
- if type(key) is int:
- return False
- # end if
- # end for
- return True
- # end _check_keys
-
- # Compute keys to indices dictionary
- def _create_keys_to_indices(self, links: List[Any]) -> Dict[Any, int]:
- r"""Compute keys to indices dictionary
- """
- # Return key:index
- return {links[idx]: idx for idx in range(self._size)}
- # end _create_keys_to_indices
-
- # Compute indices to keys dictionary
- def _create_indices_to_keys(self, links: List[Any]) -> Dict[int, Any]:
- r"""Compute indices to keys dictionary
- """
- # Return index:key
- return {idx: links[idx] for idx in range(self._size)}
- # end _create_indices_to_keys
-
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Contains
- def __contains__(self, item):
- r"""Check whether a key is registered.
- """
- return item in self._keys_to_indices
- # end __contains__
-
- # Get representation
- def __repr__(self) -> str:
- """
- Get a string representation
- """
- return "dataindexer(key_to_idx: {}, size:{})".format(
- self._keys_to_indices,
- self._size
- )
- # end __repr__
-
- # endregion OVERRIDE
-
-# endregion DataIndexer
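
# A short sketch of how DataIndexer resolves keys, mirroring the class
# above (a hypothetical session; the removed module must be importable):
indexer = DataIndexer(["temperature", "pressure"])
print(indexer.to_index("pressure"))          # -> 1
print(indexer.to_index(["temperature", 1]))  # -> [0, 1]; plain ints pass through
print(indexer.to_keys(0))                    # -> "temperature"
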
-
-
-# Data Tensor
-class DataTensor(BaseTensor):
- r"""A special tensor with a key-to-index information.
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'DataTensor'],
- keys: Optional[List[Union[List[Any], None]]] = None
- ) -> None:
- r"""DataTensor constructor
-
- :param data: The data in a torch tensor to transform to datatensor.
- :type data: ``torch.Tensor`` or ``DataTensor``
- :param keys: A list with one set of keys per dimension (``None`` for a positionally indexed dimension).
- :type keys: ``list`` of ``list`` or ``None``
-
- """
- # Base tensor
- super(DataTensor, self).__init__(data)
-
- # Init if None
- keys = [None] * data.ndim if keys is None else keys
-
- # We check that all keys corresponds to a dimension
- if len(keys) != data.ndim:
- raise ValueError(
- "Keys should be given for each dimension ({} sets of key for {} dims)".format(len(keys), data.ndim)
- )
- # end if
-
- # Set tensor and keys
- self._keys = self._build_keys(keys)
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
- # Get keys
- @property
- def keys(self) -> List[Union[None, DataIndexer]]:
- r"""Get keys
- """
- return self._keys
- # end keys
-
- # endregion PROPERTIES
-
- # region PUBLIC
-
- # Get the set of keys for a dimension
- def key_set(self, dim: int) -> List:
- r"""Get the set of keys for a dimension
- """
- # Get dict for this dim
- dim_indexer = self._keys[dim]
-
- # If not empty
- if dim_indexer is not None:
- return dim_indexer.keys
- else:
- return list()
- # end if
- # end key_set
-
- # Get index for a key
- def get_index(self, dim: int, key: Any) -> Any:
- r"""Get index for a key.
-
- :param dim: The index of the dimension.
- :type dim: ``int``
- :param key: A key
- :type key: Any value
- """
- if type(key) is int:
- return key
- else:
- # Get dict for this dim
- dim_indexer = self._keys[dim]
-
- # If not empty
- if dim_indexer is not None:
- # Return value
- return dim_indexer.to_index(key)
- else:
- return key
- # end if
- # end if
- # end get_index
-
- # Get key for an index
- def get_key(self, dim: int, key: Any) -> Any:
- r"""Get key for an index.
- """
- if type(key) is int:
- # Get dict for this dim
- dim_indexer = self._keys[dim]
-
- # If not empty
- if dim_indexer is not None:
- # Return value
- return dim_indexer.to_keys(key)
- else:
- return None
- # end if
- else:
- return key
- # end if
- # end get_key
-
- # Key exists
- def is_in(self, dim: int, key: Any) -> bool:
- r"""Does the key exists.
- """
- # Get dict for this dim
- dim_indexer = self._keys[dim]
-
- # If not empty
- if dim_indexer is not None:
- # Return value
- return key in dim_indexer
- else:
- return False
- # end if
- # end is_in
-
- # endregion PUBLIC
-
- # region PRIVATE
-
- # Build data indexer
- def _build_keys(self, keys: List[Any]):
- r"""Build data indexers
- """
- key_indexers = list()
- for key in keys:
- if key is not None:
- key_indexers.append(DataIndexer(key))
- else:
- key_indexers.append(None)
- # end if
- # end for
- return key_indexers
- # end _build_keys
-
- # Build the key sets of the result tensor
- def _new_dataindex(self, index_item):
- r"""Build the key sets of the resulting tensor, dropping keys whose dimension is removed by the indexing.
-
- :param index_item: The indexing item (an index, a key, a slice, a list, or a tuple of these).
- :return: The list of key sets for the resulting tensor.
- :rtype: ``list``
- """
- # Keys
- output_keys = list()
- item_values = index_item if isinstance(index_item, tuple) else (index_item,)
-
- # Type of each indexing element
- item_types = [type(el) for el in item_values]
-
- # A dimension survives if it is not indexed at all, or is indexed by a list or a slice
- for el_i, data_index in enumerate(self.keys):
- if el_i >= len(item_types) or item_types[el_i] is list or item_types[el_i] is slice:
- if isinstance(data_index, DataIndexer):
- output_keys.append(data_index.keys)
- else:
- output_keys.append(None)
- # end if
- # end if
- # end for
-
- return output_keys
- # end _new_dataindex
-
- # Transform indexing item
- def _item_to_index(self, item) -> Union[List[int], Tuple[int], Any]:
- r"""Transform an item (with keys) for indexing to an index.
-
- :param item: An item element (tuple, key, slice, index)
- :type item:
- """
- # List, tuple or list
- if isinstance(item, list):
- return [self.get_index(0, el) for el in item]
- elif isinstance(item, tuple):
- return tuple([self.get_index(el_i, el) for el_i, el in enumerate(item)])
- else:
- return self.get_index(0, item)
- # end if
- # end _item_to_index
-
- # Transform indexing item to keys
- def _item_to_key(self, item) -> Union[List[Any], Tuple[Any], Any]:
- r"""Transform indexing item to keys
- """
- # List, tuple or list
- if isinstance(item, list):
- return [self.get_key(0, el) for el in item]
- elif isinstance(item, tuple):
- return tuple([self.get_key(el_i, el) for el_i, el in enumerate(item)])
- else:
- return self.get_key(0, item)
- # end _item_to_key
-
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # To
- def to(self, *args, **kwargs) -> 'DataTensor':
- r"""Performs TimeTensor dtype and/or device concersion. A ``torch.dtype`` and ``torch.device`` are inferred
- from the arguments of ``self.to(*args, **kwargs)
-
- .. note::
- From PyTorch documentation: if the ``self`` TimeTensor already has the correct ``torch.dtype`` and
- ``torch.device``, then ``self`` is returned. Otherwise, the returned timetensor is a copy of ``self``
- with the desired ``torch.dtype`` and ``torch.device``.
-
- Args:
- *args:
- **kwargs:
-
- Example::
- >>> ttensor = torch.randn(4, 4)
- >>> btensor = echotorch.datatensor(ttensor)
- >>> btensor.to(torch.float64)
-
- """
- # New tensor
- ntensor = self._tensor.to(*args, **kwargs)
-
- # Same tensor?
- if self._tensor is ntensor:
- return self
- else:
- return DataTensor(
- ntensor,
- keys=self._keys
- )
- # end if
- # end to
-
- # Get item
- def __getitem__(self, key_item) -> 'DataTensor':
- r"""Get data in the tensor, translating keys to indices where needed.
- """
- # Transform item to index
- index_item = self._item_to_index(key_item)
-
- # Get data
- tensor_data = self._tensor[index_item]
-
- # Keep only the keys of the dimensions that survive the indexing
- tensor_keys = self._new_dataindex(index_item)
-
- # Create a DataTensor
- return DataTensor(tensor_data, tensor_keys)
- # end __getitem__
-
- # Set item
- def __setitem__(self, key, value) -> None:
- """
- Set data in the tensor, translating keys to indices first
- """
- self._tensor[self._item_to_index(key)] = value
- # end __setitem__
-
- # Get representation
- def __repr__(self) -> str:
- """
- Get a string representation
- """
- return "datatensor({}, keys: {})".format(self._tensor, self._keys)
- # end __repr__
-
- # endregion OVERRIDE
-
-# endregion DATATENSOR
-
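
# Intended DataTensor usage, one key set per dimension (a sketch based on
# the constructor above; None leaves a dimension positionally indexed):
import torch

dt = DataTensor(torch.randn(4, 2), keys=[None, ["x", "y"]])
print(dt[:, "y"])  # "y" is resolved to column index 1 before indexing
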
diff --git a/echotorch/datasets/CopyTaskDataset.py b/echotorch/datasets/CopyTaskDataset.py
new file mode 100644
index 0000000..54d1c8c
--- /dev/null
+++ b/echotorch/datasets/CopyTaskDataset.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/CopyTaskDataset.py
+# Description : Dataset for the copy task (Graves et al, 2016)
+# Date : 16th of July, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Copy task dataset
+class CopyTaskDataset(Dataset):
+ """
+ Copy task dataset
+ """
+
+ # Constructor
+ def __init__(self, n_samples, length_min, length_max, n_inputs, dtype=torch.float32):
+ """
+ Constructor
+ :param sample_len: Sample's length
+ :param period:
+ """
+ # Properties
+ self.length_min = length_min
+ self.length_max = length_max
+ self.n_samples = n_samples
+ self.n_inputs = n_inputs
+ self.dtype = dtype
+
+ # Generate data set
+ self.samples = self._generate()
+ # end __init__
+
+ #region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.samples[idx]
+ # end __getitem__
+
+ #endregion OVERRIDE
+
+ #region PRIVATE
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # List of samples
+ samples = list()
+
+ # For each sample
+ for i in range(self.n_samples):
+ # Generate length
+ sample_len = torch.randint(low=self.length_min, high=self.length_max, size=(1,)).item()
+
+ # Create empty inputs and output
+ sample_inputs = torch.zeros((sample_len * 2 + 1, self.n_inputs + 1), dtype=self.dtype)
+ sample_outputs = torch.zeros((sample_len * 2 + 1, self.n_inputs + 1), dtype=self.dtype)
+
+ # Generate a random pattern
+ random_pattern = torch.randint(low=0, high=2, size=(sample_len, self.n_inputs))
+
+ # Set in inputs and outputs
+ sample_inputs[:sample_len, :self.n_inputs] = random_pattern
+ sample_outputs[sample_len+1:, :self.n_inputs] = random_pattern
+ sample_inputs[sample_len, self.n_inputs] = 1.0
+
+ # Append
+ samples.append((sample_inputs, sample_outputs))
+ # end for
+
+ return samples
+ # end _generate
+
+ #endregion PRIVATE
+
+# end CopyTaskDataset
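
# Usage sketch for the class above: each sample is an (inputs, targets)
# pair of shape (2 * L + 1, n_inputs + 1), where L is drawn uniformly in
# [length_min, length_max) and the extra channel is the copy marker.
dataset = CopyTaskDataset(n_samples=10, length_min=2, length_max=8, n_inputs=4)
x, y = dataset[0]
print(x.size(), y.size())  # e.g. torch.Size([11, 5]) twice when L = 5
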
diff --git a/echotorch/data/datasets/DatasetComposer.py b/echotorch/datasets/DatasetComposer.py
similarity index 90%
rename from echotorch/data/datasets/DatasetComposer.py
rename to echotorch/datasets/DatasetComposer.py
index d2d297f..bd46e60 100644
--- a/echotorch/data/datasets/DatasetComposer.py
+++ b/echotorch/datasets/DatasetComposer.py
@@ -5,12 +5,9 @@
import torch
from torch.utils.data.dataset import Dataset
-# Load imports
-from .EchoDataset import EchoDataset
-
# Dataset Composer
-class DatasetComposer(EchoDataset):
+class DatasetComposer(Dataset):
"""
Compose dataset
"""
@@ -41,7 +38,7 @@ def __init__(self, datasets, *args, **kwargs):
# end for
# end __init__
- # region OVERRIDE
+ #region OVERRIDE
# Length
def __len__(self):
@@ -67,9 +64,9 @@ def __getitem__(self, idx):
return self.datasets[d][e], outputs, torch.LongTensor([d])
# end __getitem__
- # endregion OVERRIDE
+ #endregion OVERRIDE
- # region PRIVATE
+ #region PRIVATE
# Create outputs
def _create_outputs(self, i, time_length):
@@ -89,6 +86,6 @@ def _create_outputs(self, i, time_length):
return outputs
# end _create_outputs
- # endregion PRIVATE
+ #endregion PRIVATE
# end DatasetComposer
diff --git a/echotorch/data/datasets/DiscreteMarkovChainDataset.py b/echotorch/datasets/DiscreteMarkovChainDataset.py
similarity index 63%
rename from echotorch/data/datasets/DiscreteMarkovChainDataset.py
rename to echotorch/datasets/DiscreteMarkovChainDataset.py
index e7d6210..cc54f9e 100644
--- a/echotorch/data/datasets/DiscreteMarkovChainDataset.py
+++ b/echotorch/datasets/DiscreteMarkovChainDataset.py
@@ -20,23 +20,19 @@
# Copyright Nils Schaetti
# Imports
-from typing import List, Tuple
+import math
import torch
import torch.distributions.multinomial
+from torch.utils.data.dataset import Dataset
import numpy as np
-# Local imports
-from .EchoDataset import EchoDataset
-
# Discrete Markov chain dataset
-class DiscreteMarkovChainDataset(EchoDataset):
+class DiscreteMarkovChainDataset(Dataset):
"""
Discrete Markov chain dataset
"""
- # region CONSTRUCTORS
-
# Constructor
def __init__(self, n_samples, sample_length, probability_matrix, *args, **kwargs):
"""
@@ -55,8 +51,6 @@ def __init__(self, n_samples, sample_length, probability_matrix, *args, **kwargs
self._n_states = probability_matrix.size(0)
# end __init__
- # endregion CONSTRUCTORS
-
# region PRIVATE
# Generate a markov chain from a probability matrix
@@ -66,91 +60,12 @@ def _generate_markov_chain(self, length, start_state=0):
:param length: Length of the sample to generate
:param start_state: Starting state
"""
- return self.datafunc(
- length=length,
- n_states=self._n_states,
- probability_matrix=self._probability_matrix,
- start_state=start_state
- )
- # end _generate_markov_chain
-
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Get the whole dataset
- @property
- def data(self) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
- """
- Get the whole dataset (according to init parameters)
- @return: The Torch Tensor
- """
- # List of x and y
- samples_in = list()
- samples_out = list()
-
- # For each samples
- for idx in range(self._n_samples):
- states_vector, next_states_vector = self[idx]
- samples_in.append(states_vector)
- samples_out.append(next_states_vector)
- # end for
-
- return samples_in, samples_out
- # end data
-
- # Extra representation
- def extra_repr(self) -> str:
- """
- Extra representation
- """
- return "n_samples={}, sample_length={}, probability_matrix={}, n_states={}".format(
- self._n_samples,
- self._sample_length,
- self._probability_matrix,
- self._n_states
- )
- # end extra_repr
-
- # Length
- def __len__(self):
- """
- Length
- :return:
- """
- return self._n_samples
- # end __len__
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- :param idx: Sample index
- :return: Sample as torch tensor
- """
- # Generate a Markov chain with
- # specified length.
- return self._generate_markov_chain(
- length=self._sample_length,
- start_state=np.random.randint(low=0, high=self._n_states)
- )
- # end __getitem__
-
- # Generate
- def datafunc(self, length, n_states, probability_matrix, start_state=0, dtype=torch.float64):
- """
- Generate
- :param length: Length
- :param n_states: How many states
- :param start_state: Starting state
- :param probability_matrix: Markov probability matrix
- """
# One-hot vector of states
- states_vector = torch.zeros(length, n_states, dtype=dtype)
+ states_vector = torch.zeros(length, self._n_states)
states_vector[0, start_state] = 1.0
# Next state to predict
- next_states_vector = torch.zeros(length, n_states, dtype=dtype)
+ next_states_vector = torch.zeros(length, self._n_states)
# Current state
current_state = start_state
@@ -158,11 +73,11 @@ def datafunc(self, length, n_states, probability_matrix, start_state=0, dtype=to
# For each time step
for t in range(1, length + 1):
# Probability to next states
- prob_next_states = probability_matrix[current_state]
+ prob_next_states = self._probability_matrix[current_state]
# Create a multinomial distribution from probs.
mnd = torch.distributions.multinomial.Multinomial(
- total_count=n_states,
+ total_count=self._n_states,
probs=prob_next_states
)
@@ -175,14 +90,42 @@ def datafunc(self, length, n_states, probability_matrix, start_state=0, dtype=to
# end if
# Save prediction
- next_states_vector[t - 1, next_state] = 1.0
+ next_states_vector[t-1, next_state] = 1.0
# Set as current
current_state = next_state
# end for
return states_vector, next_states_vector
- # end datafunc
+ # end _generate_markov_chain
+
+ # endregion PRIVATE
+
+ # region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self._n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx: Sample index
+ :return: Sample as torch tensor
+ """
+ # Generate a Markov chain with
+ # specified length.
+ return self._generate_markov_chain(
+ length=self._sample_length,
+ start_state=np.random.randint(low=0, high=self._n_states)
+ )
+ # end __getitem__
# endregion OVERRIDE
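
# Usage sketch for the dataset above: a two-state chain that tends to stay
# in its current state; each item is a pair of one-hot (length, n_states)
# tensors, the second shifted one step ahead as the prediction target.
import torch

probability_matrix = torch.tensor([[0.9, 0.1],
                                   [0.2, 0.8]])
dataset = DiscreteMarkovChainDataset(n_samples=4, sample_length=50,
                                     probability_matrix=probability_matrix)
states, next_states = dataset[0]
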
diff --git a/echotorch/data/datasets/FromCSVDataset.py b/echotorch/datasets/FromCSVDataset.py
similarity index 71%
rename from echotorch/data/datasets/FromCSVDataset.py
rename to echotorch/datasets/FromCSVDataset.py
index 0a37f64..d45d6cd 100644
--- a/echotorch/data/datasets/FromCSVDataset.py
+++ b/echotorch/datasets/FromCSVDataset.py
@@ -20,16 +20,13 @@
# Copyright Nils Schaetti
# Imports
-from typing import Tuple, List
import torch
+from torch.utils.data.dataset import Dataset
import csv
-# Local imports
-from .EchoDataset import EchoDataset
-
# Load Time series from a CSV file
-class FromCSVDataset(EchoDataset):
+class FromCSVDataset(Dataset):
"""
Load Time series from a CSV file
"""
@@ -52,86 +49,39 @@ def __init__(self, csv_file, columns, delimiter=",", quotechar='"', *args, **kwa
self._n_columns = len(columns)
self._delimiter = delimiter
self._quotechar = quotechar
+ self._column_indices = list()
# Load
- self._data, self._column_indices = self._load_from_csv()
+ self._data = self._load_from_csv()
# end __init__
# region PRIVATE
- # Load from CSV file
- def _load_from_csv(self):
- """
- Load from CSV file
- :return:
- """
- return self.generate(
- csv_file=self._csv_file,
- delimiter=self._delimiter,
- quotechar=self._quotechar,
- n_columns=self._n_columns,
- column_indices=self._column_indices
- )
- # end _load_from_csv
-
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Length
- def __len__(self):
- """
- Length
- :return:
- """
- return 1
- # end __len__
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- :param idx: Sample index
- :return: Sample as torch tensor
- """
- # Generate a Markov chain with
- # specified length.
- return self._data
- # end __getitem__
-
- # endregion OVERRIDE
-
- # region STATIC
-
# Find indices for each column
- @staticmethod
- def find_columns_indices(header_row, columns):
+ def _find_columns_indices(self, header_row):
"""
Find indices for each column
:param header_row: Header row
- :param columns: Columns
- :param column_indices: Column indices
"""
- column_indices = list()
- for col in columns:
+ for col in self._columns:
if col in header_row:
- column_indices.append(header_row.index(col))
+ self._column_indices.append(header_row.index(col))
else:
raise Exception("Not column \'{}\' found in the CSV".format(col))
# end if
# end for
- return column_indices
- # end find_columns_indices
+ # end _find_columns_indices
- # Generate data
- def datafunc(self, csv_file, delimiter, quotechar, columns) -> Tuple[torch.Tensor, List]:
+ # Load from CSV file
+ def _load_from_csv(self):
"""
- Generate data
+ Load from CSV file
+ :return:
"""
# Open CSV file
- with open(csv_file, 'r') as csvfile:
+ with open(self._csv_file, 'r') as csvfile:
# Read data
- spamreader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar)
+ spamreader = csv.reader(csvfile, delimiter=self._delimiter, quotechar=self._quotechar)
# Data
data = list()
@@ -140,13 +90,13 @@ def datafunc(self, csv_file, delimiter, quotechar, columns) -> Tuple[torch.Tenso
for row_i, row in enumerate(spamreader):
# First row is the column name
if row_i == 0:
- column_indices = FromCSVDataset.find_columns_indices(row, columns, column_indices)
+ self._find_columns_indices(row)
else:
# Row list
row_list = list()
# Add each column
- for idx in column_indices:
+ for idx in self._column_indices:
row_list.append(row[idx])
# end for
@@ -156,7 +106,7 @@ def datafunc(self, csv_file, delimiter, quotechar, columns) -> Tuple[torch.Tenso
# end for
# Create tensor
- data_tensor = torch.zeros(len(data), len(columns))
+ data_tensor = torch.zeros(len(data), self._n_columns)
# Insert data in tensor
for row_i, row in enumerate(data):
@@ -165,9 +115,35 @@ def datafunc(self, csv_file, delimiter, quotechar, columns) -> Tuple[torch.Tenso
# end for
# end for
- return data_tensor, column_indices
+ return data_tensor
# end for
+ # end _load_from_csv
+
+ # endregion PRIVATE
- # endregion STATIC
+ # region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return 1
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx: Sample index
+ :return: Sample as torch tensor
+ """
+ # Generate a Markov chain with
+ # specified length.
+ return self._data
+ # end __getitem__
+
+ # endregion OVERRIDE
# end FromCSVDataset
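
# Usage sketch, assuming a hypothetical "prices.csv" whose header row
# contains the named columns:
dataset = FromCSVDataset("prices.csv", columns=["open", "close"])
series = dataset[0]  # a (n_rows, 2) tensor of the selected columns
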
diff --git a/echotorch/data/datasets/HenonAttractor.py b/echotorch/datasets/HenonAttractor.py
similarity index 65%
rename from echotorch/data/datasets/HenonAttractor.py
rename to echotorch/datasets/HenonAttractor.py
index b564dc9..32ef165 100644
--- a/echotorch/data/datasets/HenonAttractor.py
+++ b/echotorch/datasets/HenonAttractor.py
@@ -7,12 +7,9 @@
from random import shuffle
import numpy as np
-# Load imports
-from .EchoDataset import EchoDataset
-
# Henon Attractor
-class HenonAttractor(EchoDataset):
+class HenonAttractor(Dataset):
"""
The Hénon map is a discrete-time dynamical system introduced by Michel Hénon. For the classical parameter
values a = 1.4 and b = 0.3, its iterates converge to the chaotic Hénon attractor
@@ -47,7 +44,32 @@ def __init__(self, sample_len, n_samples, xy, a, b, washout=0, normalize=False,
self.outputs = self._generate()
# end __init__
- # region PUBLIC
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PUBLIC
+ ##############################################
# Regenerate
def regenerate(self):
@@ -59,9 +81,9 @@ def regenerate(self):
self.outputs = self._generate()
# end regenerate
- # endregion PUBLIC
-
- # region PRIVATE
+ ##############################################
+ # PRIVATE
+ ##############################################
# Henon
def _henon(self, x, y):
@@ -125,93 +147,4 @@ def _generate(self):
return samples
# end _generate
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Length
- def __len__(self):
- """
- Length
- :return:
- """
- return self.n_samples
- # end __len__
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- :param idx:
- :return:
- """
- return self.outputs[idx]
- # end __getitem__
-
- # endregion OVERRIDE
-
- # region STATIC
-
- # Henon
- @staticmethod
- def henon(a, b, x, y):
- """
- Henon
- :param x:
- :param y:
- :return:
- """
- x_dot = 1 - (a * (x * x)) + y
- y_dot = b * x
- return x_dot, y_dot
- # end henon
-
- # Generate
- @staticmethod
- def generate(n_samples, sample_len, xy, a, b, washout, normalize=False, dtype=torch.float64):
- """
- Generate dataset
- :return:
- """
- # Sizes
- total_size = sample_len
-
- # Samples
- samples = list()
-
- # Washout
- for t in range(washout):
- xy = HenonAttractor.henon(a, b, xy[0], xy[1])
- # end for
-
- # For each sample
- for n in range(n_samples):
- # Tensor
- sample = torch.zeros(total_size, 2, dtype=dtype)
-
- # Timesteps
- for t in range(total_size):
- xy = HenonAttractor.henon(a, b, xy[0], xy[1])
- sample[t] = torch.tensor(xy)
- # end for
-
- # Normalize each dimension to [0, 1]
- if normalize:
- maxval = torch.max(sample, dim=0).values
- minval = torch.min(sample, dim=0).values
- sample = (sample - minval) / (maxval - minval)
- # end if
-
- # Add
- samples.append(sample)
- # end for
-
- # Shuffle
- shuffle(samples)
-
- return samples
- # end generate
-
- # endregion STATIC
-
# end HenonAttractor
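
# The underlying map, for reference: x' = 1 - a * x^2 + y, y' = b * x,
# chaotic at the classical parameters a = 1.4, b = 0.3 (standalone sketch).
def henon_step(x, y, a=1.4, b=0.3):
    return 1.0 - a * x * x + y, b * x

x, y = 0.0, 0.0
for _ in range(1000):
    x, y = henon_step(x, y)
print(x, y)  # a point near the Hénon attractor
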
diff --git a/echotorch/data/datasets/ImageToTimeseries.py b/echotorch/datasets/ImageToTimeseries.py
similarity index 97%
rename from echotorch/data/datasets/ImageToTimeseries.py
rename to echotorch/datasets/ImageToTimeseries.py
index d2765d1..5c3f493 100644
--- a/echotorch/data/datasets/ImageToTimeseries.py
+++ b/echotorch/datasets/ImageToTimeseries.py
@@ -25,12 +25,9 @@
import torch
from torch.utils.data import Dataset
-# Local imports
-from .EchoDataset import EchoDataset
-
# Image to timeseries dataset
-class ImageToTimeseries(EchoDataset):
+class ImageToTimeseries(Dataset):
"""
Image to timeseries dataset
"""
diff --git a/echotorch/data/datasets/LambdaDataset.py b/echotorch/datasets/LambdaDataset.py
similarity index 83%
rename from echotorch/data/datasets/LambdaDataset.py
rename to echotorch/datasets/LambdaDataset.py
index ccc24fc..fb0f0ab 100644
--- a/echotorch/data/datasets/LambdaDataset.py
+++ b/echotorch/datasets/LambdaDataset.py
@@ -4,21 +4,17 @@
# Imports
import torch
from torch.utils.data.dataset import Dataset
-
-# Local imports
-from .EchoDataset import EchoDataset
+import numpy as np
# Lambda dataset
-class LambdaDataset(EchoDataset):
+class LambdaDataset(Dataset):
"""
Create simple periodic signal timeseries
"""
- # region CONSTRUCTORS
-
# Constructor
- def __init__(self, sample_len, n_samples, func, start=0, dtype=None):
+ def __init__(self, sample_len, n_samples, func, start=0, dtype=torch.float32):
"""
Constructor
:param sample_len: Sample's length
@@ -35,9 +31,32 @@ def __init__(self, sample_len, n_samples, func, start=0, dtype=None):
self.outputs = self._generate()
# end __init__
- # endregion CONSTRUCTORS
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
- # region PRIVATE
+ ##############################################
+ # PRIVATE
+ ##############################################
# Generate
def _generate(self):
@@ -65,29 +84,4 @@ def _generate(self):
return samples
# end _generate
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Length
- def __len__(self):
- """
- Length
- :return:
- """
- return self.n_samples
- # end __len__
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- :param idx:
- :return:
- """
- return self.outputs[idx]
- # end __getitem__
-
- # endregion OVERRIDE
-
# end LambdaDataset
diff --git a/echotorch/data/datasets/LatchTaskDataset.py b/echotorch/datasets/LatchTaskDataset.py
similarity index 95%
rename from echotorch/data/datasets/LatchTaskDataset.py
rename to echotorch/datasets/LatchTaskDataset.py
index 0a34a10..14722a8 100644
--- a/echotorch/data/datasets/LatchTaskDataset.py
+++ b/echotorch/datasets/LatchTaskDataset.py
@@ -21,13 +21,12 @@
# Imports
import torch
-
-# Local imports
-from .EchoDataset import EchoDataset
+from torch.utils.data.dataset import Dataset
+import numpy as np
# Latch task dataset
-class LatchTaskDataset(EchoDataset):
+class LatchTaskDataset(Dataset):
"""
Latch task dataset
"""
@@ -50,7 +49,7 @@ def __init__(self, n_samples, length_min, length_max, n_pics, dtype=torch.float3
self.samples = self._generate()
# end __init__
- # region OVERRIDE
+ #region OVERRIDE
# Length
def __len__(self):
@@ -71,9 +70,9 @@ def __getitem__(self, idx):
return self.samples[idx]
# end __getitem__
- # endregion OVERRIDE
+ #endregion OVERRIDE
- # region PRIVATE
+ #region PRIVATE
# Generate
def _generate(self):
@@ -143,6 +142,6 @@ def _generate(self):
return samples
# end _generate
- # endregion PRIVATE
+ #endregion PRIVATE
# end LatchTaskDataset
diff --git a/echotorch/data/datasets/LogisticMapDataset.py b/echotorch/datasets/LogisticMapDataset.py
similarity index 63%
rename from echotorch/data/datasets/LogisticMapDataset.py
rename to echotorch/datasets/LogisticMapDataset.py
index 48360fd..12fa1e7 100644
--- a/echotorch/data/datasets/LogisticMapDataset.py
+++ b/echotorch/datasets/LogisticMapDataset.py
@@ -1,34 +1,14 @@
# -*- coding: utf-8 -*-
#
-# File : echotorch/datasets/LogisticMapDataset.py
-# Description : Generate series from the Logistic Map
-# Date : 16th of July, 2020
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
# Imports
import torch
+from torch.utils.data.dataset import Dataset
import numpy as np
-# Local imports
-from .EchoDataset import EchoDataset
-
# Logistic Map dataset
-class LogisticMapDataset(EchoDataset):
+class LogisticMapDataset(Dataset):
"""
Logistic Map dataset
"""
@@ -62,8 +42,6 @@ def __init__(self, sample_len, n_samples, alpha=5, beta=11, gamma=13, c=3.6, b=0
# end if
# end __init__
- # region OVERRIDE
-
# Length
def __len__(self):
"""
@@ -96,9 +74,9 @@ def __getitem__(self, idx):
return series
# end __getitem__
- # endregion OVERRIDE
-
- # region PRIVATE
+ #######################################
+ # Private
+ #######################################
# Logistic map
def _logistic_map(self, x, r):
@@ -111,6 +89,4 @@ def _logistic_map(self, x, r):
return r * x * (1-x)
# end logistic_map
- # endregion PRIVATE
-
# end LogisticMapDataset
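
# The recurrence behind the dataset, for reference: x_{t+1} = r * x_t * (1 - x_t),
# which is chaotic for most values of r close to 4 (standalone sketch).
x, r = 0.5, 3.9
orbit = []
for _ in range(100):
    x = r * x * (1 - x)
    orbit.append(x)
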
diff --git a/echotorch/data/datasets/LorenzAttractor.py b/echotorch/datasets/LorenzAttractor.py
similarity index 77%
rename from echotorch/data/datasets/LorenzAttractor.py
rename to echotorch/datasets/LorenzAttractor.py
index f2a16c5..203cf13 100644
--- a/echotorch/data/datasets/LorenzAttractor.py
+++ b/echotorch/datasets/LorenzAttractor.py
@@ -1,34 +1,14 @@
# -*- coding: utf-8 -*-
#
-# File : echotorch/datasets/EchoDataset.py
-# Description : Base class for EchoTorch datasets
-# Date : 25th of January, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
# Imports
import torch
from torch.utils.data.dataset import Dataset
-
-# Local imports
-from .EchoDataset import EchoDataset
+import numpy as np
# Lorenz Attractor
-class LorenzAttractor(EchoDataset):
+class LorenzAttractor(Dataset):
"""
The Lorenz attractor is the attractor of the Lorenz system, a system of three non-linear ordinary differential
equations originally studied by Edward Lorenz. These differential equations define a continuous-time dynamical
@@ -65,7 +45,32 @@ def __init__(self, sample_len, n_samples, xyz, sigma, b, r, dt=0.01, washout=0,
self.outputs = self._generate()
# end __init__
- # region PUBLIC
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PUBLIC
+ ##############################################
# Regenerate
def regenerate(self):
@@ -77,9 +82,9 @@ def regenerate(self):
self.outputs = self._generate()
# end regenerate
- # endregion PUBLIC
-
- # region PRIVATE
+ ##############################################
+ # PRIVATE
+ ##############################################
# Lorenz
def _lorenz(self, x, y, z):
@@ -94,7 +99,6 @@ def _lorenz(self, x, y, z):
y_dot = self.r * x - y - x * z
z_dot = x * y - self.b * z
return x_dot, y_dot, z_dot
-
# end _lorenz
# Generate
@@ -156,29 +160,4 @@ def _generate(self):
return samples
# end _generate
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Length
- def __len__(self):
- """
- Length
- :return:
- """
- return self.n_samples
- # end __len__
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- :param idx:
- :return:
- """
- return self.outputs[idx]
- # end __getitem__
-
- # endregion OVERRIDE
-
# end LorenzAttractor
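
# The standard Lorenz derivatives integrated with a plain Euler step
# (a standalone sketch; the class above computes the same derivatives):
def lorenz_step(x, y, z, sigma=10.0, r=28.0, b=8.0 / 3.0, dt=0.01):
    x_dot = sigma * (y - x)
    y_dot = r * x - y - x * z
    z_dot = x * y - b * z
    return x + dt * x_dot, y + dt * y_dot, z + dt * z_dot

x, y, z = 1.0, 1.0, 1.0
for _ in range(10000):
    x, y, z = lorenz_step(x, y, z)
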
diff --git a/echotorch/data/datasets/MackeyGlass2DDataset.py b/echotorch/datasets/MackeyGlass2DDataset.py
similarity index 96%
rename from echotorch/data/datasets/MackeyGlass2DDataset.py
rename to echotorch/datasets/MackeyGlass2DDataset.py
index 4e0fdbd..06f7c88 100644
--- a/echotorch/data/datasets/MackeyGlass2DDataset.py
+++ b/echotorch/datasets/MackeyGlass2DDataset.py
@@ -3,13 +3,11 @@
# Imports
import torch
-
-# Local imports
-from .EchoDataset import EchoDataset
+from torch.utils.data.dataset import Dataset
# Mackey Glass dataset
-class MackeyGlass2DDataset(EchoDataset):
+class MackeyGlass2DDataset(Dataset):
"""
Mackey Glass 2D dataset
"""
diff --git a/echotorch/data/datasets/MackeyGlassDataset.py b/echotorch/datasets/MackeyGlassDataset.py
similarity index 66%
rename from echotorch/data/datasets/MackeyGlassDataset.py
rename to echotorch/datasets/MackeyGlassDataset.py
index 44daeac..136e46c 100644
--- a/echotorch/data/datasets/MackeyGlassDataset.py
+++ b/echotorch/datasets/MackeyGlassDataset.py
@@ -6,12 +6,9 @@
from torch.utils.data.dataset import Dataset
import collections
-# Local imports
-from .EchoDataset import EchoDataset
-
# Mackey Glass dataset
-class MackeyGlassDataset(EchoDataset):
+class MackeyGlassDataset(Dataset):
"""
Mackey Glass dataset
"""
@@ -39,8 +36,6 @@ def __init__(self, sample_len, n_samples, tau=17, seed=None):
# end if
# end __init__
- # region OVERRIDE
-
# Length
def __len__(self):
"""
@@ -80,36 +75,4 @@ def __getitem__(self, idx):
return inputs[:-1], inputs[1:]
# end __getitem__
- # endregion OVERRIDE
-
- # region STATIC
-
- # Generate
- @staticmethod
- def generate(sample_len, history_len, delta_t):
- # History of past values, randomly perturbed around 1.2
- history = collections.deque(1.2 * torch.ones(history_len) + 0.2 * (torch.rand(history_len) - 0.5))
-
- # Preallocate tensor for the time series
- inp = torch.zeros(sample_len, 1)
-
- # Current value of the series
- timeseries = history[-1]
-
- # For each time step, sub-sampled by delta_t
- for timestep in range(sample_len):
- for _ in range(delta_t):
- xtau = history.popleft()
- history.append(timeseries)
- timeseries = history[-1] + (0.2 * xtau / (1.0 + xtau ** 10) - 0.1 * history[-1]) / delta_t
- # end for
- inp[timestep] = timeseries
- # end for
-
- # Squash the series
- inputs = torch.tan(inp - 1)
-
- return inputs[:-1], inputs[1:]
- # end generate
-
- # endregion STATIC
-
# end MackeyGlassDataset
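
# The Mackey-Glass delay equation the dataset discretises, as a standalone
# sketch (tau = 17 is the classical chaotic regime; delta_t subdivides each
# output step, matching the update rule above):
import collections
import torch

tau, delta_t, sample_len = 17, 10, 200
history = collections.deque(1.2 * torch.ones(tau) + 0.2 * (torch.rand(tau) - 0.5))
x = history[-1]
series = torch.zeros(sample_len)
for t in range(sample_len):
    for _ in range(delta_t):
        xtau = history.popleft()
        history.append(x)
        x = history[-1] + (0.2 * xtau / (1.0 + xtau ** 10) - 0.1 * history[-1]) / delta_t
    series[t] = x
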
diff --git a/echotorch/data/datasets/MarkovChainDataset.py b/echotorch/datasets/MarkovChainDataset.py
similarity index 98%
rename from echotorch/data/datasets/MarkovChainDataset.py
rename to echotorch/datasets/MarkovChainDataset.py
index 768390e..ebb8e9d 100644
--- a/echotorch/data/datasets/MarkovChainDataset.py
+++ b/echotorch/datasets/MarkovChainDataset.py
@@ -21,16 +21,15 @@
# Imports
import math
+import numpy as np
import torch
import torch.distributions.multinomial
+from torch.utils.data.dataset import Dataset
import numpy as np
-# Local imports
-from .EchoDataset import EchoDataset
-
# Markov chain dataset from patterns
-class MarkovChainDataset(EchoDataset):
+class MarkovChainDataset(Dataset):
"""
Markov chain dataset from patterns
"""
@@ -57,8 +56,6 @@ def __init__(self, datasets, states_length, morphing_length, n_samples, sample_l
self._total_length = sample_length * (states_length + 2 * morphing_length)
self._random_start = random_start
self.n_samples = n_samples
- # end __init__
-
# region PRIVATE
# Generate a markov chain from a probability matrix
diff --git a/echotorch/data/datasets/MemTestDataset.py b/echotorch/datasets/MemTestDataset.py
similarity index 55%
rename from echotorch/data/datasets/MemTestDataset.py
rename to echotorch/datasets/MemTestDataset.py
index d821d36..8620125 100644
--- a/echotorch/data/datasets/MemTestDataset.py
+++ b/echotorch/datasets/MemTestDataset.py
@@ -1,36 +1,16 @@
# -*- coding: utf-8 -*-
#
-# File : echotorch/datasets/MemTestDataset.py
-# Description : Base class for EchoTorch datasets
-# Date : 25th of January, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
# Imports
import torch
-
-# Local imports
-from .EchoDataset import EchoDataset
+from torch.utils.data.dataset import Dataset
# Generates a series of input timeseries and delayed versions as outputs.
-class MemTestDataset(EchoDataset):
+class MemTestDataset(Dataset):
"""
- Generates a series of input time series and delayed versions as outputs.
- Delay is given in number of time steps. Can be used to empirically measure the
+ Generates a series of input timeseries and delayed versions as outputs.
+ Delay is given in number of timesteps. Can be used to empirically measure the
memory capacity of a system.
"""
@@ -54,8 +34,6 @@ def __init__(self, sample_len, n_samples, n_delays=10, seed=None):
# end if
# end __init__
- # region OVERRIDE
-
# Length
def __len__(self):
"""
@@ -80,6 +58,4 @@ def __getitem__(self, idx):
return inputs, outputs
# end __getitem__
- # endregion OVERRIDE
-
# end MemTestDataset
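
# The idea in a few lines (a sketch of the input/target layout; the exact
# delay convention used by the class is an assumption here): output
# channel k is the input delayed by k + 1 timesteps.
import torch

sample_len, n_delays = 30, 10
inputs = torch.rand(sample_len, 1) - 0.5
outputs = torch.zeros(sample_len, n_delays)
for k in range(n_delays):
    outputs[k + 1:, k] = inputs[:sample_len - k - 1, 0]
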
diff --git a/echotorch/data/datasets/MixedSinesDataset.py b/echotorch/datasets/MixedSinesDataset.py
similarity index 89%
rename from echotorch/data/datasets/MixedSinesDataset.py
rename to echotorch/datasets/MixedSinesDataset.py
index f1a9785..275272f 100644
--- a/echotorch/data/datasets/MixedSinesDataset.py
+++ b/echotorch/datasets/MixedSinesDataset.py
@@ -25,12 +25,9 @@
import math
import numpy as np
-# Local imports
-from .EchoDataset import EchoDataset
-
# Mixed sines dataset
-class MixedSinesDataset(EchoDataset):
+class MixedSinesDataset(Dataset):
"""
Mixed sines dataset
"""
@@ -56,7 +53,9 @@ def __init__(self, sample_len, n_samples, periods, amplitudes, phases, dtype=tor
self.outputs = self._generate()
# end __init__
- # region OVERRIDE
+ #############################################
+ # OVERRIDE
+ #############################################
# Length
def __len__(self):
@@ -77,9 +76,9 @@ def __getitem__(self, idx):
return self.outputs[idx]
# end __getitem__
- # endregion OVERRIDE
-
- # region PUBLIC
+ ##############################################
+ # PUBLIC
+ ##############################################
# Regenerate
def regenerate(self):
@@ -91,9 +90,9 @@ def regenerate(self):
self.outputs = self._generate()
# end regenerate
- # endregion PUBLIC
-
- # region PRIVATE
+ ##############################################
+ # PRIVATE
+ ##############################################
# Random initial points
def random_initial_points(self):
@@ -131,6 +130,4 @@ def _generate(self):
return samples
# end _generate
- # endregion PRIVATE
-
# end MixedSinesDataset
diff --git a/echotorch/data/datasets/NARMADataset.py b/echotorch/datasets/NARMADataset.py
similarity index 90%
rename from echotorch/data/datasets/NARMADataset.py
rename to echotorch/datasets/NARMADataset.py
index acc21d6..50264ed 100644
--- a/echotorch/data/datasets/NARMADataset.py
+++ b/echotorch/datasets/NARMADataset.py
@@ -21,13 +21,11 @@
# Imports
import torch
-
-# Local imports
-from .EchoDataset import EchoDataset
+from torch.utils.data.dataset import Dataset
# 10th order NARMA task
-class NARMADataset(EchoDataset):
+class NARMADataset(Dataset):
"""
xth order NARMA task
WARNING: this is an unstable dataset. There is a small chance the system becomes
@@ -35,8 +33,6 @@ class NARMADataset(EchoDataset):
where this problem happens less often.
"""
- # region CONSTUCTORS
-
# Constructor
def __init__(self, sample_len, n_samples, system_order=10):
"""
@@ -68,9 +64,32 @@ def __init__(self, sample_len, n_samples, system_order=10):
self.inputs, self.outputs = self._generate()
# end __init__
- # endregion CONSTRUCTORS
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.inputs[idx], self.outputs[idx]
+ # end __getitem__
- # region PRIVATE
+ ##############################################
+ # PRIVATE
+ ##############################################
# Generate
def _generate(self):
@@ -86,7 +105,7 @@ def _generate(self):
for k in range(self.system_order - 1, self.sample_len - 1):
outs[k + 1] = self.parameters[0] * outs[k] + self.parameters[1] * outs[k] * torch.sum(
outs[k - (self.system_order - 1):k + 1]) + 1.5 * ins[k - int(self.parameters[2])] * ins[k] + \
- self.parameters[3]
+ self.parameters[3]
# end for
inputs.append(ins)
outputs.append(outs)
@@ -95,29 +114,4 @@ def _generate(self):
return inputs, outputs
# end _generate
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Length
- def __len__(self):
- """
- Length
- :return:
- """
- return self.n_samples
- # end __len__
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- :param idx:
- :return:
- """
- return self.inputs[idx], self.outputs[idx]
- # end __getitem__
-
- # endregion OVERRIDE
-
# end NARMADataset
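
# The NARMA recurrence generated above in scalar form, with the standard
# 10th-order constants (a standalone sketch mirroring the _generate loop):
# y(k+1) = 0.3 y(k) + 0.05 y(k) * sum_{i=0}^{9} y(k-i) + 1.5 u(k-9) u(k) + 0.1
import torch

order, sample_len = 10, 200
u = torch.rand(sample_len) * 0.5
y = torch.zeros(sample_len)
for k in range(order - 1, sample_len - 1):
    y[k + 1] = (0.3 * y[k]
                + 0.05 * y[k] * torch.sum(y[k - order + 1:k + 1])
                + 1.5 * u[k - order + 1] * u[k]
                + 0.1)
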
diff --git a/echotorch/data/datasets/PeriodicSignalDataset.py b/echotorch/datasets/PeriodicSignalDataset.py
similarity index 91%
rename from echotorch/data/datasets/PeriodicSignalDataset.py
rename to echotorch/datasets/PeriodicSignalDataset.py
index f6f1a60..a02b624 100644
--- a/echotorch/data/datasets/PeriodicSignalDataset.py
+++ b/echotorch/datasets/PeriodicSignalDataset.py
@@ -24,12 +24,9 @@
from torch.utils.data.dataset import Dataset
import numpy as np
-# Local imports
-from .EchoDataset import EchoDataset
-
# Periodic signal timeseries
-class PeriodicSignalDataset(EchoDataset):
+class PeriodicSignalDataset(Dataset):
"""
Create simple periodic signal timeseries
"""
@@ -61,7 +58,32 @@ def __init__(self, sample_len, n_samples, period, start=1, dtype=torch.float64):
self.outputs = self._generate()
# end __init__
- # region PRIVATE
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PRIVATE
+ ##############################################
# Generate
def _generate(self):
@@ -89,30 +111,4 @@ def _generate(self):
return samples
# end _generate
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Length
- def __len__(self):
- """
- Length
- :return:
- """
- return self.n_samples
- # end __len__
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- :param idx:
- :return:
- """
- return self.outputs[idx]
- # end __getitem__
-
- # endregion OVERRIDE
-
-
# end PeriodicSignalDataset
diff --git a/echotorch/data/datasets/RandomSymbolDataset.py b/echotorch/datasets/RandomSymbolDataset.py
similarity index 96%
rename from echotorch/data/datasets/RandomSymbolDataset.py
rename to echotorch/datasets/RandomSymbolDataset.py
index baadb0c..15f6973 100644
--- a/echotorch/data/datasets/RandomSymbolDataset.py
+++ b/echotorch/datasets/RandomSymbolDataset.py
@@ -21,13 +21,11 @@
# Imports
import torch
-
-# Local imports
-from .EchoDataset import EchoDataset
+from torch.utils.data.dataset import Dataset
# Sequences of symbols taken randomly
-class RandomSymbolDataset(EchoDataset):
+class RandomSymbolDataset(Dataset):
"""
Sequences of symbols taken randomly
"""
diff --git a/echotorch/data/datasets/RepeatTaskDataset.py b/echotorch/datasets/RepeatTaskDataset.py
similarity index 97%
rename from echotorch/data/datasets/RepeatTaskDataset.py
rename to echotorch/datasets/RepeatTaskDataset.py
index a263120..ef5a924 100644
--- a/echotorch/data/datasets/RepeatTaskDataset.py
+++ b/echotorch/datasets/RepeatTaskDataset.py
@@ -23,12 +23,9 @@
import torch
from torch.utils.data.dataset import Dataset
-# Local imports
-from .EchoDataset import EchoDataset
-
# Repeat task dataset
-class RepeatTaskDataset(EchoDataset):
+class RepeatTaskDataset(Dataset):
"""
Repeat task dataset
"""
diff --git a/echotorch/data/datasets/RosslerAttractor.py b/echotorch/datasets/RosslerAttractor.py
similarity index 77%
rename from echotorch/data/datasets/RosslerAttractor.py
rename to echotorch/datasets/RosslerAttractor.py
index 121c82e..bf8a9c1 100644
--- a/echotorch/data/datasets/RosslerAttractor.py
+++ b/echotorch/datasets/RosslerAttractor.py
@@ -1,34 +1,14 @@
# -*- coding: utf-8 -*-
#
-# File : echotorch/datasets/RosslerAttractor.py
-# Description : Rossler Attractor
-# Date : 25th of January, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
# Imports
import torch
+from torch.utils.data.dataset import Dataset
import numpy as np
-# Local imports
-from .EchoDataset import EchoDataset
-
# Rossler Attractor
-class RosslerAttractor(EchoDataset):
+class RosslerAttractor(Dataset):
"""
The Rössler attractor is the attractor for the Rössler system, a system of three non-linear ordinary differential
equations originally studied by Otto Rössler. These differential equations define a continuous-time dynamical
@@ -65,7 +45,9 @@ def __init__(self, sample_len, n_samples, xyz, a, b, c, dt=0.01, washout=0, norm
self.outputs = self._generate()
# end __init__
- # region OVERRIDE
+ #############################################
+ # OVERRIDE
+ #############################################
# Length
def __len__(self):
@@ -86,9 +68,9 @@ def __getitem__(self, idx):
return self.outputs[idx]
# end __getitem__
- # endregion OVERRIDE
-
- # region PUBLIC
+ ##############################################
+ # PUBLIC
+ ##############################################
# Regenerate
def regenerate(self):
@@ -100,9 +82,9 @@ def regenerate(self):
self.outputs = self._generate()
# end regenerate
- # endregion PUBLIC
-
- # region PRIVATE
+ ##############################################
+ # PRIVATE
+ ##############################################
# Rossler
def _rossler(self, x, y, z):
@@ -180,6 +162,4 @@ def _generate(self):
return samples
# end _generate
- # endregion PRIVATE
-
# end RosslerAttractor
diff --git a/echotorch/data/datasets/SinusoidalTimeseries.py b/echotorch/datasets/SinusoidalTimeseries.py
similarity index 97%
rename from echotorch/data/datasets/SinusoidalTimeseries.py
rename to echotorch/datasets/SinusoidalTimeseries.py
index 1578529..31915d4 100644
--- a/echotorch/data/datasets/SinusoidalTimeseries.py
+++ b/echotorch/datasets/SinusoidalTimeseries.py
@@ -21,15 +21,13 @@
# Imports
import torch
+from torch.utils.data.dataset import Dataset
import math
import numpy as np
-# Local imports
-from .EchoDataset import EchoDataset
-
# Sinusoidal Timeseries
-class SinusoidalTimeseries(EchoDataset):
+class SinusoidalTimeseries(Dataset):
"""
Sinusoidal timeseries
"""
diff --git a/echotorch/data/datasets/SwitchAttractorDataset.py b/echotorch/datasets/SwitchAttractorDataset.py
similarity index 87%
rename from echotorch/data/datasets/SwitchAttractorDataset.py
rename to echotorch/datasets/SwitchAttractorDataset.py
index 36cfbf2..e37b72e 100644
--- a/echotorch/data/datasets/SwitchAttractorDataset.py
+++ b/echotorch/datasets/SwitchAttractorDataset.py
@@ -6,12 +6,9 @@
from torch.utils.data.dataset import Dataset
import numpy as np
-# Local imports
-from .EchoDataset import EchoDataset
-
# Switch attractor dataset
-class SwitchAttractorDataset(EchoDataset):
+class SwitchAttractorDataset(Dataset):
"""
Generate a dataset where the reservoir must switch
between two attractors.
@@ -23,6 +20,7 @@ def __init__(self, sample_len, n_samples, seed=None):
Constructor
:param sample_len: Length of the time-series in time steps.
:param n_samples: Number of samples to generate.
:param seed: Seed of random number generator.
"""
# Properties
@@ -38,7 +36,32 @@ def __init__(self, sample_len, n_samples, seed=None):
self.inputs, self.outputs = self._generate()
# end __init__
- # region PRIVATE
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.inputs[idx], self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PRIVATE
+ ##############################################
# Generate
def _generate(self):
@@ -79,29 +102,4 @@ def _generate(self):
return inputs, outputs
# end _generate
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Length
- def __len__(self):
- """
- Length
- :return:
- """
- return self.n_samples
- # end __len__
-
- # Get item
- def __getitem__(self, idx):
- """
- Get item
- :param idx:
- :return:
- """
- return self.inputs[idx], self.outputs[idx]
- # end __getitem__
-
- # endregion OVERRIDE
-
# end SwitchAttractorDataset
diff --git a/echotorch/data/datasets/TimeseriesBatchSequencesDataset.py b/echotorch/datasets/TimeseriesBatchSequencesDataset.py
similarity index 86%
rename from echotorch/data/datasets/TimeseriesBatchSequencesDataset.py
rename to echotorch/datasets/TimeseriesBatchSequencesDataset.py
index 8dd40f5..e980538 100644
--- a/echotorch/data/datasets/TimeseriesBatchSequencesDataset.py
+++ b/echotorch/datasets/TimeseriesBatchSequencesDataset.py
@@ -23,20 +23,18 @@
# Imports
import math
import torch
-
-# Local imports
-from .EchoDataset import EchoDataset
+from torch.utils.data import Dataset
# Timeseries batch cutting
-class TimeseriesBatchSequencesDataset(EchoDataset):
+class TimeseriesBatchSequencesDataset(Dataset):
"""
Take a dataset of timeseries and cut all of them by window size and compose batches
"""
# Constructor
- def __init__(self, root_dataset, window_size, data_indices, stride, remove_indices, time_axis=0,
- dataset_in_memory=False, *args, **kwargs):
+ def __init__(self, root_dataset, window_size, data_indices, stride, time_axis=0, dataset_in_memory=False,
+ *args, **kwargs):
"""
Constructor
:param root_dataset: Root dataset
@@ -55,7 +53,6 @@ def __init__(self, root_dataset, window_size, data_indices, stride, remove_indic
self.stride = stride
self.time_axis = time_axis
self.dataset_in_memory = dataset_in_memory
- self.remove_indices = remove_indices
# Dataset information
self.timeseries_lengths = list()
@@ -69,7 +66,7 @@ def __init__(self, root_dataset, window_size, data_indices, stride, remove_indic
self._load_dataset()
# end __init__
- # region PRIVATE
+ #region PRIVATE
# Load dataset
def _load_dataset(self):
@@ -90,7 +87,6 @@ def _load_dataset(self):
# Length of timeseries in number of samples (sequences)
timeserie_length = timeserie_data.size(self.time_axis)
-
# timeserie_seq_length = int(math.floor(timeserie_length / self.window_size))
timeserie_seq_length = int(math.floor((timeserie_length - self.window_size) / self.stride) + 1)
@@ -112,9 +108,9 @@ def _load_dataset(self):
self.n_samples = item_position
# end _load_dataset
- # endregion PRIVATE
+ #endregion PRIVATE
- # region OVERRIDE
+ #region OVERRIDE
# Get a sample in the dataset
def __getitem__(self, item):
@@ -123,7 +119,6 @@ def __getitem__(self, item):
:param item: Item index (start 0)
:return: Dataset sample
"""
- # print("__getitem__ {}".format(item))
# Go through each samples in the root dataset
for item_i in range(len(self.root_dataset)):
# Timeserie info
@@ -142,7 +137,7 @@ def __getitem__(self, item):
# sequence_start = (item - ts_start_end['start']) * self.window_size
sequence_start = (item - ts_start_end['start']) * self.stride
sequence_end = sequence_start + self.window_size
- sequence_range = range(sequence_start, sequence_end)
+ sequence_range = list(range(sequence_start, sequence_end))
# For each data to transform
if self.data_indices is not None:
@@ -158,20 +153,8 @@ def __getitem__(self, item):
data = torch.index_select(data, self.time_axis, torch.tensor(sequence_range))
# end if
- # For each data to add batch to
- new_data = list()
- if self.remove_indices is not None:
- for data_i in range(len(data)):
- if data_i not in self.remove_indices:
- new_data.append(data[data_i])
- # end if
- # end for
- else:
- new_data = data
- # end if
-
# Return modified data
- return new_data
+ return data
# end if
# end for
# end __getitem__
@@ -196,7 +179,7 @@ def __len__(self):
return self.n_samples
# end __len__
- # endregion OVERRIDE
+ #endregion OVERRIDE
# end TimeseriesBatchSequencesDataset
diff --git a/echotorch/data/datasets/TransformDataset.py b/echotorch/datasets/TransformDataset.py
similarity index 97%
rename from echotorch/data/datasets/TransformDataset.py
rename to echotorch/datasets/TransformDataset.py
index 661eb89..a383962 100644
--- a/echotorch/data/datasets/TransformDataset.py
+++ b/echotorch/datasets/TransformDataset.py
@@ -22,13 +22,11 @@
# Imports
import math
import torch
-
-# Local imports
-from .EchoDataset import EchoDataset
+from torch.utils.data import Dataset
# Transform dataset
-class TransformDataset(EchoDataset):
+class TransformDataset(Dataset):
"""
Apply a transformation to a dataset.
"""
diff --git a/echotorch/data/datasets/TripletBatching.py b/echotorch/datasets/TripletBatching.py
similarity index 93%
rename from echotorch/data/datasets/TripletBatching.py
rename to echotorch/datasets/TripletBatching.py
index 1015998..0bc7580 100644
--- a/echotorch/data/datasets/TripletBatching.py
+++ b/echotorch/datasets/TripletBatching.py
@@ -23,22 +23,19 @@
# Imports
import random
-
-# Local imports
-from .EchoDataset import EchoDataset
+from torch.utils.data import Dataset
# Triplet batching
-class TripletBatching(EchoDataset):
+class TripletBatching(Dataset):
"""
Take a dataset with different classes and create a dataset of triplets with an anchor (A) and positive
example (same class) and a negative one (different class).
"""
# Constructor
- def __init__(
- self, root_dataset, data_index, target_index, target_count, n_samples, target_type='int', *args, **kwargs
- ) -> None:
+ def __init__(self, root_dataset, data_index, target_index, target_count, n_samples,
+ target_type='int', *args, **kwargs):
"""
Constructor
:param root_dataset: The main dataset
@@ -69,7 +66,7 @@ def __init__(
self._analyse_dataset()
# end __init__
- # region PRIVATE
+ #region PRIVATE
# Analyze the root dataset to determine the total number of samples
def _analyse_dataset(self):
@@ -101,9 +98,9 @@ def _analyse_dataset(self):
# end for
# end _analyse_dataset
- # endregion PRIVATE
+ #endregion PRIVATE
- # region OVERRIDE
+ #region OVERRIDE
# Length of the dataset
def __len__(self):
@@ -156,6 +153,6 @@ def __getitem__(self, item):
return anchor_sample, positive_sample, negative_sample
# end __getitem__
- # endregion OVERRIDE
+ #endregion OVERRIDE
# end TripletBatching
diff --git a/echotorch/datasets/__init__.py b/echotorch/datasets/__init__.py
new file mode 100644
index 0000000..823b095
--- /dev/null
+++ b/echotorch/datasets/__init__.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+from .CopyTaskDataset import CopyTaskDataset
+from .DatasetComposer import DatasetComposer
+from .DiscreteMarkovChainDataset import DiscreteMarkovChainDataset
+from .FromCSVDataset import FromCSVDataset
+from .HenonAttractor import HenonAttractor
+from .ImageToTimeseries import ImageToTimeseries
+from .LambdaDataset import LambdaDataset
+from .LatchTaskDataset import LatchTaskDataset
+from .LogisticMapDataset import LogisticMapDataset
+from .LorenzAttractor import LorenzAttractor
+from .MackeyGlassDataset import MackeyGlassDataset
+from .MarkovChainDataset import MarkovChainDataset
+from .MemTestDataset import MemTestDataset
+from .MixedSinesDataset import MixedSinesDataset
+from .NARMADataset import NARMADataset
+from .RosslerAttractor import RosslerAttractor
+from .SinusoidalTimeseries import SinusoidalTimeseries
+from .PeriodicSignalDataset import PeriodicSignalDataset
+from .RandomSymbolDataset import RandomSymbolDataset
+from .RepeatTaskDataset import RepeatTaskDataset
+from .TimeseriesBatchSequencesDataset import TimeseriesBatchSequencesDataset
+from .TransformDataset import TransformDataset
+from .TripletBatching import TripletBatching
+
+__all__ = [
+ 'CopyTaskDataset', 'DatasetComposer', 'DiscreteMarkovChainDataset', 'FromCSVDataset', 'HenonAttractor',
+ 'LambdaDataset', 'LatchTaskDataset', 'LogisticMapDataset', 'LorenzAttractor', 'MackeyGlassDataset', 'MemTestDataset',
+ 'NARMADataset', 'RosslerAttractor', 'SinusoidalTimeseries', 'PeriodicSignalDataset', 'RandomSymbolDataset',
+ 'ImageToTimeseries', 'MarkovChainDataset', 'MixedSinesDataset', 'RepeatTaskDataset',
+ 'TimeseriesBatchSequencesDataset', 'TransformDataset', 'TripletBatching'
+]
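A minimal smoke test of the re-exported package (a sketch; `batch_size` and the sizes are illustrative):

    from torch.utils.data import DataLoader
    from echotorch.datasets import NARMADataset

    # 100 input/target pairs of a 10th-order NARMA system, 1000 steps each
    narma10 = NARMADataset(sample_len=1000, n_samples=100, system_order=10)
    loader = DataLoader(narma10, batch_size=10, shuffle=True)

    for inputs, targets in loader:
        print(inputs.size(), targets.size())  # (10, 1000, 1) each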
diff --git a/echotorch/datasetspy2/CopyTaskDataset.py b/echotorch/datasetspy2/CopyTaskDataset.py
new file mode 100644
index 0000000..54d1c8c
--- /dev/null
+++ b/echotorch/datasetspy2/CopyTaskDataset.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/CopyTaskDataset.py
+# Description : Dataset for the copy task (Graves et al, 2016)
+# Date : 16th of July, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Copy task dataset
+class CopyTaskDataset(Dataset):
+ """
+ Copy task dataset
+ """
+
+ # Constructor
+ def __init__(self, n_samples, length_min, length_max, n_inputs, dtype=torch.float32):
+ """
+ Constructor
+ :param sample_len: Sample's length
+ :param period:
+ """
+ # Properties
+ self.length_min = length_min
+ self.length_max = length_max
+ self.n_samples = n_samples
+ self.n_inputs = n_inputs
+ self.dtype = dtype
+
+ # Generate data set
+ self.samples = self._generate()
+ # end __init__
+
+ #region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.samples[idx]
+ # end __getitem__
+
+ #endregion OVERRIDE
+
+ #region PRIVATE
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # List of samples
+ samples = list()
+
+ # For each sample
+ for i in range(self.n_samples):
+ # Generate length
+ sample_len = torch.randint(low=self.length_min, high=self.length_max, size=(1,)).item()
+
+ # Create empty inputs and output
+ sample_inputs = torch.zeros((sample_len * 2 + 1, self.n_inputs + 1), dtype=self.dtype)
+ sample_outputs = torch.zeros((sample_len * 2 + 1, self.n_inputs + 1), dtype=self.dtype)
+
+ # Generate a random pattern
+ random_pattern = torch.randint(low=0, high=2, size=(sample_len, self.n_inputs))
+
+ # Set in inputs and outputs
+ sample_inputs[:sample_len, :self.n_inputs] = random_pattern
+ sample_outputs[sample_len+1:, :self.n_inputs] = random_pattern
+ sample_inputs[sample_len, self.n_inputs] = 1.0
+
+ # Append
+ samples.append((sample_inputs, sample_outputs))
+ # end for
+
+ return samples
+ # end _generate
+
+ #endregion PRIVATE
+
+# end CopyTaskDataset
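Each sample is an `(inputs, targets)` pair of shape `(2*L + 1, n_inputs + 1)` for a drawn length `L`: the random bit pattern fills the first `L` input rows, a delimiter bit fires at row `L`, and the target repeats the pattern after the delimiter. A sketch to visualise the layout (sizes are illustrative, and the `echotorch.datasets` copy is assumed to share this constructor):

    import torch
    from echotorch.datasets import CopyTaskDataset

    torch.manual_seed(0)
    copy_task = CopyTaskDataset(n_samples=1, length_min=3, length_max=4, n_inputs=2)
    inputs, targets = copy_task[0]

    # Rows 0..L-1 of `inputs` hold the pattern, row L the delimiter bit;
    # rows L+1.. of `targets` hold the same pattern again.
    print(inputs)
    print(targets)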
diff --git a/echotorch/datasetspy2/DatasetComposer.py b/echotorch/datasetspy2/DatasetComposer.py
new file mode 100644
index 0000000..bd46e60
--- /dev/null
+++ b/echotorch/datasetspy2/DatasetComposer.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+
+
+# Dataset Composer
+class DatasetComposer(Dataset):
+ """
+ Compose dataset
+ """
+
+ # Constructor
+ def __init__(self, datasets, *args, **kwargs):
+ """
+ Constructor
+ :param datasets:
+ """
+ # Super
+ super(DatasetComposer, self).__init__(*args, **kwargs)
+
+ # Properties
+ self.datasets = datasets
+ self.n_datasets = len(datasets)
+
+ # Map item to datasets items
+ self.map_items = {}
+ self.n_samples = 0
+ index = 0
+ for i, d in enumerate(datasets):
+ for j in range(len(d)):
+ self.map_items[index] = (i, j)
+ index += 1
+ self.n_samples += 1
+ # end for
+ # end for
+ # end __init__
+
+ #region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ (d, e) = self.map_items[idx]
+ sample = self.datasets[d][e]
+ outputs = self._create_outputs(d, sample.shape[0])
+ return sample, outputs, torch.LongTensor([d])
+ # end __getitem__
+
+ #endregion OVERRIDE
+
+ #region PRIVATE
+
+ # Create outputs
+ def _create_outputs(self, i, time_length):
+ """
+ Create outputs
+ :param i:
+ :param time_length:
+ :return:
+ """
+ # print("create {}".format(i))
+ # Create tensor
+ outputs = torch.zeros(time_length, self.n_datasets)
+
+ # Put to one
+ outputs[:, i] = 1.0
+
+ return outputs
+ # end _create_outputs
+
+ #endregion PRIVATE
+
+# end DatasetComposer
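A sketch of how the composer behaves, using two `LambdaDataset` sources (the functions and sizes are illustrative):

    import math
    from echotorch.datasets import DatasetComposer, LambdaDataset

    sines = LambdaDataset(sample_len=100, n_samples=4, func=lambda t: math.sin(0.2 * t))
    ramps = LambdaDataset(sample_len=100, n_samples=4, func=lambda t: 0.01 * t)
    composed = DatasetComposer([sines, ramps])

    x, y, d = composed[5]          # item 1 of dataset 1
    print(len(composed))           # 8: items of both datasets, concatenated
    print(x.size(), y.size(), d)   # (100, 1), one-hot (100, 2), tensor([1])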
diff --git a/echotorch/datasetspy2/DiscreteMarkovChainDataset.py b/echotorch/datasetspy2/DiscreteMarkovChainDataset.py
new file mode 100644
index 0000000..cc54f9e
--- /dev/null
+++ b/echotorch/datasetspy2/DiscreteMarkovChainDataset.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+#
+# File : datasets/DiscreteMarkovChainDataset.py
+# Description : Create discrete samples from a Markov Chain
+# Date : 20th of December, 2019
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+# Imports
+import math
+import torch
+import torch.distributions.multinomial
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Discrete Markov chain dataset
+class DiscreteMarkovChainDataset(Dataset):
+ """
+ Discrete Markov chain dataset
+ """
+
+ # Constructor
+ def __init__(self, n_samples, sample_length, probability_matrix, *args, **kwargs):
+ """
+ Constructor
+ :param n_samples: Number of samples to generate
+ :param sample_length: Number of steps to generate for each samples
+ :param probability_matrix: Markov chain's probability matrix
+ """
+ # Super
+ super(DiscreteMarkovChainDataset, self).__init__(*args, **kwargs)
+
+ # Properties
+ self._probability_matrix = probability_matrix
+ self._n_samples = n_samples
+ self._sample_length = sample_length
+ self._n_states = probability_matrix.size(0)
+ # end __init__
+
+ # region PRIVATE
+
+ # Generate a markov chain from a probability matrix
+ def _generate_markov_chain(self, length, start_state=0):
+ """
+ Generate a sample from a probability matrix
+ :param length: Length of the sample to generate
+ :param start_state: Starting state
+ """
+ # One-hot vector of states
+ states_vector = torch.zeros(length, self._n_states)
+ states_vector[0, start_state] = 1.0
+
+ # Next state to predict
+ next_states_vector = torch.zeros(length, self._n_states)
+
+ # Current state
+ current_state = start_state
+
+ # For each time step
+ for t in range(1, length + 1):
+ # Probability to next states
+ prob_next_states = self._probability_matrix[current_state]
+
+ # Create a multinomial distribution from probs.
+ mnd = torch.distributions.multinomial.Multinomial(
+ total_count=self._n_states,
+ probs=prob_next_states
+ )
+
+ # Generate next states from probs.
+ next_state = torch.argmax(mnd.sample()).item()
+
+ # Save state
+ if t < length:
+ states_vector[t, next_state] = 1.0
+ # end if
+
+ # Save prediction
+ next_states_vector[t-1, next_state] = 1.0
+
+ # Set as current
+ current_state = next_state
+ # end for
+
+ return states_vector, next_states_vector
+ # end _generate_markov_chain
+
+ # endregion PRIVATE
+
+ # region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self._n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx: Sample index
+ :return: Sample as torch tensor
+ """
+ # Generate a Markov chain with
+ # specified length.
+ return self._generate_markov_chain(
+ length=self._sample_length,
+ start_state=np.random.randint(low=0, high=self._n_states)
+ )
+ # end __getitem__
+
+ # endregion OVERRIDE
+
+# end DiscreteMarkovChainDataset
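A sketch with a two-state chain (the probability matrix is illustrative); row `i` of the matrix holds the transition probabilities out of state `i`:

    import torch
    from echotorch.datasets import DiscreteMarkovChainDataset

    # State 0 tends to stay, state 1 tends to switch back
    p = torch.tensor([[0.9, 0.1],
                      [0.8, 0.2]])
    chain = DiscreteMarkovChainDataset(n_samples=5, sample_length=100, probability_matrix=p)

    states, next_states = chain[0]
    # One-hot current states and one-hot next states to predict,
    # both of shape (sample_length, n_states)
    print(states.size(), next_states.size())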
diff --git a/echotorch/datasetspy2/FromCSVDataset.py b/echotorch/datasetspy2/FromCSVDataset.py
new file mode 100644
index 0000000..d45d6cd
--- /dev/null
+++ b/echotorch/datasetspy2/FromCSVDataset.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+#
+# File : datasets/FromCSVDataset.py
+# Description : Load time series from a CSV file.
+# Date : 10th of April, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import csv
+
+
+# Load Time series from a CSV file
+class FromCSVDataset(Dataset):
+ """
+ Load Time series from a CSV file
+ """
+
+ # Constructor
+ def __init__(self, csv_file, columns, delimiter=",", quotechar='"', *args, **kwargs):
+ """
+ Constructor
+ :param csv_file: CSV file
+ :param columns: Columns to load from the CSV file
+ :param args: Args
+ :param kwargs: Dictionary args
+ """
+ # Super
+ super(FromCSVDataset, self).__init__(*args, **kwargs)
+
+ # Properties
+ self._csv_file = csv_file
+ self._columns = columns
+ self._n_columns = len(columns)
+ self._delimiter = delimiter
+ self._quotechar = quotechar
+ self._column_indices = list()
+
+ # Load
+ self._data = self._load_from_csv()
+ # end __init__
+
+ # region PRIVATE
+
+ # Find indices for each column
+ def _find_columns_indices(self, header_row):
+ """
+ Find indices for each column
+ :param header_row: Header row
+ """
+ for col in self._columns:
+ if col in header_row:
+ self._column_indices.append(header_row.index(col))
+ else:
+ raise Exception("Not column \'{}\' found in the CSV".format(col))
+ # end if
+ # end for
+ # end _find_columns_indices
+
+ # Load from CSV file
+ def _load_from_csv(self):
+ """
+ Load from CSV file
+ :return:
+ """
+ # Open CSV file
+ with open(self._csv_file, 'r') as csvfile:
+ # Read data
+ spamreader = csv.reader(csvfile, delimiter=self._delimiter, quotechar=self._quotechar)
+
+ # Data
+ data = list()
+
+ # For each row
+ for row_i, row in enumerate(spamreader):
+ # First row is the column name
+ if row_i == 0:
+ self._find_columns_indices(row)
+ else:
+ # Row list
+ row_list = list()
+
+ # Add each column
+ for idx in self._column_indices:
+ row_list.append(row[idx])
+ # end for
+
+ # Add to data
+ data.append(row_list)
+ # end if
+ # end for
+
+ # Create tensor
+ data_tensor = torch.zeros(len(data), self._n_columns)
+
+ # Insert data in tensor
+ for row_i, row in enumerate(data):
+ for col_i, e in enumerate(row):
+ data_tensor[row_i, col_i] = float(e)
+ # end for
+ # end for
+
+ return data_tensor
+ # end with
+ # end _load_from_csv
+
+ # endregion PRIVATE
+
+ # region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return 1
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx: Sample index
+ :return: Sample as torch tensor
+ """
+ # The whole CSV file is returned as a single sample
+ return self._data
+ # end __getitem__
+
+ # endregion OVERRIDE
+
+# end FromCSVDataset
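A sketch of loading two columns from a small CSV file (the file name and contents are illustrative):

    from echotorch.datasets import FromCSVDataset

    with open("prices.csv", "w") as f:
        f.write("date,open,close\n")
        f.write("2020-01-01,1.00,1.50\n")
        f.write("2020-01-02,1.50,1.25\n")

    series = FromCSVDataset("prices.csv", columns=["open", "close"])
    print(series[0])   # the whole file as one (n_rows, n_columns) tensor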
diff --git a/echotorch/datasetspy2/HenonAttractor.py b/echotorch/datasetspy2/HenonAttractor.py
new file mode 100644
index 0000000..32ef165
--- /dev/null
+++ b/echotorch/datasetspy2/HenonAttractor.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+from random import shuffle
+import numpy as np
+
+
+# Henon Attractor
+class HenonAttractor(Dataset):
+ """
+ The Hénon map is a discrete-time dynamical system that maps a point (x, y) to
+ (1 - a*x^2 + y, b*x). For the classical parameter values a=1.4 and b=0.3 it is
+ chaotic, and its iterates trace out the Hénon strange attractor.
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, xy, a, b, washout=0, normalize=False, seed=None):
+ """
+ Constructor
+ :param sample_len: Length of the time-series in time steps.
+ :param n_samples: Number of samples to generate.
+ :param xy: Initial (x, y) point.
+ :param a: Hénon map parameter a.
+ :param b: Hénon map parameter b.
+ :param washout: Number of initial steps to discard.
+ :param normalize: Normalize the generated samples if True.
+ :param seed: Seed of random number generator.
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.a = a
+ self.b = b
+ self.xy = xy
+ self.normalize = normalize
+ self.washout = washout
+
+ # Seed
+ if seed is not None:
+ torch.manual_seed(seed)
+ # end if
+
+ # Generate data set
+ self.outputs = self._generate()
+ # end __init__
+
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PUBLIC
+ ##############################################
+
+ # Regenerate
+ def regenerate(self):
+ """
+ Regenerate
+ :return:
+ """
+ # Generate data set
+ self.outputs = self._generate()
+ # end regenerate
+
+ ##############################################
+ # PRIVATE
+ ##############################################
+
+ # Henon
+ def _henon(self, x, y):
+ """
+ Henon
+ :param x:
+ :param y:
+ :return:
+ """
+ x_dot = 1 - (self.a * (x * x)) + y
+ y_dot = self.b * x
+ return x_dot, y_dot
+ # end _henon
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # Sizes
+ total_size = self.sample_len
+
+ # First position
+ xy = self.xy
+
+ # Samples
+ samples = list()
+
+ # Washout
+ for t in range(self.washout):
+ xy = self._henon(xy[0], xy[1])
+ # end for
+
+ # For each sample
+ for n in range(self.n_samples):
+ # Tensor
+ sample = torch.zeros(total_size, 2)
+
+ # Timesteps
+ for t in range(total_size):
+ xy = self._henon(xy[0], xy[1])
+ sample[t] = xy
+ # end for
+
+ # Normalize
+ if self.normalize:
+ maxval = torch.max(sample, dim=0)[0]
+ minval = torch.min(sample, dim=0)[0]
+ sample = torch.mm(sample - minval.repeat(total_size, 1), torch.inverse(torch.diag(maxval - minval)))
+ # end if
+
+ # Add
+ samples.append(sample)
+ # end for
+
+ # Shuffle
+ shuffle(samples)
+
+ return samples
+ # end _generate
+
+# end HenonAttractor
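A sketch at the classical chaotic parameters a = 1.4, b = 0.3, passing the initial point as a plain tuple of floats since `_henon` works elementwise:

    from echotorch.datasets import HenonAttractor

    henon = HenonAttractor(sample_len=1000, n_samples=1,
                           xy=(0.0, 0.0), a=1.4, b=0.3, washout=100)
    trajectory = henon[0]     # (sample_len, 2) tensor of (x, y) iterates
    print(trajectory.size())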
diff --git a/echotorch/datasetspy2/ImageToTimeseries.py b/echotorch/datasetspy2/ImageToTimeseries.py
new file mode 100644
index 0000000..5c3f493
--- /dev/null
+++ b/echotorch/datasetspy2/ImageToTimeseries.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/ImageToTimeseries.py
+# Description : Transform a dataset of images into timeseries.
+# Date : 6th of November, 2019
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+
+# Imports
+import math
+import torch
+from torch.utils.data import Dataset
+
+
+# Image to timeseries dataset
+class ImageToTimeseries(Dataset):
+ """
+ Image to timeseries dataset
+ """
+
+ # Constructor
+ def __init__(self, image_dataset, n_images, transpose=True):
+ """
+ Constructor
+ :param image_dataset: A Dataset object to transform
+ :param n_images: Number of images to concatenate into one timeseries
+ :param transpose: Transpose image before concatenating (start from side if true, from top if false)
+ """
+ # Params
+ self._image_dataset = image_dataset
+ self._n_images = n_images
+ self._transpose = transpose
+ # end __init__
+
+ #region OVERRIDE
+
+ # To string
+ def __str__(self):
+ """
+ To string
+ :return: String version of the object
+ """
+ str_object = "Dataset ImageToTimeseries\n"
+ str_object += "\tN. images : {}\n".format(self._n_images)
+ str_object += "\tDataset : {}".format(str(self._image_dataset))
+ return str_object
+ # end __str__
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return: How many samples
+ """
+ return int(math.ceil(len(self._image_dataset) / self._n_images))
+ # end __len__
+
+ # Get item
+ def __getitem__(self, item):
+ """
+ Get item
+ :param item: Item index
+ :return: (sample, target)
+ """
+ # Data and target
+ timeseries_data = None
+ timeseries_target = None
+
+ # Get samples
+ for i in range(self._n_images):
+ # Get sample
+ sample_data, sample_target = self._image_dataset[item * self._n_images + i]
+
+ # Transpose
+ if self._transpose:
+ sample_data = sample_data.permute(0, 2, 1)
+ # end if
+
+ # To tensor if numeric
+ if isinstance(sample_target, int) or isinstance(sample_target, float):
+ sample_target = torch.LongTensor([[sample_target]])
+ elif isinstance(sample_target, torch.Tensor):
+ sample_target = sample_target.reshape((1, -1))
+ # end if
+
+ # Concat
+ if i == 0:
+ timeseries_data = sample_data
+ timeseries_target = sample_target
+ else:
+ timeseries_data = torch.cat((timeseries_data, sample_data), dim=1)
+ timeseries_target = torch.cat((timeseries_target, sample_target), dim=0)
+ # end for
+ # end for
+
+ return timeseries_data, timeseries_target
+ # end __getitem__
+
+ #endregion OVERRIDE
+
+# end ImageToTimeseries
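A sketch with torchvision's MNIST (torchvision and the `./data` download path are assumptions, not part of this diff): ten 28x28 digits become one 280-step series of 28 features.

    import torchvision
    import torchvision.transforms as transforms
    from echotorch.datasets import ImageToTimeseries

    mnist = torchvision.datasets.MNIST(
        root="./data", train=True, download=True,
        transform=transforms.ToTensor()
    )
    img2ts = ImageToTimeseries(mnist, n_images=10)

    x, y = img2ts[0]
    print(x.size(), y.size())   # torch.Size([1, 280, 28]) torch.Size([10, 1])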
diff --git a/echotorch/datasetspy2/LambdaDataset.py b/echotorch/datasetspy2/LambdaDataset.py
new file mode 100644
index 0000000..fb0f0ab
--- /dev/null
+++ b/echotorch/datasetspy2/LambdaDataset.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Lambda dataset
+class LambdaDataset(Dataset):
+ """
+ Create simple periodic signal timeseries
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, func, start=0, dtype=torch.float32):
+ """
+ Constructor
+ :param sample_len: Sample's length
+ :param period:
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.func = func
+ self.start = start
+ self.dtype = dtype
+
+ # Generate data set
+ self.outputs = self._generate()
+ # end __init__
+
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PRIVATE
+ ##############################################
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # List of samples
+ samples = list()
+
+ # For each sample
+ for i in range(self.n_samples):
+ # Tensor
+ sample = torch.zeros(self.sample_len, 1, dtype=self.dtype)
+
+ # Timestep
+ for t in range(self.sample_len):
+ sample[t, 0] = self.func(t + self.start)
+ # end for
+
+ # Append
+ samples.append(sample)
+ # end for
+
+ return samples
+ # end _generate
+
+# end LambdaDataset
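A sketch generating a damped cosine from a user-supplied function; `func` receives the absolute timestep `t + start`:

    import math
    from echotorch.datasets import LambdaDataset

    damped = LambdaDataset(sample_len=200, n_samples=3,
                           func=lambda t: math.exp(-0.01 * t) * math.cos(0.1 * t))
    print(damped[0].size())   # (200, 1)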
diff --git a/echotorch/datasetspy2/LatchTaskDataset.py b/echotorch/datasetspy2/LatchTaskDataset.py
new file mode 100644
index 0000000..14722a8
--- /dev/null
+++ b/echotorch/datasetspy2/LatchTaskDataset.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/LatchTaskDataset.py
+# Description : Dataset for the latch task
+# Date : 16th of July, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Latch task dataset
+class LatchTaskDataset(Dataset):
+ """
+ Latch task dataset
+ """
+
+ # Constructor
+ def __init__(self, n_samples, length_min, length_max, n_pics, dtype=torch.float32):
+ """
+ Constructor
+ :param sample_len: Sample's length
+ :param period:
+ """
+ # Properties
+ self.length_min = length_min
+ self.length_max = length_max
+ self.n_samples = n_samples
+ self.n_pics = n_pics
+ self.dtype = dtype
+
+ # Generate data set
+ self.samples = self._generate()
+ # end __init__
+
+ #region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.samples[idx]
+ # end __getitem__
+
+ #endregion OVERRIDE
+
+ #region PRIVATE
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # List of samples
+ samples = list()
+
+ # For each sample
+ for i in range(self.n_samples):
+ # Generate length
+ sample_len = torch.randint(low=self.length_min, high=self.length_max, size=(1,)).item()
+
+ # Create empty inputs and output
+ sample_inputs = torch.zeros((1, sample_len), dtype=self.dtype)
+ sample_outputs = torch.zeros((1, sample_len), dtype=self.dtype)
+
+ # List of pic position
+ pic_positions = list()
+
+ # Generate random positions
+ for p in range(self.n_pics):
+ # Random pic position
+ random_pic_position = torch.randint(high=sample_len, size=(1,)).item()
+
+ # Save pic position
+ pic_positions.append(random_pic_position)
+
+ # Set pic in input
+ sample_inputs[0, random_pic_position] = 1.0
+ # end for
+
+ # Order pic positions
+ pic_positions.sort()
+
+ # For each pic
+ first_pic = True
+ for p_i in range(self.n_pics):
+ if first_pic:
+ if p_i == self.n_pics - 1:
+ pic_pos1 = pic_positions[p_i]
+ pic_pos2 = sample_len
+ else:
+ pic_pos1 = pic_positions[p_i]
+ pic_pos2 = pic_positions[p_i + 1] + 1
+ # end if
+
+ # Length of segment
+ segment_length = pic_pos2 - pic_pos1
+
+ # Set in outputs
+ sample_outputs[0, pic_pos1:pic_pos2] = torch.ones(segment_length, dtype=self.dtype)
+
+ # Not first pic
+ first_pic = False
+ else:
+ first_pic = True
+ # end if
+ # end for
+
+ # Append
+ samples.append((sample_inputs, sample_outputs))
+ # end for
+
+ return samples
+ # end _generate
+
+ #endregion PRIVATE
+
+# end LatchTaskDataset
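The input is a `(1, L)` spike train and the target is a latch: it rises to 1 at one spike and falls back to 0 at the next. A sketch (sizes are illustrative):

    from echotorch.datasets import LatchTaskDataset

    latch = LatchTaskDataset(n_samples=10, length_min=50, length_max=100, n_pics=4)
    x, y = latch[0]
    print(x.size(), y.size())   # both (1, L) with L drawn in [50, 100)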
diff --git a/echotorch/datasetspy2/LogisticMapDataset.py b/echotorch/datasetspy2/LogisticMapDataset.py
new file mode 100644
index 0000000..12fa1e7
--- /dev/null
+++ b/echotorch/datasetspy2/LogisticMapDataset.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Logistic Map dataset
+class LogisticMapDataset(Dataset):
+ """
+ Logistic Map dataset
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, alpha=5, beta=11, gamma=13, c=3.6, b=0.13, seed=None):
+ """
+ Constructor
+ :param sample_len:
+ :param n_samples:
+ :param alpha:
+ :param beta:
+ :param gamma:
+ :param c:
+ :param b:
+ :param seed:
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.alpha = alpha
+ self.beta = beta
+ self.gamma = gamma
+ self.c = c
+ self.b = b
+ self.p2 = np.pi * 2
+
+ # Init seed if needed
+ if seed is not None:
+ torch.manual_seed(seed)
+ # end if
+ # end __init__
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ # Time and forces
+ t = np.linspace(0, 1, self.sample_len, endpoint=False)
+ dforce = np.sin(self.p2 * self.alpha * t) + np.sin(self.p2 * self.beta * t) + np.sin(self.p2 * self.gamma * t)
+
+ # Series
+ series = torch.zeros(self.sample_len, 1)
+ series[0] = 0.6
+
+ # Generate
+ for i in range(1, self.sample_len):
+ series[i] = self._logistic_map(series[i-1], self.c + self.b * dforce[i])
+ # end for
+
+ return series
+ # end __getitem__
+
+ #######################################
+ # Private
+ #######################################
+
+ # Logistic map
+ def _logistic_map(self, x, r):
+ """
+ Logistic map
+ :param x:
+ :param r:
+ :return:
+ """
+ return r * x * (1-x)
+ # end logistic_map
+
+# end LogisticMapDataset
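The series follows the driven logistic map x(t+1) = r(t) * x(t) * (1 - x(t)) with r(t) = c + b * f(t), where f is a sum of three sines at frequencies alpha, beta and gamma. A sketch with the default parameters:

    from echotorch.datasets import LogisticMapDataset

    logistic = LogisticMapDataset(sample_len=500, n_samples=2)
    series = logistic[0]       # (sample_len, 1); regenerated on every access
    print(series.size())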
diff --git a/echotorch/datasetspy2/LorenzAttractor.py b/echotorch/datasetspy2/LorenzAttractor.py
new file mode 100644
index 0000000..203cf13
--- /dev/null
+++ b/echotorch/datasetspy2/LorenzAttractor.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Lorenz Attractor
+class LorenzAttractor(Dataset):
+ """
+ The Lorenz attractor is the attractor of the Lorenz system, a system of three non-linear ordinary
+ differential equations first studied by Edward Lorenz. For suitable parameter values these equations
+ define a continuous-time dynamical system with chaotic dynamics on a butterfly-shaped strange attractor.
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, xyz, sigma, b, r, dt=0.01, washout=0, normalize=False, seed=None):
+ """
+ Constructor
+ :param sample_len: Length of the time-series in time steps.
+ :param n_samples: Number of samples to generate.
+ :param xyz: Initial (x, y, z) state.
+ :param sigma: Lorenz parameter sigma.
+ :param b: Lorenz parameter b.
+ :param r: Lorenz parameter r.
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.xyz = xyz
+ self.dt = dt
+ self.normalize = normalize
+ self.washout = washout
+ self.sigma = sigma
+ self.b = b
+ self.r = r
+
+ # Seed
+ if seed is not None:
+ torch.manual_seed(seed)
+ # end if
+
+ # Generate data set
+ self.outputs = self._generate()
+ # end __init__
+
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PUBLIC
+ ##############################################
+
+ # Regenerate
+ def regenerate(self):
+ """
+ Regenerate
+ :return:
+ """
+ # Generate data set
+ self.outputs = self._generate()
+ # end regenerate
+
+ ##############################################
+ # PRIVATE
+ ##############################################
+
+ # Lorenz
+ def _lorenz(self, x, y, z):
+ """
+ Lorenz
+ :param x:
+ :param y:
+ :param z:
+ :return:
+ """
+ x_dot = self.sigma * (y - x)
+ y_dot = self.r * x - y - x * z
+ z_dot = x * y - self.b * z
+ return x_dot, y_dot, z_dot
+ # end _lorenz
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # Sizes
+ total_size = self.sample_len
+
+ # List of samples
+ samples = list()
+
+ # XYZ
+ xyz = self.xyz
+
+ # Washout
+ for t in range(self.washout):
+ # Derivatives of the X, Y, Z state
+ x_dot, y_dot, z_dot = self._lorenz(xyz[0], xyz[1], xyz[2])
+
+ # Apply changes
+ xyz[0] += self.dt * x_dot
+ xyz[1] += self.dt * y_dot
+ xyz[2] += self.dt * z_dot
+ # end for
+
+ # For each sample
+ for i in range(self.n_samples):
+ # Tensor
+ sample = torch.zeros(self.sample_len, 3)
+ for t in range(self.sample_len):
+ # Derivatives of the X, Y, Z state
+ x_dot, y_dot, z_dot = self._lorenz(xyz[0], xyz[1], xyz[2])
+
+ # Apply changes
+ xyz[0] += self.dt * x_dot
+ xyz[1] += self.dt * y_dot
+ xyz[2] += self.dt * z_dot
+
+ # Set
+ sample[t, 0] = xyz[0]
+ sample[t, 1] = xyz[1]
+ sample[t, 2] = xyz[2]
+ # end for
+
+ # Normalize
+ if self.normalize:
+ maxval = torch.max(sample, dim=0)[0]
+ minval = torch.min(sample, dim=0)[0]
+ sample = torch.mm(sample - minval.repeat(total_size, 1), torch.inverse(torch.diag(maxval - minval)))
+ # end if
+
+ # Append
+ samples.append(sample)
+ # end for
+
+ return samples
+ # end _generate
+
+# end LorenzAttractor
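A sketch at the classic chaotic parameters sigma = 10, r = 28, b = 8/3, integrated by forward Euler with step `dt`; the initial state is passed as a mutable list, since `_generate` updates it in place:

    from echotorch.datasets import LorenzAttractor

    lorenz = LorenzAttractor(sample_len=1000, n_samples=1,
                             xyz=[1.0, 1.0, 1.0],
                             sigma=10.0, b=8.0 / 3.0, r=28.0,
                             dt=0.01, washout=100)
    trajectory = lorenz[0]     # (sample_len, 3) tensor of (x, y, z) states
    print(trajectory.size())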
diff --git a/echotorch/datasetspy2/MackeyGlass2DDataset.py b/echotorch/datasetspy2/MackeyGlass2DDataset.py
new file mode 100644
index 0000000..06f7c88
--- /dev/null
+++ b/echotorch/datasetspy2/MackeyGlass2DDataset.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+
+
+# Mackey Glass dataset
+class MackeyGlass2DDataset(Dataset):
+ """
+ Mackey Glass 2D dataset
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, tau, subsample_rate, normalize=False, seed=None):
+ """
+ Constructor
+ :param sample_len: Length of the time-series in time steps.
+ :param n_samples: Number of samples to generate.
+ :param tau: Delay of the MG system; commonly used values are tau=17 (mild chaos) and tau=30 (moderate chaos).
+ :param subsample_rate: Number of integration sub-steps per output time step.
+ :param normalize: Normalize the generated samples if True.
+ :param seed: Seed of random number generator.
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.tau = tau
+ self.delta_t = 10
+ self.timeseries = 1.2
+ self.history_len = tau * self.delta_t
+ self.subsample_rate = subsample_rate
+ self.normalize = normalize
+
+ # Init seed if needed
+ if seed is not None:
+ torch.manual_seed(seed)
+ # end if
+ # end __init__
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ # Total size
+ total_size = self.sample_len
+ oldval = 1.2
+ samples = list()
+
+ # History
+ history = 1.2 * torch.ones(self.history_len) + 0.2 * (torch.rand(self.history_len) - 0.5)
+
+ # For each sample
+ for n in range(self.n_samples):
+ # Preallocate tensor for time-serie
+ sample = torch.zeros(self.sample_len, 2)
+
+ # For each time step
+ step = 0
+ for t in range(total_size):
+ for _ in range(self.delta_t * self.subsample_rate):
+ step = step + 1
+ tauval = history[step % self.history_len]
+ newval = oldval + (0.2 * tauval / (1.0 + tauval**10) - 0.1 * oldval) / self.delta_t
+ history[step % self.history_len] = oldval
+ oldval = newval
+ # end for
+ sample[t, 0] = newval
+ sample[t, 1] = tauval
+ # end for
+
+ # Normalize
+ if self.normalize:
+ maxval = torch.max(sample, dim=0)[0]
+ minval = torch.min(sample, dim=0)[0]
+ sample = torch.mm(sample - minval.repeat(total_size, 1), torch.inverse(torch.diag(maxval - minval)))
+ # end if
+
+ # Append
+ samples.append(sample)
+ # end for
+
+ return samples
+ # end __getitem__
+
+# end MackeyGlass2DDataset
diff --git a/echotorch/datasetspy2/MackeyGlassDataset.py b/echotorch/datasetspy2/MackeyGlassDataset.py
new file mode 100644
index 0000000..136e46c
--- /dev/null
+++ b/echotorch/datasetspy2/MackeyGlassDataset.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import collections
+
+
+# Mackey Glass dataset
+class MackeyGlassDataset(Dataset):
+ """
+ Mackey Glass dataset
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, tau=17, seed=None):
+ """
+ Constructor
+ :param sample_len: Length of the time-series in time steps.
+ :param n_samples: Number of samples to generate.
+ :param tau: Delay of the MG system; commonly used values are tau=17 (mild chaos) and tau=30 (moderate chaos).
+ :param seed: Seed of random number generator.
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.tau = tau
+ self.delta_t = 10
+ self.timeseries = 1.2
+ self.history_len = tau * self.delta_t
+
+ # Init seed if needed
+ if seed is not None:
+ torch.manual_seed(seed)
+ # end if
+ # end __init__
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ # History
+ history = collections.deque(1.2 * torch.ones(self.history_len) + 0.2 * (torch.rand(self.history_len) - 0.5))
+
+ # Preallocate tensor for time-serie
+ inp = torch.zeros(self.sample_len, 1)
+
+ # For each time step
+ for timestep in range(self.sample_len):
+ for _ in range(self.delta_t):
+ xtau = history.popleft()
+ history.append(self.timeseries)
+ self.timeseries = history[-1] + (0.2 * xtau / (1.0 + xtau ** 10) - 0.1 * history[-1]) / self.delta_t
+ # end for
+ inp[timestep] = self.timeseries
+ # end for
+
+ # Squash timeseries through tanh
+ inputs = torch.tanh(inp - 1)
+
+ # Return the series and its one-step-ahead shift
+ return inputs[:-1], inputs[1:]
+ # end __getitem__
+
+# end MackeyGlassDataset
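The generator integrates the Mackey-Glass delay differential equation dx/dt = 0.2 x(t-tau) / (1 + x(t-tau)^10) - 0.1 x(t) with `delta_t` Euler sub-steps per output step, squashes the series through tanh(x - 1), and returns it together with its one-step-ahead shift. A sketch:

    from echotorch.datasets import MackeyGlassDataset

    mg17 = MackeyGlassDataset(sample_len=1000, n_samples=1, tau=17, seed=42)
    u, y = mg17[0]             # y is u shifted one step ahead
    print(u.size(), y.size())  # (999, 1) each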
diff --git a/echotorch/datasetspy2/MarkovChainDataset.py b/echotorch/datasetspy2/MarkovChainDataset.py
new file mode 100644
index 0000000..ebb8e9d
--- /dev/null
+++ b/echotorch/datasetspy2/MarkovChainDataset.py
@@ -0,0 +1,218 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/MarkovChainDataset.py
+# Description : Markov chain dataset from patterns
+# Date : 16th of December, 2019
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+# Imports
+import math
+import numpy as np
+import torch
+import torch.distributions.multinomial
+from torch.utils.data.dataset import Dataset
+
+
+# Markov chain dataset from patterns
+class MarkovChainDataset(Dataset):
+ """
+ Markov chain dataset from patterns
+ """
+
+ # Constructor
+ def __init__(self, datasets, states_length, morphing_length, n_samples, sample_length, probability_matrix,
+ random_start=0, *args, **kwargs):
+ """
+ Constructor
+ :param datasets: Datasets
+ :param states_length: State length
+ :param morphing_length: Morphing length between patterns
+ """
+ # Super
+ super(MarkovChainDataset, self).__init__(*args, **kwargs)
+
+ # Properties
+ self._probability_matrix = probability_matrix
+ self.states = datasets
+ self.n_states = len(datasets)
+ self._states_length = states_length
+ self._morphing_length = morphing_length
+ self._sample_length = sample_length
+ self._total_length = sample_length * (states_length + 2 * morphing_length)
+ self._random_start = random_start
+ self.n_samples = n_samples
+ # end __init__
+
+ # region PRIVATE
+
+ # Generate a markov chain from a probability matrix
+ def _generate_markov_chain(self, length, start_state=0):
+ """
+ Generate a sample from a probability matrix
+ :param length: Length of the sample to generate
+ :param start_state: Starting state
+ """
+ # Vector of states
+ states_vector = torch.zeros(length)
+ states_vector[0] = start_state
+
+ # Current state
+ current_state = start_state
+
+ # For each time step
+ for t in range(1, length):
+ # Probability to next states
+ prob_next_states = self._probability_matrix[current_state]
+
+ # Create a multinomial distribution from probs.
+ mnd = torch.distributions.multinomial.Multinomial(
+ total_count=self.n_states,
+ probs=prob_next_states
+ )
+
+ # Generate next states from probs.
+ next_state = torch.argmax(mnd.sample()).item()
+
+ # Save next state
+ states_vector[t] = next_state
+
+ # Set as current
+ current_state = next_state
+ # end for
+
+ return states_vector
+ # end _generate_markov_chain
+
+ # endregion PRIVATE
+
+ # region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ # Generate a Markov chain with
+ # specified length
+ markov_chain = self._generate_markov_chain(
+ length=self._sample_length,
+ start_state=np.random.randint(low=0, high=self.n_states)
+ )
+
+ # Length of each state with morphing
+ state_length_and_morph = self._states_length + 2 * self._morphing_length
+
+ # Length of sample to get for each state
+ state_sample_length = self._states_length + 4 * self._morphing_length
+
+ # Empty tensor for the result
+ inputs = torch.zeros(self._total_length, 1)
+
+ # Outputs with each state at time t
+ outputs = torch.zeros(self._total_length, self.n_states)
+
+ # Save the list of samples
+ list_of_state_samples = list()
+
+ # For each state in the Markov chain
+ for state_i in range(self._sample_length):
+ # State
+ current_state = int(markov_chain[state_i].item())
+
+ # Get state dataset
+ state_dataset = self.states[current_state]
+
+ # Get a random sample
+ state_sample = state_dataset[np.random.randint(len(state_dataset))]
+
+ # State sample's length and minimum length
+ minimum_length = (state_sample_length + self._random_start)
+
+ # Check that the sample has
+ # the minimum size
+ if state_sample.size(0) >= minimum_length:
+ # Random start
+ if self._random_start > 0:
+ random_start = np.random.randint(0, self._random_start)
+ else:
+ random_start = 0
+ # end if
+
+ # Get a random part
+ state_part = state_sample[random_start:random_start+state_sample_length]
+
+ # Add to the list
+ list_of_state_samples.append(state_part)
+ else:
+ raise Exception("State sample length is not enough ({} vs {})!".format(state_sample_length, minimum_length))
+ # end if
+ # end for
+
+ # Go through time to compose the sample
+ for t in range(self._total_length):
+ # In which state we are
+ state_step = math.floor(t / state_length_and_morph)
+ last_step = state_step - 1
+ next_step = state_step + 1
+
+ # Bounds
+ state_start_time = state_step * state_length_and_morph
+ state_end_time = (state_step + 1) * state_length_and_morph
+
+ # Position in the state
+ state_position = t - state_start_time
+
+ # Are we in the morphing period
+ if self._morphing_length > 0:
+ if t - state_start_time < self._morphing_length and state_step > 0:
+ m = -(1.0 / (2.0 * self._morphing_length)) * state_position + 0.5
+ last_state_position = state_sample_length - self._morphing_length + state_position
+ inputs[t] = m * list_of_state_samples[last_step][last_state_position]
+ inputs[t] += (1.0 - m) * list_of_state_samples[state_step][state_position]
+ elif state_end_time - t < self._morphing_length and state_step != self._sample_length - 1:
+ m = (1.0 / (2.0 * self._morphing_length)) * (state_end_time - t)
+ next_state_position = state_sample_length - (self._morphing_length + self._states_length)
+ inputs[t] = m * list_of_state_samples[next_step][next_state_position]
+ inputs[t] += (1.0 - m) * list_of_state_samples[state_step][state_position]
+ else:
+ inputs[t] = list_of_state_samples[state_step][state_position]
+ # end if
+ else:
+ inputs[t] = list_of_state_samples[state_step][state_position]
+ # end if
+
+ # Outputs
+ outputs[t, int(markov_chain[state_step].item())] = 1.0
+ # end for
+
+ return inputs, outputs, markov_chain
+ # end __getitem__
+
+ # endregion OVERRIDE
+
+# end MarkovChainDataset
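A sketch that switches between two pattern datasets according to a two-state chain; morphing is disabled to keep it short, and each state sample must be at least `states_length + 4 * morphing_length + random_start` steps long:

    import math
    import torch
    from echotorch.datasets import MarkovChainDataset, LambdaDataset

    sine = LambdaDataset(sample_len=60, n_samples=4, func=lambda t: math.sin(0.5 * t))
    cosine = LambdaDataset(sample_len=60, n_samples=4, func=lambda t: math.cos(0.5 * t))

    p = torch.tensor([[0.2, 0.8],
                      [0.8, 0.2]])
    chain = MarkovChainDataset([sine, cosine], states_length=50, morphing_length=0,
                               n_samples=3, sample_length=8, probability_matrix=p)

    x, y, states = chain[0]    # x: (8 * 50, 1), y: one-hot state per timestep
    print(x.size(), y.size(), states)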
diff --git a/echotorch/datasetspy2/MemTestDataset.py b/echotorch/datasetspy2/MemTestDataset.py
new file mode 100644
index 0000000..8620125
--- /dev/null
+++ b/echotorch/datasetspy2/MemTestDataset.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+
+
+# Generates a series of input timeseries and delayed versions as outputs.
+class MemTestDataset(Dataset):
+ """
+ Generates a series of input timeseries and delayed versions as outputs.
+ Delay is given in number of timesteps. Can be used to empirically measure the
+ memory capacity of a system.
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, n_delays=10, seed=None):
+ """
+ Constructor
+ :param sample_len: Length of the time-series in time steps.
+ :param n_samples: Number of samples to generate.
+ :param n_delays: Number of step to delay
+ :param seed: Seed of random number generator.
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.n_delays = n_delays
+
+ # Init seed if needed
+ if seed is not None:
+ torch.manual_seed(seed)
+ # end if
+ # end __init__
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ inputs = (torch.rand(self.sample_len, 1) - 0.5) * 1.6
+ outputs = torch.zeros(self.sample_len, self.n_delays)
+ for k in range(self.n_delays):
+ outputs[:, k:k+1] = torch.cat((torch.zeros(k + 1, 1), inputs[:-k - 1, :]), dim=0)
+ # end for
+ return inputs, outputs
+ # end __getitem__
+
+# end MemTestDataset
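Column k of the target is the input delayed by k+1 steps, so reproducing it requires k+1 steps of memory; summing the squared correlations over k gives the usual memory-capacity estimate. A sketch:

    from echotorch.datasets import MemTestDataset

    memtest = MemTestDataset(sample_len=200, n_samples=10, n_delays=10, seed=1)
    u, y = memtest[0]
    print(u.size(), y.size())  # (200, 1) and (200, 10)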
diff --git a/echotorch/datasetspy2/MixedSinesDataset.py b/echotorch/datasetspy2/MixedSinesDataset.py
new file mode 100644
index 0000000..275272f
--- /dev/null
+++ b/echotorch/datasetspy2/MixedSinesDataset.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/MixedSinesDataset.py
+# Description : Mixed sines signals
+# Date : 10th of September, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti, University of Neuchâtel
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import math
+import numpy as np
+
+
+# Mixed sines dataset
+class MixedSinesDataset(Dataset):
+ """
+ Mixed sines dataset
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, periods, amplitudes, phases, dtype=torch.float64):
+ """
+ Constructor
+ :param sample_len: Length of the time-series in time steps.
+ :param n_samples: Number of samples to generate.
+ :param a:
+ :param b:
+ :param c:
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.dtype = dtype
+ self.sine1 = lambda n: amplitudes[0] * math.sin(2.0 * math.pi * (n + phases[0]) / periods[0])
+ self.sine2 = lambda n: amplitudes[1] * math.sin(2.0 * math.pi * (n + phases[1]) / periods[1])
+
+ # Generate data set
+ self.outputs = self._generate()
+ # end __init__
+
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PUBLIC
+ ##############################################
+
+ # Regenerate
+ def regenerate(self):
+ """
+ Regenerate
+ :return:
+ """
+ # Generate data set
+ self.outputs = self._generate()
+ # end regenerate
+
+ ##############################################
+ # PRIVATE
+ ##############################################
+
+ # Random initial points
+ def random_initial_points(self):
+ """
+ Random initial points
+ :return:
+ """
+ # Set
+ return np.random.random() * (math.pi * 2.0)
+ # end random_initial_points
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # List of samples
+ samples = list()
+
+ # For each sample
+ for i in range(self.n_samples):
+ # Tensor
+ sample = torch.zeros(self.sample_len, 1, dtype=self.dtype)
+
+ # Time steps
+ for t in range(self.sample_len):
+ sample[t, 0] = self.sine1(i * self.sample_len + t) + self.sine2(i * self.sample_len + t)
+ # end for
+
+ # Append
+ samples.append(sample)
+ # end for
+
+ return samples
+ # end _generate
+
+# end MixedSinesDataset
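+
+
+# Usage sketch (illustrative addition, not from the original module): the parameter
+# values below are assumptions chosen only to demonstrate the API.
+if __name__ == "__main__":
+    dataset = MixedSinesDataset(
+        sample_len=1000, n_samples=2,
+        periods=[8.37, 9.83], amplitudes=[1.0, 1.0], phases=[0.0, 0.5]
+    )
+    print(dataset[0].size())  # torch.Size([1000, 1])
+# end if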
diff --git a/echotorch/datasetspy2/NARMADataset.py b/echotorch/datasetspy2/NARMADataset.py
new file mode 100644
index 0000000..50264ed
--- /dev/null
+++ b/echotorch/datasetspy2/NARMADataset.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/NARMADataset.py
+# Description : NARMA timeseries
+# Date : 10th of September, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti, University of Neuchâtel
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+
+
+# 10th order NARMA task
+class NARMADataset(Dataset):
+ """
+    x-th order NARMA task.
+    WARNING: this dataset can be unstable. There is a small chance that the system
+    diverges, leading to an unusable dataset. NARMA-30 is preferable, as this
+    problem occurs less often there.
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, system_order=10):
+ """
+ Constructor
+ :param sample_len: Length of the time-series in time steps.
+ :param n_samples: Number of samples to generate.
+        :param system_order: Order of the NARMA system (e.g. 10 or 30).
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.system_order = system_order
+
+ # System order
+ self.parameters = torch.zeros(4)
+ if system_order == 10:
+ self.parameters[0] = 0.3
+ self.parameters[1] = 0.05
+ self.parameters[2] = 9
+ self.parameters[3] = 0.1
+ else:
+ self.parameters[0] = 0.2
+ self.parameters[1] = 0.04
+ self.parameters[2] = 29
+ self.parameters[3] = 0.001
+ # end if
+
+ # Generate data set
+ self.inputs, self.outputs = self._generate()
+ # end __init__
+
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.inputs[idx], self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PRIVATE
+ ##############################################
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ inputs = list()
+ outputs = list()
+ for i in range(self.n_samples):
+ ins = torch.rand(self.sample_len, 1) * 0.5
+ outs = torch.zeros(self.sample_len, 1)
+ for k in range(self.system_order - 1, self.sample_len - 1):
+ outs[k + 1] = self.parameters[0] * outs[k] + self.parameters[1] * outs[k] * torch.sum(
+ outs[k - (self.system_order - 1):k + 1]) + 1.5 * ins[k - int(self.parameters[2])] * ins[k] + \
+ self.parameters[3]
+ # end for
+ inputs.append(ins)
+ outputs.append(outs)
+ # end for
+
+ return inputs, outputs
+ # end _generate
+
+# end NARMADataset
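+
+
+# Usage sketch (illustrative addition, not from the original module); values are
+# assumptions for demonstration only.
+if __name__ == "__main__":
+    dataset = NARMADataset(sample_len=1000, n_samples=2, system_order=10)
+    u, y = dataset[0]
+    print(u.size(), y.size())  # torch.Size([1000, 1]) torch.Size([1000, 1])
+# end if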
diff --git a/echotorch/datasetspy2/PeriodicSignalDataset.py b/echotorch/datasetspy2/PeriodicSignalDataset.py
new file mode 100644
index 0000000..a02b624
--- /dev/null
+++ b/echotorch/datasetspy2/PeriodicSignalDataset.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/PeriodicSignalDataset.py
+# Description : Periodic patterns timeseries
+# Date : 10th of September, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti, University of Neuchâtel
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Periodic signal timeseries
+class PeriodicSignalDataset(Dataset):
+ """
+ Create simple periodic signal timeseries
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, period, start=1, dtype=torch.float64):
+ """
+ Constructor
+        :param sample_len: Length of the time-series in time steps.
+        :param n_samples: Number of samples to generate.
+        :param period: Periodic pattern (array-like) repeated over time, rescaled to [-0.9, 0.9].
+        :param start: Phase offset in time steps.
+        :param dtype: Data type of the generated tensors.
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.period = period
+ self.start = start
+ self.dtype = dtype
+
+ # Period
+ max_val = np.max(period)
+ min_val = np.min(period)
+ self.rp = 1.8 * (period - min_val) / (max_val - min_val) - 0.9
+ self.period_length = len(period)
+
+ # Function
+ self.func = lambda n: self.rp[(n + 1) % self.period_length]
+
+ # Generate data set
+ self.outputs = self._generate()
+ # end __init__
+
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PRIVATE
+ ##############################################
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # List of samples
+ samples = list()
+
+ # For each sample
+ for i in range(self.n_samples):
+ # Tensor
+ sample = torch.zeros(self.sample_len, 1, dtype=self.dtype)
+
+ # Timestep
+ for t in range(self.sample_len):
+ sample[t, 0] = self.func(i * self.sample_len + t)
+ # end for
+
+ # Append
+ samples.append(sample)
+ # end for
+
+ return samples
+ # end _generate
+
+# end PeriodicSignalDataset
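+
+
+# Usage sketch (illustrative addition, not from the original module): the periodic
+# pattern below is an assumption for demonstration only.
+if __name__ == "__main__":
+    pattern = np.array([1.0, 2.0, 3.0, 2.0])
+    dataset = PeriodicSignalDataset(sample_len=100, n_samples=1, period=pattern)
+    print(dataset[0].size())  # torch.Size([100, 1])
+# end if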
diff --git a/echotorch/datasetspy2/RandomSymbolDataset.py b/echotorch/datasetspy2/RandomSymbolDataset.py
new file mode 100644
index 0000000..15f6973
--- /dev/null
+++ b/echotorch/datasetspy2/RandomSymbolDataset.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/RandomSymbolDataset.py
+# Description : Create sequence of symbol chosen randomly.
+# Date : 10th of September, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti, University of Neuchâtel
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+
+
+# Sequences of symbols taken randomly
+class RandomSymbolDataset(Dataset):
+ """
+ Sequences of symbols taken randomly
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, vocabulary_size, random_func=None):
+ """
+ Constructor
+ :param sample_len: Length of sequences
+ :param n_samples: Number of samples to generate
+ :param vocabulary_size: How many symbols in the vocabulary
+        :param random_func: Optional random function used to draw symbols; if None, PyTorch's uniform integer generator is used
+ """
+ # Properties
+ self._sample_len = sample_len
+ self._n_samples = n_samples
+ self._vocabulary_size = vocabulary_size
+ self._random_func = random_func
+
+ # Generate samples
+ self._samples = self._generate()
+ # end __init__
+
+ # region PRIVATE
+
+ # Generate samples
+ def _generate(self):
+ """
+ Generate samples
+ :return: Generated samples
+ """
+ samples = list()
+ for sample_i in range(self._n_samples):
+            # Draw symbols with the provided random function if any (assumed to take a
+            # size tuple and return an integer tensor), else uniformly from the vocabulary
+            if self._random_func is not None:
+                samples.append(self._random_func((self._sample_len, 1)))
+            else:
+                samples.append(torch.randint(low=0, high=self._vocabulary_size, size=(self._sample_len, 1)))
+            # end if
+        # end for
+        return samples
+    # end _generate
+
+    # endregion PRIVATE
+
+ # region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self._n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self._samples[idx]
+ # end __getitem__
+
+ # endregion OVERRIDE
+
+# end RandomSymbolDataset
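+
+
+# Usage sketch (illustrative addition, not from the original module); values are
+# assumptions for demonstration only.
+if __name__ == "__main__":
+    dataset = RandomSymbolDataset(sample_len=50, n_samples=4, vocabulary_size=10)
+    print(dataset[0].size())  # torch.Size([50, 1]), integer symbols in [0, vocabulary_size)
+# end if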
diff --git a/echotorch/datasetspy2/RepeatTaskDataset.py b/echotorch/datasetspy2/RepeatTaskDataset.py
new file mode 100644
index 0000000..ef5a924
--- /dev/null
+++ b/echotorch/datasetspy2/RepeatTaskDataset.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/RepeatTaskDataset.py
+# Description : Dataset for the repeat task (Graves et al, 2016)
+# Date : 16th of July, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+
+
+# Repeat task dataset
+class RepeatTaskDataset(Dataset):
+ """
+ Repeat task dataset
+ """
+
+ # Constructor
+ def __init__(self, n_samples, length_min, length_max, n_inputs, max_repeat, min_repeat=1, dtype=torch.float32):
+ """
+ Constructor
+        :param n_samples: Number of samples to generate.
+        :param length_min: Minimum pattern length (inclusive).
+        :param length_max: Maximum pattern length (exclusive).
+        :param n_inputs: Number of input channels (excluding the repeat marker channel).
+        :param max_repeat: Maximum number of repetitions.
+        :param min_repeat: Minimum number of repetitions.
+        :param dtype: Data type of the generated tensors.
+ """
+ # Properties
+ self.length_min = length_min
+ self.length_max = length_max
+ self.n_samples = n_samples
+ self.n_inputs = n_inputs
+ self.max_repeat = max_repeat
+ self.min_repeat = min_repeat
+ self.dtype = dtype
+
+ # Generate data set
+ self.samples = self._generate()
+ # end __init__
+
+ #region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.samples[idx]
+ # end __getitem__
+
+ #endregion OVERRIDE
+
+ #region PRIVATE
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # List of samples
+ samples = list()
+
+ # For each sample
+ for i in range(self.n_samples):
+ # Random number of repeat
+ num_repeats = torch.randint(low=self.min_repeat, high=self.max_repeat+1, size=(1,)).item()
+
+ # Generate length
+ sample_len = torch.randint(low=self.length_min, high=self.length_max, size=(1,)).item()
+
+ # Total length
+ total_length = sample_len + 1 + (sample_len * num_repeats)
+
+ # Create empty inputs and output
+ sample_inputs = torch.zeros((total_length, self.n_inputs + 1), dtype=self.dtype)
+ sample_outputs = torch.zeros((total_length, self.n_inputs + 1), dtype=self.dtype)
+
+ # Generate a random pattern
+ random_pattern = torch.randint(low=0, high=2, size=(sample_len, self.n_inputs))
+
+ # Set in inputs
+ sample_inputs[:sample_len, :self.n_inputs] = random_pattern
+ sample_inputs[sample_len, self.n_inputs] = 1.0
+
+ # Set each repeat
+ for p in range(num_repeats):
+ start_pos = sample_len + 1 + (sample_len * p)
+ sample_outputs[start_pos:(start_pos+sample_len), :self.n_inputs] = random_pattern
+ # end for
+
+ # Append
+ samples.append((sample_inputs, sample_outputs))
+ # end for
+
+ return samples
+ # end _generate
+
+ #endregion PRIVATE
+
+# end RepeatTaskDataset
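+
+
+# Usage sketch (illustrative addition, not from the original module); values are
+# assumptions for demonstration only.
+if __name__ == "__main__":
+    dataset = RepeatTaskDataset(n_samples=2, length_min=3, length_max=6, n_inputs=4, max_repeat=3)
+    inputs, outputs = dataset[0]
+    print(inputs.size(), outputs.size())  # both (total_length, n_inputs + 1)
+# end if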
diff --git a/echotorch/datasetspy2/RosslerAttractor.py b/echotorch/datasetspy2/RosslerAttractor.py
new file mode 100644
index 0000000..bf8a9c1
--- /dev/null
+++ b/echotorch/datasetspy2/RosslerAttractor.py
@@ -0,0 +1,165 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Rossler Attractor
+class RosslerAttractor(Dataset):
+ """
+ The Rössler attractor is the attractor for the Rössler system, a system of three non-linear ordinary differential
+ equations originally studied by Otto Rössler. These differential equations define a continuous-time dynamical
+ system that exhibits chaotic dynamics associated with the fractal properties of the attractor.
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, xyz, a, b, c, dt=0.01, washout=0, normalize=False, seed=None):
+ """
+ Constructor
+ :param sample_len: Length of the time-series in time steps.
+ :param n_samples: Number of samples to generate.
+        :param xyz: Initial state (x, y, z) of the system.
+        :param a: Rossler parameter a.
+        :param b: Rossler parameter b.
+        :param c: Rossler parameter c.
+        :param dt: Integration time step.
+        :param washout: Number of initial steps to discard.
+        :param normalize: If True, normalize each dimension to [0, 1].
+        :param seed: Seed of the random number generator.
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.a = a
+ self.b = b
+ self.c = c
+ self.dt = dt
+ self.normalize = normalize
+ self.washout = washout
+ self.xyz = xyz
+
+ # Seed
+ if seed is not None:
+ np.random.seed(seed)
+ # end if
+
+ # Generate data set
+ self.outputs = self._generate()
+ # end __init__
+
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PUBLIC
+ ##############################################
+
+ # Regenerate
+ def regenerate(self):
+ """
+ Regenerate
+ :return:
+ """
+ # Generate data set
+ self.outputs = self._generate()
+ # end regenerate
+
+ ##############################################
+ # PRIVATE
+ ##############################################
+
+ # Rossler
+ def _rossler(self, x, y, z):
+ """
+        Rossler system derivatives
+        :param x: Current x state.
+        :param y: Current y state.
+        :param z: Current z state.
+        :return: Time derivatives (x_dot, y_dot, z_dot).
+ """
+ x_dot = -(y + z)
+ y_dot = x + self.a * y
+ z_dot = self.b + x * z - self.c * z
+ return x_dot, y_dot, z_dot
+    # end _rossler
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # Sizes
+ total_size = self.sample_len
+
+ # List of samples
+ samples = list()
+
+ # XYZ
+ xyz = self.xyz
+
+ # Washout
+ for t in range(self.washout):
+ # Derivatives of the X, Y, Z state
+ x_dot, y_dot, z_dot = self._rossler(xyz[0], xyz[1], xyz[2])
+
+ # Apply changes
+ xyz[0] += self.dt * x_dot
+ xyz[1] += self.dt * y_dot
+ xyz[2] += self.dt * z_dot
+ # end for
+
+ # For each sample
+ for i in range(self.n_samples):
+ # Tensor
+ sample = torch.zeros(total_size, 3)
+
+ # Time steps
+ for t in range(1, self.sample_len):
+ # Derivatives of the X, Y, Z state
+ x_dot, y_dot, z_dot = self._rossler(xyz[0], xyz[1], xyz[2])
+
+ # Apply changes
+ xyz[0] += self.dt * x_dot
+ xyz[1] += self.dt * y_dot
+ xyz[2] += self.dt * z_dot
+
+ # Set
+ sample[t, 0] = xyz[0]
+ sample[t, 1] = xyz[1]
+ sample[t, 2] = xyz[2]
+ # end for
+
+            # Normalize each dimension to [0, 1] (torch.max/min along a dim return values and indices)
+            if self.normalize:
+                maxval, _ = torch.max(sample, dim=0)
+                minval, _ = torch.min(sample, dim=0)
+                sample = (sample - minval) / (maxval - minval)
+            # end if
+
+ # Append
+ samples.append(sample)
+ # end for
+
+ return samples
+ # end _generate
+
+# end RosslerAttractor
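+
+
+# Usage sketch (illustrative addition, not from the original module): classic Rossler
+# parameters (a=0.2, b=0.2, c=5.7); the initial state is an assumption.
+if __name__ == "__main__":
+    dataset = RosslerAttractor(
+        sample_len=1000, n_samples=1, xyz=[0.0, 1.0, 0.0],
+        a=0.2, b=0.2, c=5.7, dt=0.01, washout=100
+    )
+    print(dataset[0].size())  # torch.Size([1000, 3])
+# end if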
diff --git a/echotorch/datasetspy2/SinusoidalTimeseries.py b/echotorch/datasetspy2/SinusoidalTimeseries.py
new file mode 100644
index 0000000..31915d4
--- /dev/null
+++ b/echotorch/datasetspy2/SinusoidalTimeseries.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/SinusoidalTimeseries.py
+# Description : Create a dataset from sine patterns.
+# Date : 5th of September, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import math
+import numpy as np
+
+
+# Sinusoidal Timeseries
+class SinusoidalTimeseries(Dataset):
+ """
+ Sinusoidal timeseries
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, period, a=1.0, m=0.0, start=1, dtype=torch.float64):
+ """
+ Constructor
+ :param sample_len: Length of the time-series in time steps.
+ :param n_samples: Number of samples to generate.
+        :param period: Period of the sine wave in time steps.
+        :param a: Amplitude of the sine wave.
+        :param m: Mean (vertical offset) of the signal.
+        :param start: Phase offset in time steps.
+        :param dtype: Data type of the generated tensors.
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+ self.dtype = dtype
+ self.func = lambda n: a * math.sin(2.0 * math.pi * (n + start) / period) + m
+
+ # Generate data set
+ self.outputs = self._generate()
+ # end __init__
+
+ #region OVERRIDE
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.outputs[idx]
+ # end __getitem__
+
+ #endregion OVERRIDE
+
+ #region PUBLIC
+
+ # Regenerate
+ def regenerate(self):
+ """
+ Regenerate
+ :return:
+ """
+ # Generate data set
+ self.outputs = self._generate()
+ # end regenerate
+
+ #endregion PUBLIC
+
+ #region PRIVATE
+
+ # Random initial points
+ def random_initial_points(self):
+ """
+ Random initial points
+ :return:
+ """
+ # Set
+ return np.random.random() * (math.pi * 2.0)
+ # end random_initial_points
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ # List of samples
+ samples = list()
+
+ # For each sample
+ for i in range(self.n_samples):
+ # Tensor
+ sample = torch.zeros(self.sample_len, 1, dtype=self.dtype)
+
+ # Time steps
+ for t in range(self.sample_len):
+ sample[t, 0] = self.func(i * self.sample_len + t)
+ # end for
+
+ # Append
+ samples.append(sample)
+ # end for
+
+ return samples
+ # end _generate
+
+ #endregion PRIVATE
+
+# end SinusoidalTimeseries
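+
+
+# Usage sketch (illustrative addition, not from the original module); the period value
+# is an assumption for demonstration only.
+if __name__ == "__main__":
+    dataset = SinusoidalTimeseries(sample_len=1000, n_samples=1, period=8.37)
+    print(dataset[0].size())  # torch.Size([1000, 1])
+# end if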
diff --git a/echotorch/datasetspy2/SwitchAttractorDataset.py b/echotorch/datasetspy2/SwitchAttractorDataset.py
new file mode 100644
index 0000000..e37b72e
--- /dev/null
+++ b/echotorch/datasetspy2/SwitchAttractorDataset.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+import torch
+from torch.utils.data.dataset import Dataset
+import numpy as np
+
+
+# Switch attractor dataset
+class SwitchAttractorDataset(Dataset):
+ """
+ Generate a dataset where the reservoir must switch
+ between two attractors.
+ """
+
+ # Constructor
+ def __init__(self, sample_len, n_samples, seed=None):
+ """
+ Constructor
+ :param sample_len: Length of the time-series in time steps.
+ :param n_samples: Number of samples to generate.
+        :param seed: Seed of the random number generator.
+ """
+ # Properties
+ self.sample_len = sample_len
+ self.n_samples = n_samples
+
+ # Init seed if needed
+ if seed is not None:
+ torch.manual_seed(seed)
+ # end if
+
+ # Generate data set
+ self.inputs, self.outputs = self._generate()
+ # end __init__
+
+ #############################################
+ # OVERRIDE
+ #############################################
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return:
+ """
+ return self.n_samples
+ # end __len__
+
+ # Get item
+ def __getitem__(self, idx):
+ """
+ Get item
+ :param idx:
+ :return:
+ """
+ return self.inputs[idx], self.outputs[idx]
+ # end __getitem__
+
+ ##############################################
+ # PRIVATE
+ ##############################################
+
+ # Generate
+ def _generate(self):
+ """
+ Generate dataset
+ :return:
+ """
+ inputs = list()
+ outputs = list()
+
+ # Generate each sample
+ for i in range(self.n_samples):
+            # Start and stop positions of the switch
+            start = np.random.randint(0, self.sample_len)
+            stop = np.random.randint(start, start + self.sample_len // 2)
+
+ # Limits
+ if stop >= self.sample_len:
+ stop = self.sample_len - 1
+ # end if
+
+ # Sample tensor
+ inp = torch.zeros(self.sample_len, 1)
+ out = torch.zeros(self.sample_len)
+
+ # Set inputs
+ inp[start, 0] = 1.0
+            inp[stop, 0] = 1.0
+
+ # Set outputs
+ out[start:stop] = 1.0
+
+ # Add
+ inputs.append(inp)
+ outputs.append(out)
+ # end for
+
+ return inputs, outputs
+ # end _generate
+
+# end SwitchAttractorDataset
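+
+
+# Usage sketch (illustrative addition, not from the original module); values are
+# assumptions for demonstration only.
+if __name__ == "__main__":
+    dataset = SwitchAttractorDataset(sample_len=100, n_samples=2, seed=1)
+    inp, out = dataset[0]
+    print(inp.size(), out.size())  # torch.Size([100, 1]) torch.Size([100])
+# end if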
diff --git a/echotorch/datasetspy2/TimeseriesBatchSequencesDataset.py b/echotorch/datasetspy2/TimeseriesBatchSequencesDataset.py
new file mode 100644
index 0000000..e980538
--- /dev/null
+++ b/echotorch/datasetspy2/TimeseriesBatchSequencesDataset.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/TimeseriesBatchSequencesDataset.py
+# Description : Take a dataset of timeseries and cut all of them by window size and compose batches
+# Date : 20th of July, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+
+# Imports
+import math
+import torch
+from torch.utils.data import Dataset
+
+
+# Timeseries batch cutting
+class TimeseriesBatchSequencesDataset(Dataset):
+ """
+ Take a dataset of timeseries and cut all of them by window size and compose batches
+ """
+
+ # Constructor
+ def __init__(self, root_dataset, window_size, data_indices, stride, time_axis=0, dataset_in_memory=False,
+ *args, **kwargs):
+ """
+ Constructor
+ :param root_dataset: Root dataset
+ :param window_size: Sequence size in the timeseries
+ :param data_indices: Which output of dataset is a timeseries tensor
+ :param stride: Stride
+ :param time_axis: Which axis is the temporal dimension in the output tensor
+ """
+ # Call upper class
+ super(TimeseriesBatchSequencesDataset, self).__init__(*args, **kwargs)
+
+ # Parameters
+ self.root_dataset = root_dataset
+ self.window_size = window_size
+ self.data_indices = data_indices
+ self.stride = stride
+ self.time_axis = time_axis
+ self.dataset_in_memory = dataset_in_memory
+
+ # Dataset information
+ self.timeseries_lengths = list()
+ self.timeseries_total_length = 0
+ self.root_dataset_n_samples = 0
+ self.timeseries_sequences_info = list()
+ self.n_samples = 0
+ self.dataset_samples = list()
+
+ # Load dataset
+ self._load_dataset()
+ # end __init__
+
+ #region PRIVATE
+
+ # Load dataset
+ def _load_dataset(self):
+ """
+ Load dataset
+ :return:
+ """
+ # Item position
+ item_position = 0
+
+ # Load root dataset
+ for item_i in range(len(self.root_dataset)):
+ # Get data
+ data = self.root_dataset[item_i]
+
+            # Get the first timeseries returned by the dataset
+ timeserie_data = data[self.data_indices[0]] if self.data_indices is not None else data
+
+ # Length of timeseries in number of samples (sequences)
+ timeserie_length = timeserie_data.size(self.time_axis)
+ # timeserie_seq_length = int(math.floor(timeserie_length / self.window_size))
+ timeserie_seq_length = int(math.floor((timeserie_length - self.window_size) / self.stride) + 1)
+
+ # Save length and total length
+ self.timeseries_lengths.append(timeserie_length)
+ self.timeseries_total_length += timeserie_length
+ self.timeseries_sequences_info.append({'start': item_position, 'end': item_position + timeserie_seq_length})
+
+ # Keep in memory if asked for
+ if self.dataset_in_memory:
+ self.dataset_samples.append(data)
+ # end if
+
+ # Increment item position
+ item_position += timeserie_seq_length
+ # end for
+
+ # Total number of samples
+ self.n_samples = item_position
+ # end _load_dataset
+
+ #endregion PRIVATE
+
+ #region OVERRIDE
+
+ # Get a sample in the dataset
+ def __getitem__(self, item):
+ """
+ Get a sample in the dataset
+ :param item: Item index (start 0)
+ :return: Dataset sample
+ """
+        # Go through each sample in the root dataset
+        for item_i in range(len(self.root_dataset)):
+            # Timeseries info
+ ts_start_end = self.timeseries_sequences_info[item_i]
+
+ # The item is in this sample
+ if ts_start_end['start'] <= item < ts_start_end['end']:
+ # Get the corresponding timeseries
+ if self.dataset_in_memory:
+ data = list(self.dataset_samples[item_i]) if self.data_indices is not None else self.dataset_samples[item_i]
+ else:
+ data = list(self.root_dataset[item_i]) if self.data_indices is not None else self.root_dataset[item_i]
+ # end if
+
+ # Sequence start and end
+ # sequence_start = (item - ts_start_end['start']) * self.window_size
+ sequence_start = (item - ts_start_end['start']) * self.stride
+ sequence_end = sequence_start + self.window_size
+ sequence_range = list(range(sequence_start, sequence_end))
+
+ # For each data to transform
+ if self.data_indices is not None:
+ for data_i in self.data_indices:
+                        # Get timeseries
+ timeserie_data = data[data_i]
+
+ # Get sequence according to time axis
+ data[data_i] = torch.index_select(timeserie_data, self.time_axis, torch.tensor(sequence_range))
+ # end for
+ else:
+ # Get sequence according to time axis
+ data = torch.index_select(data, self.time_axis, torch.tensor(sequence_range))
+ # end if
+
+ # Return modified data
+ return data
+ # end if
+        # end for
+
+        # No sequence found for this index
+        raise IndexError("Sequence index {} out of range".format(item))
+    # end __getitem__
+
+ # To string
+ def __str__(self):
+ """
+ To string
+ :return: String version of the object
+ """
+ str_object = "Dataset TimeseriesBatchSequencesDataset\n"
+ str_object += "\tWindow size : {}\n".format(self.window_size)
+ return str_object
+ # end __str__
+
+ # Length
+ def __len__(self):
+ """
+ Length
+ :return: How many samples
+ """
+ return self.n_samples
+ # end __len__
+
+ #endregion OVERRIDE
+
+# end TimeseriesBatchSequencesDataset
+
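+
+# Usage sketch (illustrative addition, not from the original module): cutting an assumed
+# in-memory toy dataset of (100, 1) tensors into windows of 10 with stride 10.
+if __name__ == "__main__":
+    class _ToyDataset(Dataset):
+        def __len__(self):
+            return 2
+        # end __len__
+
+        def __getitem__(self, idx):
+            return torch.arange(100, dtype=torch.float32).view(100, 1)
+        # end __getitem__
+    # end _ToyDataset
+
+    batched = TimeseriesBatchSequencesDataset(_ToyDataset(), window_size=10, data_indices=None, stride=10)
+    print(len(batched), batched[0].size())  # 20 torch.Size([10, 1])
+# end if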
diff --git a/echotorch/datasetspy2/TransformDataset.py b/echotorch/datasetspy2/TransformDataset.py
new file mode 100644
index 0000000..a383962
--- /dev/null
+++ b/echotorch/datasetspy2/TransformDataset.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/TransformDataset.py
+# Description : Apply a transformation to a dataset.
+# Date : 21st of July, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+# Imports
+import math
+import torch
+from torch.utils.data import Dataset
+
+
+# Transform dataset
+class TransformDataset(Dataset):
+ """
+ Apply a transformation to a dataset.
+ """
+
+ # Constructor
+ def __init__(self, root_dataset, transform, transform_indices=None, transform_target=None,
+ transform_target_indices=None, *args, **kwargs):
+ """
+ Constructor
+ :param root_dataset: The dataset to transform.
+ :param transform: A Transformer object applied to the timeseries.
+ :param transform_indices: The indices to select which data returned by the dataset to apply the
+ transformation to. Or None if applied directly.
+ :param transform_target: A Transformer object applied to the target timeseries.
+ :param transform_target_indices: The indices to select which data returned by the dataset to apply the
+ target transformation to. Or None if applied directly.
+ """
+ # Call upper class
+ super(TransformDataset, self).__init__(*args, **kwargs)
+
+ # Properties
+ self._root_dataset = root_dataset
+ self._transform = transform
+ self._transform_indices = transform_indices
+ self._transform_target = transform_target
+ self._transform_target_indices = transform_target_indices
+ # end __init__
+
+ #region OVERRIDE
+
+ # Length of the dataset
+ def __len__(self):
+ """
+ Length of the dataset
+ :return: Length of the dataset
+ """
+ return len(self._root_dataset)
+ # end __len__
+
+ # Get item
+ def __getitem__(self, item):
+ """
+ Get item
+ :param item: Index
+ :return:
+ """
+ # Get data from the item
+ item_data = self._root_dataset[item]
+
+        # Transform each input
+ if self._transform is not None:
+ if self._transform_indices is not None:
+ for data_i in self._transform_indices:
+ item_data[data_i] = self._transform(item_data[data_i])
+ # end for
+ else:
+ item_data = self._transform(item_data)
+ # end if
+ # end if
+
+        # Transform each target
+ if self._transform_target is not None:
+ if self._transform_target_indices is not None:
+ for data_i in self._transform_target_indices:
+ item_data[data_i] = self._transform_target(item_data[data_i])
+ # end for
+ else:
+ item_data = self._transform_target(item_data)
+ # end if
+ # end if
+
+ return item_data
+ # end __getitem__
+
+ #endregion OVERRIDE
+
+# end TransformDataset
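+
+
+# Usage sketch (illustrative addition, not from the original module): scaling every item
+# of an assumed toy root dataset by 2, using a plain callable as the transform.
+if __name__ == "__main__":
+    class _ToyDataset(Dataset):
+        def __len__(self):
+            return 3
+        # end __len__
+
+        def __getitem__(self, idx):
+            return torch.ones(5, 1) * idx
+        # end __getitem__
+    # end _ToyDataset
+
+    doubled = TransformDataset(_ToyDataset(), transform=lambda x: x * 2.0)
+    print(doubled[2].squeeze())  # tensor of fours
+# end if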
diff --git a/echotorch/datasetspy2/TripletBatching.py b/echotorch/datasetspy2/TripletBatching.py
new file mode 100644
index 0000000..0bc7580
--- /dev/null
+++ b/echotorch/datasetspy2/TripletBatching.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+#
+# File : echotorch/datasets/TripletBatching.py
+# Description : Take a dataset with different classes and create a dataset of triplets with an anchor (A) and positive
+# example (same class) and a negative one (different class).
+# Date : 21st of July, 2020
+#
+# This file is part of EchoTorch. EchoTorch is free software: you can
+# redistribute it and/or modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation, version 2.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Copyright Nils Schaetti
+
+
+# Imports
+import random
+from torch.utils.data import Dataset
+
+
+# Triplet batching
+class TripletBatching(Dataset):
+ """
+ Take a dataset with different classes and create a dataset of triplets with an anchor (A) and positive
+ example (same class) and a negative one (different class).
+ """
+
+ # Constructor
+ def __init__(self, root_dataset, data_index, target_index, target_count, n_samples,
+ target_type='int', *args, **kwargs):
+ """
+ Constructor
+        :param root_dataset: The main dataset
+        :param data_index: Index of the data in each dataset item
+        :param target_index: Index of the target class in each dataset item
+        :param target_count: Number of target classes
+        :param n_samples: Number of triplets to generate
+        :param target_type: Type of the target ('int' or 'tensor')
+ """
+ # Call upper class
+ super(TripletBatching, self).__init__(*args, **kwargs)
+
+ # Properties
+ self._root_dataset = root_dataset
+ self._data_index = data_index
+ self._target_index = target_index
+ self._target_count = target_count
+ self._n_samples = n_samples
+ self._target_type = target_type
+
+ # Item indices for each target classes
+ self._targets_indices = {}
+ self._targets_indices_len = {}
+ for t_i in range(self._target_count):
+ self._targets_indices[t_i] = list()
+ self._targets_indices_len[t_i] = 0
+ # end for
+
+ # List of target classes
+ self._target_classes = list()
+
+ # Analyse the dataset
+ self._analyse_dataset()
+ # end __init__
+
+ #region PRIVATE
+
+ # Analyze the root dataset to determine the total number of samples
+ def _analyse_dataset(self):
+ """
+ Analyse the root dataset
+ """
+ # For each samples
+ for data_i in range(len(self._root_dataset)):
+ # Data
+ data = self._root_dataset[data_i]
+
+ # Get target class
+ target_class = data[self._target_index]
+
+ # Transform to key
+ if self._target_type == 'tensor':
+ target_class = target_class.item()
+ # end if
+
+ # Save index
+ self._targets_indices[target_class].append(data_i)
+ self._targets_indices_len[target_class] += 1
+
+ # Add to list of target classes
+            # as we have at least one example
+ if target_class not in self._target_classes:
+ self._target_classes.append(target_class)
+ # end if
+ # end for
+ # end _analyse_dataset
+
+ #endregion PRIVATE
+
+ #region OVERRIDE
+
+ # Length of the dataset
+ def __len__(self):
+ """
+ Length of the dataset
+ :return: How many samples
+ """
+ return self._n_samples
+ # end __len__
+
+ # Get a sample in the dataset
+ def __getitem__(self, item):
+ """
+ Get a sample in the dataset
+ :param item: Item index (start 0)
+ :return: Dataset sample
+ """
+ # Number of classes
+ classes_count = len(self._target_classes)
+
+ # Choose a random anchor class
+ anchor_class = self._target_classes[random.randrange(classes_count)]
+
+        # Indices of anchor class (copied so removing the anchor below does not mutate the stored indices)
+        anchor_class_indices = list(self._targets_indices[anchor_class])
+ anchor_class_indices_count = len(anchor_class_indices)
+
+ # Choose a random anchor
+ anchor_index = anchor_class_indices[random.randrange(anchor_class_indices_count)]
+ anchor_sample = self._root_dataset[anchor_index]
+
+ # Choose a random positive example
+ anchor_class_indices.remove(anchor_index)
+ positive_index = anchor_class_indices[random.randrange(anchor_class_indices_count-1)]
+ positive_sample = self._root_dataset[positive_index]
+
+ # Choose a random negative class
+ targets_classes = self._target_classes.copy()
+ targets_classes.remove(anchor_class)
+ negative_class = targets_classes[random.randrange(classes_count-1)]
+
+ # Indices of negative class
+ negative_class_indices = self._targets_indices[negative_class]
+ negative_class_indices_count = len(negative_class_indices)
+
+ # Choose a random negative example
+ negative_index = negative_class_indices[random.randrange(negative_class_indices_count)]
+ negative_sample = self._root_dataset[negative_index]
+
+ return anchor_sample, positive_sample, negative_sample
+ # end __getitem__
+
+ #endregion OVERRIDE
+
+# end TripletBatching
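+
+
+# Usage sketch (illustrative addition, not from the original module): an assumed toy
+# root dataset returning (features, label) pairs with two classes.
+if __name__ == "__main__":
+    class _ToyDataset(Dataset):
+        def __len__(self):
+            return 6
+        # end __len__
+
+        def __getitem__(self, idx):
+            return [float(idx)] * 5, idx % 2
+        # end __getitem__
+    # end _ToyDataset
+
+    triplets = TripletBatching(_ToyDataset(), data_index=0, target_index=1, target_count=2, n_samples=4)
+    anchor, positive, negative = triplets[0]
+    print(anchor[1], positive[1], negative[1])  # anchor and positive share a class
+# end if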
diff --git a/echotorch/datasetspy2/__init__.py b/echotorch/datasetspy2/__init__.py
new file mode 100644
index 0000000..823b095
--- /dev/null
+++ b/echotorch/datasetspy2/__init__.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+#
+
+# Imports
+from .CopyTaskDataset import CopyTaskDataset
+from .DatasetComposer import DatasetComposer
+from .DiscreteMarkovChainDataset import DiscreteMarkovChainDataset
+from .FromCSVDataset import FromCSVDataset
+from .HenonAttractor import HenonAttractor
+from .ImageToTimeseries import ImageToTimeseries
+from .LambdaDataset import LambdaDataset
+from .LatchTaskDataset import LatchTaskDataset
+from .LogisticMapDataset import LogisticMapDataset
+from .LorenzAttractor import LorenzAttractor
+from .MackeyGlassDataset import MackeyGlassDataset
+from .MarkovChainDataset import MarkovChainDataset
+from .MemTestDataset import MemTestDataset
+from .MixedSinesDataset import MixedSinesDataset
+from .NARMADataset import NARMADataset
+from .RosslerAttractor import RosslerAttractor
+from .SinusoidalTimeseries import SinusoidalTimeseries
+from .PeriodicSignalDataset import PeriodicSignalDataset
+from .RandomSymbolDataset import RandomSymbolDataset
+from .RepeatTaskDataset import RepeatTaskDataset
+from .TimeseriesBatchSequencesDataset import TimeseriesBatchSequencesDataset
+from .TransformDataset import TransformDataset
+from .TripletBatching import TripletBatching
+
+__all__ = [
+ 'CopyTaskDataset', 'DatasetComposer', 'DiscreteMarkovChainDataset', 'FromCSVDataset', 'HenonAttractor',
+ 'LambdaDataset', 'LatchTaskDataset', 'LogisticMapDataset', 'LorenzAttractor', 'MackeyGlassDataset', 'MemTestDataset',
+ 'NARMADataset', 'RosslerAttractor', 'SinusoidalTimeseries', 'PeriodicSignalDataset', 'RandomSymbolDataset',
+ 'ImageToTimeseries', 'MarkovChainDataset', 'MixedSinesDataset', 'RepeatTaskDataset',
+ 'TimeseriesBatchSequencesDataset', 'TransformDataset', 'TripletBatching'
+]
diff --git a/echotorch/utils/evaluation/CrossValidationWithDev.py b/echotorch/evaluation/CrossValidationWithDev.py
similarity index 65%
rename from echotorch/utils/evaluation/CrossValidationWithDev.py
rename to echotorch/evaluation/CrossValidationWithDev.py
index 687c0ee..9bdb616 100644
--- a/echotorch/utils/evaluation/CrossValidationWithDev.py
+++ b/echotorch/evaluation/CrossValidationWithDev.py
@@ -25,12 +25,9 @@
from torch.utils.data.dataset import Dataset
import numpy as np
-# Imports local
-from echotorch.datasets import EchoDataset
-
# Do a k-fold cross validation with a dev set on a data set
-class CrossValidationWithDev(EchoDataset):
+class CrossValidationWithDev(Dataset):
"""
Do K-fold cross validation with a dev set on a data set
"""
@@ -161,27 +158,6 @@ def _create_folds(self, k, samples_indices=None):
#region OVERRIDE
- # # Dataset size
- # def __len__(self):
- # """
- # Dataset size
- # :return:
- # """
- # # Test length
- # dev_test_length = self.fold_sizes[self.fold]
- # train_length = len(self.root_dataset) - dev_test_length
- # dev_length = int(dev_test_length * self.dev_ratio)
- # test_length = dev_test_length - dev_length
- #
- # if self.mode == 'train':
- # return int(train_length * self.train_size)
- # elif self.mode == 'dev':
- # return dev_length
- # else:
- # return test_length
- # # end if
- # # end __len__
-
# Dataset size
def __len__(self):
"""
@@ -189,12 +165,11 @@ def __len__(self):
:return:
"""
# Test length
- test_length = self.fold_sizes[self.fold]
- dev_train_length = len(self.root_dataset) - test_length
- dev_length = int(math.ceil(dev_train_length * self.dev_ratio))
- train_length = dev_train_length - dev_length
+ dev_test_length = self.fold_sizes[self.fold]
+ train_length = len(self.root_dataset) - dev_test_length
+ dev_length = int(dev_test_length * self.dev_ratio)
+ test_length = dev_test_length - dev_length
- # According to set
if self.mode == 'train':
return int(train_length * self.train_size)
elif self.mode == 'dev':
@@ -212,18 +187,21 @@ def __getitem__(self, item):
:return:
"""
# Get target set
- test_set = self.folds[self.fold]
+ dev_test_set = self.folds[self.fold]
indexes_copy = self.indexes.copy()
- train_dev_set = np.setdiff1d(indexes_copy, test_set)
- dev_train_length = len(self.root_dataset) - len(test_set)
- dev_length = int(math.ceil(dev_train_length * self.dev_ratio))
- train_length = int(math.floor((dev_train_length - dev_length) * self.train_size))
+ train_set = np.setdiff1d(indexes_copy, dev_test_set)
+ train_length = len(self.root_dataset) - len(dev_test_set)
+ train_length = int(train_length * self.train_size)
+ train_set = train_set[:train_length]
- # Train / dev sets
- train_set = train_dev_set[dev_length:dev_length + train_length]
- dev_set = train_dev_set[:dev_length]
+ # Dev/test length
+ dev_length = int(len(dev_test_set) * self.dev_ratio)
- # Train/dev/test
+ # Dev/test sets
+ dev_set = dev_test_set[:dev_length]
+ test_set = dev_test_set[dev_length:]
+
+        # Train/dev/test
if self.mode == 'train':
return self.root_dataset[train_set[item]]
elif self.mode == 'dev':
@@ -233,38 +211,6 @@ def __getitem__(self, item):
# end if
# end __getitem__
- # # Get item
- # def __getitem__(self, item):
- # """
- # Get item
- # :param item:
- # :return:
- # """
- # # Get target set
- # dev_test_set = self.folds[self.fold]
- # indexes_copy = self.indexes.copy()
- # train_set = np.setdiff1d(indexes_copy, dev_test_set)
- # train_length = len(self.root_dataset) - len(dev_test_set)
- # train_length = int(train_length * self.train_size)
- # train_set = train_set[:train_length]
- #
- # # Dev/test length
- # dev_length = int(len(dev_test_set) * self.dev_ratio)
- #
- # # Dev/test sets
- # dev_set = dev_test_set[:dev_length]
- # test_set = dev_test_set[dev_length:]
- #
- # # Train/test
- # if self.mode == 'train':
- # return self.root_dataset[train_set[item]]
- # elif self.mode == 'dev':
- # return self.root_dataset[dev_set[item]]
- # else:
- # return self.root_dataset[test_set[item]]
- # # end if
- # # end __getitem__
-
#endregion OVERRIDE
# end CrossValidationWithDev
diff --git a/echotorch/utils/evaluation/__init__.py b/echotorch/evaluation/__init__.py
similarity index 100%
rename from echotorch/utils/evaluation/__init__.py
rename to echotorch/evaluation/__init__.py
diff --git a/echotorch/matrices.py b/echotorch/matrices.py
deleted file mode 100644
index 594b9dd..0000000
--- a/echotorch/matrices.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/matrices.py
-# Description : EchoTorch matrix creation utility functions.
-# Date : 30th of March, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel ,
-# University of Geneva
-
-# Imports
-import torch
-import echotorch.utils.matrix_generation as etmg
-from echotorch.utils.matrix_generation import MatrixGenerator
-
-
-# Cycle matrix with jumps generator
-def cycle_with_jumps_generator(
- connectivity: float = 1.0, spectra_radius: float = 1.0, apply_spectral_radius: bool = False, scale: float = 1.0,
- cycle_weight: float = 1.0, jump_weight: float = 1.0, jump_size: float = 2.0
-) -> MatrixGenerator:
- """
- Cycle matrix with jumps generator
- """
- return etmg.matrix_factory.get_generator(
- "cycle_with_jumps",
- connectivity=connectivity,
- spectra_radius=spectra_radius,
- apply_spectral_radius=apply_spectral_radius,
- scale=scale,
- cycle_weight=cycle_weight,
- jump_weight=jump_weight,
- jump_size=jump_size
- )
-# end cycle_with_jumps_generator
-
-
-# Generate cycle matrix with jumps (Rodan and Tino, 2012)
-def cycle_with_jumps(
- *size, connectivity: float = 1.0, spectra_radius: float = 1.0, apply_spectral_radius: bool = False,
- scale: float = 1.0, cycle_weight: float = 1.0, jump_weight: float = 1.0, jump_size: float = 2.0,
- dtype=None
-) -> torch.Tensor:
- """
- Generate cycle matrix with jumps (Rodan and Tino, 2012)
- """
- # Cycle with jumps generator
- matrix_generator = cycle_with_jumps_generator(
- connectivity=connectivity,
- spectra_radius=spectra_radius,
- apply_spectral_radius=apply_spectral_radius,
- scale=scale,
- cycle_weight=cycle_weight,
- jump_weight=jump_weight,
- jump_size=jump_size
- )
-
- # Generate matrix
- return matrix_generator.generate(size=size, dtype=dtype)
-# end cycle_with_jumps
-
-
-# Matlab loader generator
-def matlab_generator(
- file_path: str, entity_name: str, shape: tuple = None, spectral_radius: float = 1.0,
- apply_spectral_radius: bool = False, scale: float = 1.0
-) -> MatrixGenerator:
- """
- Matlab loader generator
- """
- return etmg.matrix_factory.get_generator(
- "matlab",
- file_path=file_path,
- entity_name=entity_name,
- shape=shape,
- spectral_radius=spectral_radius,
- scale=scale,
- apply_spectral_radius=apply_spectral_radius
- )
-# end matlab_generator
-
-
-# Load matrix from matlab file
-def matlab(
- file_path: str, entity_name: str, *size, spectral_radius: float = 1.0, apply_spectral_radius: bool = False,
- scale: float = 1.0, dtype=None
-) -> torch.Tensor:
- """
- Load matrix from matlab file
- """
- matrix_generator = matlab_generator(
- file_path=file_path,
- entity_name=entity_name,
- shape=size,
- spectral_radius=spectral_radius,
- scale=scale,
- apply_spectral_radius=apply_spectral_radius
- )
-
- # Generate matrix
- return matrix_generator.generate(size=size, dtype=dtype)
-# end matlab
-
-
-# Normal matrix generator
-def normal_generator(
- connectivity: float = 1.0, spectral_radius: float = 1.0, scale: float = 1.0, mean: float = 0.0,
- std: float = 1.0, minimum_edges: float = 0, apply_spectral_radius: bool = False
-) -> MatrixGenerator:
- """
- Create a generator to create normal matrices
- @param connectivity: Connectivity coefficient
- @param spectral_radius: Spectral radius
- @param scale: Scaling factor for the generated matrices
- @param mean: Mean parameter for the normal distribution
- @param std: Standard deviation parameter for the normal distribution
- @param minimum_edges: Minimum number of edge(s) present in the matrix
- @param apply_spectral_radius: True to apply the spectral radius rescaling, False otherwise
- @return: A MatrixGenerator to generate normal matrices
- """
- return etmg.matrix_factory.get_generator(
- "normal",
- connectivity=connectivity,
- spectral_radius=spectral_radius,
- scale=scale,
- apply_spectral_radius=apply_spectral_radius,
- mean=mean,
- std=std,
- minimum_edges=minimum_edges
- )
-# end normal_generator
-
-
-# Normal matrix generation
-def normal(*size, connectivity=1.0, spectral_radius=1.0, scale=1.0, mean=0.0, std=1.0, minimum_edges=0,
- apply_spectral_radius=False, dtype=None):
- """
- Generate a matrix from a normal distribution
- @param size: Size of the output matrix as a tuple
- @param connectivity:
- @param spectral_radius:
- @param scale:
- @param mean:
- @param std:
- @param minimum_edges:
- @param apply_spectral_radius:
- @param dtype:
- @return:
- """
- # Matrix generator
- matrix_generator = normal_generator(
- connectivity=connectivity,
- spectral_radius=spectral_radius,
- scale=scale,
- apply_spectral_radius=apply_spectral_radius,
- mean=mean,
- std=std,
- minimum_edges=minimum_edges
- )
-
- # Generate matrix
- return matrix_generator.generate(size=size, dtype=dtype)
-# end normal
-
-
-# Uniform matrix generator
-def uniform_generator(connectivity=1.0, spectral_radius=1.0, scale=1.0, input_set=[1.0, -1.0], minimum_edges=0,
- min=-1.0, max=1.0, apply_spectral_radius=False):
- """
- Uniform matrix generator
- """
- return etmg.matrix_factory.get_generator(
- "uniform",
- connectivity=connectivity,
- spectral_radius=spectral_radius,
- scale=scale,
- input_set=input_set,
- minimum_edges=minimum_edges,
- min=min,
- max=max,
- apply_spectral_radius=apply_spectral_radius
- )
-# end uniform_generator
-
-
-# Uniform matrix generation
-def uniform(*size, connectivity=1.0, spectral_radius=1.0, scale=1.0, input_set=[1.0, -1.0], minimum_edges=0,
- min=-1.0, max=1.0, apply_spectral_radius=False, dtype=None):
- """
- Uniform matrix generation
- :param connectivity:
- :param spectral_radius:
- :param scale:
- :param input_set:
- :param minimum_edges:
- :param min:
- :param max:
- :param apply_spectral_radius:
- :param dtype:
- """
- # Matrix generator
- matrix_generator = uniform_generator(
- connectivity=connectivity,
- spectral_radius=spectral_radius,
- scale=scale,
- input_set=input_set,
- minimum_edges=minimum_edges,
- min=min,
- max=max,
- apply_spectral_radius=apply_spectral_radius
- )
-
- # Generate matrix
- return matrix_generator.generate(size=size, dtype=dtype)
-# end uniform
-
diff --git a/echotorch/nn/advanced/HNilsNet.py b/echotorch/models/HNilsNet.py
similarity index 100%
rename from echotorch/nn/advanced/HNilsNet.py
rename to echotorch/models/HNilsNet.py
diff --git a/echotorch/nn/advanced/NilsNet.py b/echotorch/models/NilsNet.py
similarity index 100%
rename from echotorch/nn/advanced/NilsNet.py
rename to echotorch/models/NilsNet.py
diff --git a/echotorch/nn/advanced/TNilsNet.py b/echotorch/models/TNilsNet.py
similarity index 100%
rename from echotorch/nn/advanced/TNilsNet.py
rename to echotorch/models/TNilsNet.py
diff --git a/echotorch/models/__init__.py b/echotorch/models/__init__.py
index bddf9e1..83464dd 100644
--- a/echotorch/models/__init__.py
+++ b/echotorch/models/__init__.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# File : echotorch/nn/ESN.py
-# Description : An Echo State Network module.
-# Date : 26th of January, 2018
+# File : echotorch/models/__init__.py
+# Description : Models init.
+# Date : 09th of April, 2018
#
# This file is part of EchoTorch. EchoTorch is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
@@ -20,6 +20,5 @@
# Copyright Nils Schaetti, University of Neuchâtel
# Imports
-from . import conceptors
-from . import reservoir
-
+from .HNilsNet import HNilsNet
+from .NilsNet import NilsNet
diff --git a/echotorch/models/conceptors/__init__.py b/echotorch/models/conceptors/__init__.py
deleted file mode 100644
index 835051f..0000000
--- a/echotorch/models/conceptors/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/nn/ESN.py
-# Description : An Echo State Network module.
-# Date : 26th of January, 2018
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel
-
-
-# Imports
-from .IncSPESN import IncSPESN
-from .SPESN import SPESN
diff --git a/echotorch/models/reservoir/__init__.py b/echotorch/models/reservoir/__init__.py
deleted file mode 100644
index f55927f..0000000
--- a/echotorch/models/reservoir/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/nn/ESN.py
-# Description : An Echo State Network module.
-# Date : 26th of January, 2018
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel
-
-# Imports
-from .BDESN import BDESN
-from .ESN import ESN
-from .LiESN import LiESN
-from .StackedESN import StackedESN
diff --git a/echotorch/modules.py b/echotorch/modules.py
deleted file mode 100644
index f280b71..0000000
--- a/echotorch/modules.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/modules.py
-# Description : Utility functions to create modules
-# Date : 5th of February, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-import torch
-import echotorch.nn as etnn
-import echotorch.utils.matrix_generation as etmm
-
-
-# Create an Echo State Network (ESN)
-def esn(input_dim, hidden_dim, output_dim, spectral_radius, leaky_rate, w_connectivity,
- win_connectivity, wbias_connectivity, input_scaling, bias_scaling, ridge_param,
- softmax_output=False, dtype=torch.float64):
- """
- Create an Echo State Network (ESN)
- """
- # Internal matrix generator
- w_generator = etmm.matrix_factory.get_generator(
- "uniform",
- connectivity=w_connectivity,
- spectral_radius=spectral_radius
- )
-
- # Input-to-reservoir generator
- win_generator = etmm.matrix_factory.get_generator(
- "uniform",
- connectivity=1.0 if hidden_dim < 100 else win_connectivity,
- apply_spectral_radius=False,
- scale=input_scaling
- )
-
- # Reservoir bias generator
- wbias_generator = etmm.matrix_factory.get_generator(
- "uniform",
- connectivity=wbias_connectivity,
- apply_spectral_radius=False,
- scale=bias_scaling
- )
-
- # Create the ESN
- return etnn.LiESN(
- input_dim=input_dim,
- hidden_dim=hidden_dim,
- output_dim=output_dim,
- leaky_rate=leaky_rate,
- w_generator=w_generator,
- win_generator=win_generator,
- wbias_generator=wbias_generator,
- input_scaling=input_scaling,
- ridge_param=ridge_param,
- softmax_output=softmax_output,
- dtype=dtype
- )
-# end esn
diff --git a/echotorch/nn/NeuralFilter.py b/echotorch/nn/NeuralFilter.py
index 2f3faaf..b0cb03c 100644
--- a/echotorch/nn/NeuralFilter.py
+++ b/echotorch/nn/NeuralFilter.py
@@ -19,7 +19,15 @@
#
# Copyright Nils Schaetti
-# Imports
+"""
+Created on 25 November 2019
+@author: Nils Schaetti
+"""
+
+import torch
+import torch.sparse
+import torch.nn as nn
+import numpy as np
from .Node import Node
diff --git a/echotorch/nn/Node.py b/echotorch/nn/Node.py
index 1df3e12..a62e2db 100644
--- a/echotorch/nn/Node.py
+++ b/echotorch/nn/Node.py
@@ -300,25 +300,25 @@ def _inverse(self, name, M, code_class, code_pos):
# Show condition number
if self._debug == Node.DEBUG_OUTPUT:
- print(
+ print((
"DEBUG - INFO : Condition number while inversing {} : {} (at {}:{})".format(
name,
condition_number,
code_class,
code_pos
)
- )
+ ))
# Bad condition number
if condition_number > 14:
- print(
+ print((
"DEBUG - WARNING : High condition number while inversing {} : {} (at {}:{})".format(
name,
condition_number,
code_class,
code_pos
)
- )
+ ))
# end if
# end if
return torch.inverse(M)
@@ -341,25 +341,25 @@ def _pinverse(self, name, M, code_class, code_pos):
# Show condition number
if self._debug == Node.DEBUG_OUTPUT:
- print(
+ print((
"DEBUG - INFO : Condition number while pseudo-inversing {} : {} (at {}:{})".format(
name,
condition_number,
code_class,
code_pos
)
- )
+ ))
# Bad condition number
if condition_number > 14:
- print(
+ print((
"DEBUG - WARNING : High condition number while pseudo-inversing {} : {} (at {}:{})".format(
name,
condition_number,
code_class,
code_pos
)
- )
+ ))
# end if
# end if
return torch.pinverse(M)
@@ -380,7 +380,7 @@ def _call_debug_point(self, name, value, code_class, code_pos):
fatal_type = Node.FAIL + "DEBUG - FATAL: {} are not of the same type! (module:{}, outside:{}) (at {}:{})" + Node.ENDC
# If debug point set
- if self._debug > Node.NO_DEBUG and name in self._debug_points.keys():
+ if self._debug > Node.NO_DEBUG and name in list(self._debug_points.keys()):
# Get value
value_from_module = value
value_from_outside, precision = self._debug_points[name]
@@ -394,7 +394,7 @@ def _call_debug_point(self, name, value, code_class, code_pos):
# In debug output, print difference
if self._debug == Node.DEBUG_OUTPUT:
- print(info_precision_scalar.format(name, abs_diff, code_class, code_pos))
+ print((info_precision_scalar.format(name, abs_diff, code_class, code_pos)))
# end if
# In debug test case, call test call for evaluation
@@ -402,7 +402,7 @@ def _call_debug_point(self, name, value, code_class, code_pos):
self._test_case.assertAlmostEqual(value_from_module, value_from_outside, precision)
# In debug test, test if precision is ok
elif abs_diff > precision:
- print(error_precision.format(name, value_from_module, value_from_outside, code_class, code_pos))
+ print((error_precision.format(name, value_from_module, value_from_outside, code_class, code_pos)))
# end if
# Matrix/Tensor
elif isinstance(value_from_module, torch.Tensor):
@@ -413,14 +413,14 @@ def _call_debug_point(self, name, value, code_class, code_pos):
# In debug output, print difference
if self._debug == Node.DEBUG_OUTPUT:
- print(info_precision_matrix.format(name, norm_diff, code_class, code_pos))
+ print((info_precision_matrix.format(name, norm_diff, code_class, code_pos)))
# end if
# In debug test case, call test case for evaluation
if self._debug == Node.DEBUG_TEST_CASE:
self._test_case.assertTensorAlmostEqual(value_from_module, value_from_outside, precision)
if norm_diff > precision:
- print(
+ print((
error_precision.format(
name,
norm_diff,
@@ -429,14 +429,14 @@ def _call_debug_point(self, name, value, code_class, code_pos):
code_class,
code_pos
)
- )
+ ))
# end if
else:
- print(fatal_size.format(name, value_from_module.size(), value_from_outside.size(), code_class, code_pos))
+ print((fatal_size.format(name, value_from_module.size(), value_from_outside.size(), code_class, code_pos)))
raise Exception()
# end if
else:
- print(fatal_type.format(name, type(value_from_module), type(value_from_outside), code_class, code_pos))
+ print((fatal_type.format(name, type(value_from_module), type(value_from_outside), code_class, code_pos)))
raise Exception()
# end if
# end if
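
The hunks above only re-wrap these debug prints for Python 3; the check
itself is unchanged. A standalone sketch of the condition-number test in
`_inverse`/`_pinverse`: the base-10 log of the ratio of extreme singular
values estimates the digits of precision lost by the inversion, and 14 is
roughly where float64 runs out:

    import torch

    M = torch.rand(50, 50, dtype=torch.float64)
    _, S, _ = torch.svd(M)                       # singular values, descending
    condition_number = torch.log10(S[0] / S[-1])
    if condition_number > 14:
        print("High condition number: {}".format(condition_number))
    # end if
    M_inv = torch.inverse(M)
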
diff --git a/echotorch/nn/__init__.py b/echotorch/nn/__init__.py
index 45b0bcc..71be68b 100644
--- a/echotorch/nn/__init__.py
+++ b/echotorch/nn/__init__.py
@@ -21,41 +21,42 @@
# Import basis
from .Node import Node
-from .NeuralFilter import NeuralFilter
# Import conceptor nodes
-# from .conceptors.Conceptor import Conceptor
-# from .conceptors.ConceptorNet import ConceptorNet
+from .conceptors.Conceptor import Conceptor
+from .conceptors.ConceptorNet import ConceptorNet
# Import feature transformation nodes
-# from .features.ICACell import ICACell
-# from .features.OnlinePCACell import OnlinePCACell
-# from .features.PCACell import PCACell
-# from .features.SFACell import SFACell
+from .features.ICACell import ICACell
+from .features.OnlinePCACell import OnlinePCACell
+from .features.PCACell import PCACell
+from .features.SFACell import SFACell
# Functional
-# from .functional.losses import CSTLoss
+from .functional.losses import CSTLoss
# Import reservoir nodes
-# from .reservoir.BDESN import BDESN
-# from .reservoir.BDESNCell import BDESNCell
-# from .reservoir.BDESNPCA import BDESNPCA
-# from .reservoir.EESN import EESN
-# from .reservoir.ESN import ESN
-# from .reservoir.ESNCell import ESNCell
-# from .reservoir.GatedESN import GatedESN
-# from .reservoir.HESN import HESN
-# from .reservoir.LiESN import LiESN
-# from .reservoir.LiESNCell import LiESNCell
-# from .reservoir.StackedESN import StackedESN
+from .reservoir.BDESN import BDESN
+from .reservoir.BDESNCell import BDESNCell
+from .reservoir.BDESNPCA import BDESNPCA
+from .reservoir.EESN import EESN
+from .reservoir.ESN import ESN
+from .reservoir.ESNCell import ESNCell
+from .reservoir.GatedESN import GatedESN
+from .reservoir.HESN import HESN
+from .reservoir.LiESN import LiESN
+from .reservoir.LiESNCell import LiESNCell
+from .reservoir.StackedESN import StackedESN
# Import linear nodes
-# from .linear.RRCell import RRCell
+from .linear.RRCell import RRCell
# Import utils nodes
-# from .utils.Identity import Identity
+from .utils.Identity import Identity
# All
__all__ = [
- 'Node', 'NeuralFilter'
+ 'Conceptor', 'ConceptorNet', 'ICACell', 'OnlinePCACell', 'PCACell', 'SFACell',
+ 'BDESN', 'BDESNCell', 'BDESNPCA', 'EESN', 'ESN', 'ESNCell', 'GatedESN', 'HESN', 'LiESN', 'LiESNCell', 'Node', 'StackedESN',
+ 'RRCell', 'Identity', 'CSTLoss'
]
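
With the commented-out imports restored above, the nodes resolve from the
package root again:

    from echotorch.nn import ESN, LiESN, Conceptor, ConceptorNet, RRCell
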
diff --git a/echotorch/nn/conceptors/Conceptor.py b/echotorch/nn/conceptors/Conceptor.py
index 9e8724a..4605809 100644
--- a/echotorch/nn/conceptors/Conceptor.py
+++ b/echotorch/nn/conceptors/Conceptor.py
@@ -25,9 +25,7 @@
"""
# Imports
-from __future__ import annotations
import torch
-from typing import Union, List
from torch.autograd import Variable
import math
from ..NeuralFilter import NeuralFilter
@@ -481,12 +479,7 @@ def NOT_(self):
# end NOT_
# Similarity
- def sim(
- self,
- other: Union[Conceptor, List[Conceptor]],
- based_on='C',
- sim_func=generalized_squared_cosine
- ) -> Union[float, torch.Tensor]:
+ def sim(self, other, based_on='C', sim_func=generalized_squared_cosine):
"""
Generalized Cosine Similarity
:param other: Second operand
@@ -494,15 +487,7 @@ def sim(
:param sim_func: Similarity function (default: generalized_squared_cosine)
:return: Similarity between self and other ([0, 1])
"""
- if isinstance(other, Conceptor):
- return Conceptor.similarity(self, other, based_on, sim_func)
- elif isinstance(other, list):
- sim_vector = torch.zeros(len(other))
- for other_i, other_c in enumerate(other):
- sim_vector[other_i] = Conceptor.similarity(self, other_c, based_on, sim_func)
- # end for
- return sim_vector
- # end if
+ return Conceptor.similarity(self, other, based_on, sim_func)
# end sim
# Delta measure (sensitivity of Frobenius norm to change of aperture)
@@ -537,14 +522,6 @@ def copy(self):
return new_C
# end copy
- # Make a copy of the conceptor
- def clone(self):
- """
- Make a copy of the Conceptor
- """
- return self.copy()
- # end clone
-
# endregion PUBLIC
# region PRIVATE
@@ -838,10 +815,6 @@ def computeR(C, aperture, inv_algo=torch.inverse):
if torch.all(torch.eq(C, torch.eye(C_dim, dtype=C.dtype))):
return None
else:
- targetC = torch.eye(C_dim) - C
- # print("I - C: {}".format(targetC))
- U, S, V = torch.svd(targetC)
- # print("U of target: {}".format(U))
return math.pow(aperture, -2) * torch.mm(C, inv_algo(torch.eye(C_dim) - C))
# end if
# end R
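
`sim` now always delegates to `Conceptor.similarity`, whose default metric is
`generalized_squared_cosine`. As a reference point, a sketch of that measure
as commonly defined in the conceptor literature (EchoTorch's own
implementation may differ in detail):

    import torch

    def generalized_squared_cosine(Ca, Cb):
        # ||Sa^(1/2) Ua' Ub Sb^(1/2)||_F^2 / (||Sa||_2 ||Sb||_2)
        Ua, Sa, _ = torch.svd(Ca)
        Ub, Sb, _ = torch.svd(Cb)
        num = torch.norm(
            torch.diag(Sa.sqrt()).mm(Ua.t()).mm(Ub).mm(torch.diag(Sb.sqrt()))
        ) ** 2
        return num / (torch.norm(Sa) * torch.norm(Sb))
    # end generalized_squared_cosine
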
diff --git a/echotorch/nn/conceptors/ConceptorNet.py b/echotorch/nn/conceptors/ConceptorNet.py
index 8f36518..b484d83 100644
--- a/echotorch/nn/conceptors/ConceptorNet.py
+++ b/echotorch/nn/conceptors/ConceptorNet.py
@@ -25,8 +25,9 @@
"""
# Imports
+import torch
from ..reservoir import ESN
-from echotorch.models.conceptors.SPESN import SPESN
+from .SPESN import SPESN
# Conceptor Network
diff --git a/echotorch/nn/conceptors/ConceptorSet.py b/echotorch/nn/conceptors/ConceptorSet.py
index 7c7d620..80ca9f6 100644
--- a/echotorch/nn/conceptors/ConceptorSet.py
+++ b/echotorch/nn/conceptors/ConceptorSet.py
@@ -20,9 +20,7 @@
# Copyright Nils Schaetti
# Imports
-from __future__ import annotations
import torch
-from typing import Union, List
from ..NeuralFilter import NeuralFilter
from .Conceptor import Conceptor
from echotorch.utils import quota, rank
@@ -127,7 +125,7 @@ def A(self, tol=1e-14):
A = Conceptor(input_dim=self._conceptor_dim, aperture=1, dtype=self._dtype)
# For each conceptor
- for kc, C in self._conceptors.items():
+ for kc, C in list(self._conceptors.items()):
A.OR_(C, tol=tol)
# end for
@@ -153,7 +151,7 @@ def is_null(self):
"""
The set contains only null (zero) conceptors
"""
- for k, c in self.conceptors.items():
+ for k, c in list(self.conceptors.items()):
if not c.is_null():
return False
# end if
@@ -172,54 +170,6 @@ def PHI(self, gamma):
# end for
# end PHI
- # Similarity between conceptors and a given one
- def sim(
- self,
- other: Union[Conceptor, List[Conceptor], ConceptorSet],
- based_on='C',
- sim_func=generalized_squared_cosine
- ) -> torch.Tensor:
- """
- Similarity between conceptors and a given one
- :param conceptor:
- :param based_on:
- :param sim_func:
- """
- if isinstance(other, Conceptor):
- # Similarity vector
- sim_vector = torch.zeros(self.count)
-
- # For each conceptor
- for i in range(self.count):
- sim_vector[i] = Conceptor.similarity(
- other,
- self.conceptors[i],
- based_on=based_on,
- sim_func=sim_func
- )
- # end for
-
- return sim_vector
- elif isinstance(other, ConceptorSet) or isinstance(other, list):
- # Similarity vector
- sim_matrix = torch.zeros(self.count, len(other))
-
- # For each pair of conceptor
- for i in range(self.count):
- for j in range(len(other)):
- sim_matrix[i, j] = Conceptor.similarity(
- self.conceptors[i],
- other[j],
- based_on=based_on,
- sim_func=sim_func
- )
- # end for
- # end for
-
- return sim_matrix
- # end if
- # end sim
-
# Similarity between two conceptors
def similarity(self, conceptor_i, conceptor_j, based_on='C', sim_func=generalized_squared_cosine):
"""
@@ -300,7 +250,7 @@ def set(self, k):
Set the conceptor index to use
:param k: Conceptor index to use
"""
- if k in self.conceptors.keys():
+ if k in list(self.conceptors.keys()):
self._current_conceptor_index = k
else:
raise Exception("Unknown conceptor {}".format(k))
@@ -392,7 +342,7 @@ def Eneg(self, conceptor_i, x, tol=1e-14):
others = Conceptor(input_dim=self._conceptor_dim, aperture=1, dtype=self._dtype)
# For each conceptor
- for kc, C in self._conceptors.items():
+ for kc, C in list(self._conceptors.items()):
if kc != conceptor_i:
others.OR_(C, tol=tol)
# end if
@@ -505,7 +455,7 @@ def filter_transform(self, X, *args, **kwargs):
:return: Filtered signal
"""
# Morphing vector present ?
- if "morphing_vector" in kwargs.keys():
+ if "morphing_vector" in list(kwargs.keys()):
# Morphing vector
morphing_vector = kwargs["morphing_vector"]
@@ -530,14 +480,6 @@ def extra_repr(self):
return s.format(**self.__dict__)
# end extra_repr
- # Length
- def __len__(self):
- """
- Length
- """
- return len(self._conceptors)
- # end __len__
-
# Get item
def __getitem__(self, item):
"""
diff --git a/echotorch/nn/conceptors/IncConceptorNet.py b/echotorch/nn/conceptors/IncConceptorNet.py
index e7f89cc..b180bb5 100644
--- a/echotorch/nn/conceptors/IncConceptorNet.py
+++ b/echotorch/nn/conceptors/IncConceptorNet.py
@@ -24,10 +24,13 @@
@author: Nils Schaetti
"""
-# EchoTorch imports
-from echotorch.models.reservoir.ESN import ESN
-from echotorch.nn.conceptors.ConceptorNet import ConceptorNet
-from echotorch.models.conceptors.IncSPESN import IncSPESN
+# Imports
+import torch
+from .. import Node
+from ..linear import IncRRCell
+from ..reservoir import ESN
+from .ConceptorNet import ConceptorNet
+from .IncSPESN import IncSPESN
# Incremental learning-based Conceptor Network
diff --git a/echotorch/models/conceptors/IncSPESN.py b/echotorch/nn/conceptors/IncSPESN.py
similarity index 94%
rename from echotorch/models/conceptors/IncSPESN.py
rename to echotorch/nn/conceptors/IncSPESN.py
index 421b1fe..59d24e4 100644
--- a/echotorch/models/conceptors/IncSPESN.py
+++ b/echotorch/nn/conceptors/IncSPESN.py
@@ -26,15 +26,12 @@
# Imports
import torch
-
-# EchoTorch imports
-from echotorch.nn.linear.IncRRCell import IncRRCell
-from echotorch.nn.linear.IncForgRRCell import IncForgRRCell
-from echotorch.nn.conceptors.SPESNCell import SPESNCell
-from echotorch.nn.conceptors.IncSPESNCell import IncSPESNCell
-from echotorch.nn.conceptors.IncForgSPESNCell import IncForgSPESNCell
-from echotorch.nn.Node import Node
-from echotorch.models.reservoir.ESN import ESN
+from ..linear import IncRRCell, IncForgRRCell
+from .IncSPESNCell import IncSPESNCell
+from .IncForgSPESNCell import IncForgSPESNCell
+from .SPESNCell import SPESNCell
+from ..reservoir import ESN
+from ..Node import Node
# Self-Predicting Echo State Network module with incremental learning
diff --git a/echotorch/nn/conceptors/IncSPESNCell.py b/echotorch/nn/conceptors/IncSPESNCell.py
index a08f9bb..7dcee0c 100644
--- a/echotorch/nn/conceptors/IncSPESNCell.py
+++ b/echotorch/nn/conceptors/IncSPESNCell.py
@@ -28,9 +28,9 @@
import math
import torch
from torch.autograd import Variable
-
-# EchoTorch imports
+from echotorch.nn.reservoir.ESNCell import ESNCell
from .SPESNCell import SPESNCell
+import matplotlib.pyplot as plt
# Self-Predicting ESN Cell with incremental learning
diff --git a/echotorch/models/conceptors/SPESN.py b/echotorch/nn/conceptors/SPESN.py
similarity index 96%
rename from echotorch/models/conceptors/SPESN.py
rename to echotorch/nn/conceptors/SPESN.py
index 5081982..b2ede79 100644
--- a/echotorch/models/conceptors/SPESN.py
+++ b/echotorch/nn/conceptors/SPESN.py
@@ -26,12 +26,10 @@
# Imports
import torch
-
-# EchoTorch imports
from echotorch.nn.linear.RRCell import RRCell
-from echotorch.nn.Node import Node
-from echotorch.nn.conceptors.SPESNCell import SPESNCell
-from echotorch.models.reservoir.ESN import ESN
+from .SPESNCell import SPESNCell
+from ..reservoir import ESN
+from ..Node import Node
# Self-Predicting Echo State Network module.
diff --git a/echotorch/nn/conceptors/__init__.py b/echotorch/nn/conceptors/__init__.py
index e219f79..3a12fc1 100644
--- a/echotorch/nn/conceptors/__init__.py
+++ b/echotorch/nn/conceptors/__init__.py
@@ -25,8 +25,9 @@
from .ConceptorSet import ConceptorSet
from .IncConceptorNet import IncConceptorNet
from .IncForgSPESNCell import IncForgSPESNCell
+from .IncSPESN import IncSPESN
from .IncSPESNCell import IncSPESNCell
-from echotorch.models.conceptors.SPESN import SPESN
+from .SPESN import SPESN
from .SPESNCell import SPESNCell
# All
diff --git a/echotorch/nn/features/OnlinePCACell.py b/echotorch/nn/features/OnlinePCACell.py
index 182886b..3fd22f6 100644
--- a/echotorch/nn/features/OnlinePCACell.py
+++ b/echotorch/nn/features/OnlinePCACell.py
@@ -105,7 +105,7 @@ def init_eigen_vectors(self, init_eigen_vectors=None):
# Check input dim
assert(
self.input_dim == self._init_v.shape[0]), \
- Exception(u"Dimension mismatch. init_eigen_vectors shape[0] must be {}, given {}".format(
+ Exception("Dimension mismatch. init_eigen_vectors shape[0] must be {}, given {}".format(
self.input_dim,
self._init_v.shape[0]
)
diff --git a/echotorch/nn/features/PCACell.py b/echotorch/nn/features/PCACell.py
index eca6807..b954479 100644
--- a/echotorch/nn/features/PCACell.py
+++ b/echotorch/nn/features/PCACell.py
@@ -149,7 +149,7 @@ def finalize(self):
# We need more observations than variables
if self.tlen < self.input_dim:
- raise Exception(u"The number of observations ({}) is larger than the number of input variables ({})".format(self.tlen, self.input_dim))
+ raise Exception("The number of observations ({}) is larger than the number of input variables ({})".format(self.tlen, self.input_dim))
# end if
# Total variance
@@ -165,7 +165,7 @@ def finalize(self):
# end if
# Indexes
- indexes = range(d.size(0)-1, -1, -1)
+ indexes = list(range(d.size(0)-1, -1, -1))
# Sort by descending order
d = torch.take(d, Variable(torch.LongTensor(indexes)))
@@ -262,7 +262,7 @@ def _inverse(self, y, n=None):
# end if
if n > self.output_dim:
- raise Exception(u"y has dimension {} but should but at most {}".format(n, self.output_dim))
+ raise Exception("y has dimension {} but should but at most {}".format(n, self.output_dim))
# end if
# Get reconstruction matrix
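
The `list(...)` wrapper above is needed because `range` is lazy under
Python 3 and `torch.LongTensor` expects a materialised sequence; the reversal
itself turns the ascending eigenvalue order of the decomposition into the
descending order PCA wants. On a toy spectrum:

    import torch

    d = torch.tensor([0.1, 0.3, 0.5])                  # ascending eigenvalues
    indexes = list(range(d.size(0) - 1, -1, -1))       # [2, 1, 0]
    d_desc = torch.take(d, torch.LongTensor(indexes))  # tensor([0.5, 0.3, 0.1])
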
diff --git a/echotorch/nn/linear/IncForgRRCell.py b/echotorch/nn/linear/IncForgRRCell.py
index 7d04f75..005f620 100644
--- a/echotorch/nn/linear/IncForgRRCell.py
+++ b/echotorch/nn/linear/IncForgRRCell.py
@@ -28,11 +28,10 @@
import torch.sparse
import torch
from torch.autograd import Variable
-
-# EchoTorch imports
-import echotorch.nn.conceptors.Conceptor as Conceptor
-from echotorch.nn.linear.IncRRCell import IncRRCell
-from echotorch.utils import nrmse, quota, rank
+from ..conceptors import Conceptor
+from .IncRRCell import IncRRCell
+from echotorch.utils import nrmse
+from echotorch.utils import quota, rank
# Incremental Ridge Regression node
diff --git a/echotorch/nn/linear/RRCell.py b/echotorch/nn/linear/RRCell.py
index 3e2de24..7c88bc3 100644
--- a/echotorch/nn/linear/RRCell.py
+++ b/echotorch/nn/linear/RRCell.py
@@ -178,13 +178,9 @@ def finalize(self):
self.xTy = self.xTy / self._n_samples
# end if
- # Eye
- eye_I = torch.eye(self._input_dim + self._with_bias, dtype=self._dtype)
- eye_I = eye_I.cuda() if self.xTx.is_cuda else eye_I
-
# We need to solve wout = (xTx)^(-1)xTy
# Covariance matrix xTx
- ridge_xTx = self.xTx + self._ridge_param * eye_I
+ ridge_xTx = self.xTx + self._ridge_param * torch.eye(self._input_dim + self._with_bias, dtype=self._dtype)
# Inverse / pinverse
if self._learning_algo == "inv":
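
For context, `finalize` solves the usual ridge-regression normal equations.
The same computation on toy data, with states `X` and targets `Y` standing in
for the accumulated `xTx`/`xTy` statistics:

    import torch

    X = torch.rand(200, 50, dtype=torch.float64)   # reservoir states (T x N)
    Y = torch.rand(200, 1, dtype=torch.float64)    # training targets (T x 1)
    ridge_param = 1e-5
    xTx = X.t().mm(X)
    xTy = X.t().mm(Y)
    # w_out = (X'X + ridge * I)^(-1) X'Y, as in the hunk above
    w_out = torch.inverse(
        xTx + ridge_param * torch.eye(50, dtype=torch.float64)
    ).mm(xTy)

Note that after this change the identity matrix is always created on the CPU,
so the hunk implicitly assumes `xTx` lives there as well.
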
diff --git a/echotorch/nn/advanced/RMM.py b/echotorch/nn/machines/RMM.py
similarity index 97%
rename from echotorch/nn/advanced/RMM.py
rename to echotorch/nn/machines/RMM.py
index 8592448..76002a4 100644
--- a/echotorch/nn/advanced/RMM.py
+++ b/echotorch/nn/machines/RMM.py
@@ -27,8 +27,8 @@
# Imports
import torch
from echotorch.nn.linear.RRCell import RRCell
-from echotorch.nn.reservoir import ESN
-from echotorch.nn.Node import Node
+from ..reservoir import ESN
+from ..Node import Node
# Reservoir Memory Machines
diff --git a/echotorch/nn/advanced/__init__.py b/echotorch/nn/machines/__init__.py
similarity index 88%
rename from echotorch/nn/advanced/__init__.py
rename to echotorch/nn/machines/__init__.py
index c6def8b..cc27835 100644
--- a/echotorch/nn/advanced/__init__.py
+++ b/echotorch/nn/machines/__init__.py
@@ -19,10 +19,14 @@
#
# Copyright Nils Schaetti
+"""
+Created on 17th of July, 2020
+@author: Nils Schaetti
+"""
+
# Imports
-from .HNilsNet import HNilsNet
-from .NilsNet import NilsNet
-from .RMM import RMM
+
# All
-__all__ = ['HNilsNet', 'NilsNet', 'RMM']
+__all__ = [
+]
diff --git a/echotorch/models/reservoir/BDESN.py b/echotorch/nn/reservoir/BDESN.py
similarity index 98%
rename from echotorch/models/reservoir/BDESN.py
rename to echotorch/nn/reservoir/BDESN.py
index b66e35d..c1d3875 100644
--- a/echotorch/models/reservoir/BDESN.py
+++ b/echotorch/nn/reservoir/BDESN.py
@@ -27,7 +27,7 @@
# Imports
import torch
import torch.nn as nn
-from echotorch.nn.reservoir.BDESNCell import BDESNCell
+from .BDESNCell import BDESNCell
from echotorch.nn.linear.RRCell import RRCell
diff --git a/echotorch/nn/reservoir/EESN.py b/echotorch/nn/reservoir/EESN.py
index 7180d8a..b8474ba 100644
--- a/echotorch/nn/reservoir/EESN.py
+++ b/echotorch/nn/reservoir/EESN.py
@@ -21,7 +21,7 @@
import torch.sparse
import torch.nn as nn
-from echotorch.models.reservoir.LiESN import LiESN
+from echotorch.nn.reservoir.LiESN import LiESN
# An ESN with an embedding layer
diff --git a/echotorch/models/reservoir/ESN.py b/echotorch/nn/reservoir/ESN.py
similarity index 99%
rename from echotorch/models/reservoir/ESN.py
rename to echotorch/nn/reservoir/ESN.py
index f7d9b5b..a14f432 100644
--- a/echotorch/models/reservoir/ESN.py
+++ b/echotorch/nn/reservoir/ESN.py
@@ -28,8 +28,8 @@
import torch
import echotorch.utils.matrix_generation as mg
from echotorch.nn.linear.RRCell import RRCell
-from echotorch.nn.Node import Node
-from echotorch.nn.reservoir.ESNCell import ESNCell
+from ..Node import Node
+from .ESNCell import ESNCell
# Echo State Network module.
diff --git a/echotorch/nn/reservoir/ESNCell.py b/echotorch/nn/reservoir/ESNCell.py
index c2af541..189c029 100644
--- a/echotorch/nn/reservoir/ESNCell.py
+++ b/echotorch/nn/reservoir/ESNCell.py
@@ -29,13 +29,13 @@
import torch.sparse
from torch.autograd import Variable
import echotorch.utils
-# from echotorch.viz import Observable
+from echotorch.utils.visualisation import Observable
from ..Node import Node
# Echo State Network layer
# Basis cell for ESN.
-class ESNCell(Node):
+class ESNCell(Node, Observable):
"""
Echo State Network layer
Basis cell for ESN
@@ -69,7 +69,7 @@ def __init__(self, input_dim, output_dim, w, w_in, w_bias, input_scaling=1.0, no
)
# Init. Observable super-class
- # Observable.__init__(self)
+ Observable.__init__(self)
# Params
self._input_scaling = input_scaling
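
`ESNCell` now inherits from both `Node` and `Observable`, with the mixin
initialised explicitly. The cooperative pattern in miniature (the class
bodies below are illustrative stand-ins, not EchoTorch's actual definitions):

    class Observable(object):
        def __init__(self):
            self._observers = []

    class Node(object):
        def __init__(self, input_dim, output_dim):
            self._input_dim = input_dim
            self._output_dim = output_dim

    class CellLike(Node, Observable):
        def __init__(self, input_dim, output_dim):
            Node.__init__(self, input_dim, output_dim)
            Observable.__init__(self)  # explicit call, as in the hunk above
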
diff --git a/echotorch/nn/reservoir/FreeRunESNCell.py b/echotorch/nn/reservoir/FreeRunESNCell.py
index 03f0154..99c52ff 100644
--- a/echotorch/nn/reservoir/FreeRunESNCell.py
+++ b/echotorch/nn/reservoir/FreeRunESNCell.py
@@ -30,7 +30,7 @@
from torch.autograd import Variable
import echotorch.utils
-from echotorch.visualisation import Observable
+from echotorch.utils.visualisation import Observable
from .LiESNCell import LiESNCell
diff --git a/echotorch/nn/reservoir/HESN.py b/echotorch/nn/reservoir/HESN.py
index 26a5898..d536a81 100644
--- a/echotorch/nn/reservoir/HESN.py
+++ b/echotorch/nn/reservoir/HESN.py
@@ -20,7 +20,7 @@
# Copyright Nils Schaetti
import torch.sparse
-from echotorch.models.reservoir.LiESN import LiESN
+from echotorch.nn.reservoir.LiESN import LiESN
# ESN with input pre-trained and used with transfer learning
diff --git a/echotorch/models/reservoir/LiESN.py b/echotorch/nn/reservoir/LiESN.py
similarity index 92%
rename from echotorch/models/reservoir/LiESN.py
rename to echotorch/nn/reservoir/LiESN.py
index 7e67c10..6eb67da 100644
--- a/echotorch/models/reservoir/LiESN.py
+++ b/echotorch/nn/reservoir/LiESN.py
@@ -25,9 +25,9 @@
"""
import torch
-from echotorch.nn.reservoir.LiESNCell import LiESNCell
-from echotorch.models.reservoir.ESN import ESN
-from echotorch.nn.Node import Node
+from .LiESNCell import LiESNCell
+from echotorch.nn.reservoir.ESN import ESN
+from ..Node import Node
# Leaky-Integrated Echo State Network module
@@ -38,7 +38,7 @@ class LiESN(ESN):
# Constructor
def __init__(self, input_dim, hidden_dim, output_dim, leaky_rate, w_generator, win_generator, wbias_generator,
- input_scaling=1.0, nonlin_func=torch.tanh, learning_algo='inv',
+ spectral_radius=0.9, bias_scaling=1.0, input_scaling=1.0, nonlin_func=torch.tanh, learning_algo='inv',
ridge_param=0.0, with_bias=True, softmax_output=False, washout=0, debug=Node.NO_DEBUG, test_case=None,
dtype=torch.float32):
"""
@@ -47,6 +47,8 @@ def __init__(self, input_dim, hidden_dim, output_dim, leaky_rate, w_generator, w
:param hidden_dim: Reservoir hidden space dimension
:param output_dim: Output space dimension
:param leaky_rate: Leaky-rate
+ :param spectral_radius: Spectral radius
+ :param bias_scaling: Bias scaling
:param input_scaling: Input scaling
:param w_generator: Internal weight matrix generator
:param win_generator: Input-reservoir weight matrix generator
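
The `leaky_rate` documented here blends the previous reservoir state with the
fresh activation. A one-step sketch of the standard leaky-integrator update
that `LiESNCell` implements (bias and input-scaling terms omitted):

    import torch

    leaky_rate = 0.5
    x = torch.zeros(100)                 # reservoir state
    u = torch.rand(1)                    # input sample at time t
    W = torch.rand(100, 100) * 0.1       # recurrent weights
    Win = torch.rand(100, 1)             # input weights
    x = (1.0 - leaky_rate) * x + leaky_rate * torch.tanh(Win.mv(u) + W.mv(x))
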
diff --git a/echotorch/models/reservoir/StackedESN.py b/echotorch/nn/reservoir/StackedESN.py
similarity index 98%
rename from echotorch/models/reservoir/StackedESN.py
rename to echotorch/nn/reservoir/StackedESN.py
index 8625a70..b3d1ebe 100644
--- a/echotorch/models/reservoir/StackedESN.py
+++ b/echotorch/nn/reservoir/StackedESN.py
@@ -29,9 +29,9 @@
import torch
import torch.nn as nn
from torch.autograd import Variable
-from echotorch.nn.reservoir.LiESNCell import LiESNCell
-from echotorch.nn.linear.RRCell import RRCell
-from echotorch.nn.reservoir.ESNCell import ESNCell
+from .LiESNCell import LiESNCell
+from ..linear.RRCell import RRCell
+from .ESNCell import ESNCell
import numpy as np
diff --git a/echotorch/nn/reservoir/__init__.py b/echotorch/nn/reservoir/__init__.py
index 5d0be14..49f8308 100644
--- a/echotorch/nn/reservoir/__init__.py
+++ b/echotorch/nn/reservoir/__init__.py
@@ -20,15 +20,18 @@
# Copyright Nils Schaetti
# Imports
+from .BDESN import BDESN
from .BDESNCell import BDESNCell
from .BDESNPCA import BDESNPCA
from .DeepESN import DeepESN
from .EESN import EESN
-from echotorch.models.reservoir.ESN import ESN
+from .ESN import ESN
from .ESNCell import ESNCell
from .GatedESN import GatedESN
from .HESN import HESN
+from .LiESN import LiESN
from .LiESNCell import LiESNCell
+from .StackedESN import StackedESN
# All
__all__ = [
diff --git a/echotorch/nodes.py b/echotorch/nodes.py
deleted file mode 100644
index 1df3e12..0000000
--- a/echotorch/nodes.py
+++ /dev/null
@@ -1,509 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/nn/Node.py
-# Description : Basis node for EchoTorch.
-# Date : 29th of October, 2019
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-"""
-Created on 29 October 2019
-@author: Nils Schaetti
-"""
-
-import torch
-import torch.sparse
-import torch.nn as nn
-import numpy as np
-
-
-# Basis node for EchoTorch
-class Node(nn.Module):
- """
- Basis node for EchoTorch
- """
-
- # Debug mode
- NO_DEBUG = 0
- DEBUG_TEST = 1
- DEBUG_OUTPUT = 2
- DEBUG_TEST_CASE = 3
-
- # Colors
- HEADER = '\033[95m'
- OKBLUE = '\033[94m'
- OKGREEN = '\033[92m'
- WARNING = '\033[93m'
- FAIL = '\033[91m'
- ENDC = '\033[0m'
- BOLD = '\033[1m'
- UNDERLINE = '\033[4m'
-
- # Constructor
- def __init__(self, input_dim, output_dim, debug=NO_DEBUG, test_case=None, dtype=torch.float32):
- """
- Constructor
- :param input_dim: Node's input dimension.
- :param output_dim: Node's output dimension.
- :param debug: Set debug mode
- :param test_case: Test case to call.
- :param dtype: Node's type.
- """
- # Superclass
- super(Node, self).__init__()
-
- # Params
- self._input_dim = input_dim
- self._output_dim = output_dim
- self._debug = debug
- self._dtype = dtype
-
- # Count calls to forward
- self._forward_calls = 0
-
- # Debug points and test
- self._debug_points = dict()
- self._test_case = test_case
-
- # Trainable elements
- self._trainable_elements = list()
-
- # Handlers
- self._neural_filter_handlers = []
- self._neural_batch_filter_handlers = []
- self._post_states_update_handlers = []
- # end __init__
-
- # region PROPERTIES
-
- # Input dimension
- @property
- def input_dim(self):
- """
- Get input dimension
- """
- return self._input_dim
- # end input_dim
-
- # Set input dimension
- @input_dim.setter
- def input_dim(self, new_dim):
- """
- Set input dimension
- :param new_dim: New input dimension
- """
- self._input_dim = new_dim
- # end input_dim
-
- # Output dimension
- @property
- def output_dim(self):
- """
- Get output dimension
- """
- return self._output_dim
-
- # end output_dim
-
- # Set output dimension
- @output_dim.setter
- def output_dim(self, new_dim):
- """
- Set output dimension
- :param new_dim: New output dimension
- """
- self._output_dim = new_dim
- # end output_dim
-
- # Type
- @property
- def dtype(self):
- """
- Type
- :return: Type
- """
- return self._dtype
- # end dtype
-
- # Is the layer trainable?
- @property
- def is_trainable(self):
- """
- Is the node trainable ?
- :return: True/False
- """
- return False
- # end is_trainable
-
- # Is the layer invertible ?
- @property
- def is_invertibe(self):
- """
- Is the layer invertible ?
- :return: True/False
- """
- return False
- # end is_invertible
-
- # Supported dtypes
- @property
- def supported_dtype(self):
- """
- Supported dtypes
- """
- return [torch.float16, torch.float32, torch.float64]
- # end supported_dtype
-
- # endregion PROPERTIES
-
- # region PUBLIC
-
- # Reset learning
- def reset(self):
- """
- Reset learning
- :return:
- """
- # Training mode again
- self.train(True)
-
- # Count calls to forward
- self._forward_calls = 0
- # end reset
-
- # Forward
- def forward(self, *input, **kwargs):
- """
- Forward
- :param input:
- :return:
- """
- pass
- # end forward
-
- # Finish training
- def finalize(self):
- """
- Finish training
- """
- for e in self._trainable_elements:
- e.finalize()
- # end for
-
- # In eval mode
- self.train(False)
- # end finalize
-
- # Initialization of the node
- def initialize(self):
- """
- Initialization of the node
- """
- pass
- # end initialize
-
- # Add to elements to train when "finalize" is called
- def add_trainable(self, e):
- """
- Add to elements to finalize
- :param e: Node to finalize
- """
- if isinstance(e, Node):
- self._trainable_elements.append(e)
- # end if
- # end add_trainable
-
- # Remove element to train when "finalize" is called
- def remove_trainable(self, e):
- """
- Remove element to train when "finalize" is called
- :param e: Node to remove
- """
- if e in self._trainable_elements:
- self._trainable_elements.remove(e)
- # end if
- # end remove_trainable
-
- # Set debug mode
- def debug(self, mode):
- """
- Set debug mode
- :param mode: True/False
- """
- self._debug = mode
- # end debug
-
- # Set a debug point
- def debug_point(self, name, value, precision):
- """
- Set a debug point for comparison
- :param name: Name of the debug point (corresponding to one given by the module)
- :param value: Value of the debug point to compare (ex, matrix, scalar, etc)
- :param precision: Limit precision.
- """
- self._debug_points[name] = (value, precision)
- # end debug_point
-
- # Connect handler
- def connect(self, handler_name, handler_func):
- """
- Connect handler
- :param handler_name: Handler name
- :param handler_func: Handler function
- """
- if handler_name == "neural-filter":
- if handler_func not in self._neural_filter_handlers:
- self._neural_filter_handlers.append(handler_func)
- # end if
- elif handler_name == "neural-batch-filter":
- if handler_func not in self._neural_batch_filter_handlers:
- self._neural_batch_filter_handlers.append(handler_func)
- # end if
- elif handler_name == "post-states-update":
- if handler_func not in self._post_states_update_handlers:
- self._post_states_update_handlers.append(handler_func)
- # end if
- # end if
- # end connect
-
- # endregion PUBLIC
-
- # region PRIVATE
-
- # Matrix inverse
- def _inverse(self, name, M, code_class, code_pos):
- """
- Matrix inverse
- :param name: Name associated with M
- :param M: Matrix to inverse
- :return: Inverse matrix
- """
- if self._debug == Node.DEBUG_TEST or self._debug == Node.DEBUG_OUTPUT:
- # SVD of matrix
- _, S, _ = torch.svd(M)
-
- # Condition number
- condition_number = torch.log10(S[0] / S[-1])
-
- # Show condition number
- if self._debug == Node.DEBUG_OUTPUT:
- print(
- "DEBUG - INFO : Condition number while inversing {} : {} (at {}:{})".format(
- name,
- condition_number,
- code_class,
- code_pos
- )
- )
-
- # Bad condition number
- if condition_number > 14:
- print(
- "DEBUG - WARNING : High condition number while inversing {} : {} (at {}:{})".format(
- name,
- condition_number,
- code_class,
- code_pos
- )
- )
- # end if
- # end if
- return torch.inverse(M)
- # end _inverse
-
- # Matrix pseudo-inverse
- def _pinverse(self, name, M, code_class, code_pos):
- """
- Matrix pseudo-inverse
- :param name: Name associated with M
- :param M: Matrix to inverse
- :return: Pseudo-inverse of matrix
- """
- if self._debug == Node.DEBUG_TEST or self._debug == Node.DEBUG_OUTPUT:
- # SVD of matrix
- _, S, _ = torch.svd(M)
-
- # Condition number
- condition_number = torch.log10(S[0] / S[-1])
-
- # Show condition number
- if self._debug == Node.DEBUG_OUTPUT:
- print(
- "DEBUG - INFO : Condition number while pseudo-inversing {} : {} (at {}:{})".format(
- name,
- condition_number,
- code_class,
- code_pos
- )
- )
-
- # Bad condition number
- if condition_number > 14:
- print(
- "DEBUG - WARNING : High condition number while pseudo-inversing {} : {} (at {}:{})".format(
- name,
- condition_number,
- code_class,
- code_pos
- )
- )
- # end if
- # end if
- return torch.pinverse(M)
- # end _pinverse
-
- # Call debug point
- def _call_debug_point(self, name, value, code_class, code_pos):
- """
- Call a debug point from inside the module to compare with given values
- :param name: Name of the debug point
- :param value: Value of the debug point
- """
- # String
- error_precision = Node.WARNING + "DEBUG - ERROR: {} has precision issue! (diff: {}, module:{}, outside:{}) (at {}:{})" + Node.ENDC
- info_precision_scalar = "DEBUG - INFO: {} (scalar) has an absolute difference of {} (at {}:{})"
- info_precision_matrix = "DEBUG - INFO: {} (matrix) has a norm-2 difference of {} (at {}:{})"
- fatal_size = Node.FAIL + "DEBUG - FATAL: {} have not the same size! (module:{}, outside:{}) (at {}:{})" + Node.ENDC
- fatal_type = Node.FAIL + "DEBUG - FATAL: {} are not of the same type! (module:{}, outside:{}) (at {}:{})" + Node.ENDC
-
- # If debug point set
- if self._debug > Node.NO_DEBUG and name in self._debug_points.keys():
- # Get value
- value_from_module = value
- value_from_outside, precision = self._debug_points[name]
-
- # Test same type
- if type(value_from_module) == type(value_from_outside):
- # Type scalar
- if isinstance(value_from_module, int) or isinstance(value_from_module, float):
- # Compute absolute difference
- abs_diff = np.abs(value_from_module - value_from_outside)
-
- # In debug output, print difference
- if self._debug == Node.DEBUG_OUTPUT:
- print(info_precision_scalar.format(name, abs_diff, code_class, code_pos))
- # end if
-
- # In debug test case, call test call for evaluation
- if self._debug == Node.DEBUG_TEST_CASE:
- self._test_case.assertAlmostEqual(value_from_module, value_from_outside, precision)
- # In debug test, test if precision is ok
- elif abs_diff > precision:
- print(error_precision.format(name, value_from_module, value_from_outside, code_class, code_pos))
- # end if
- # Matrix/Tensor
- elif isinstance(value_from_module, torch.Tensor):
- # Test size
- if value_from_module.size() == value_from_outside.size():
- # Compute Frobenius norm difference
- norm_diff = torch.norm(value_from_module - value_from_outside)
-
- # In debug output, print difference
- if self._debug == Node.DEBUG_OUTPUT:
- print(info_precision_matrix.format(name, norm_diff, code_class, code_pos))
- # end if
-
- # In debug test case, call test case for evaluation
- if self._debug == Node.DEBUG_TEST_CASE:
- self._test_case.assertTensorAlmostEqual(value_from_module, value_from_outside, precision)
- if norm_diff > precision:
- print(
- error_precision.format(
- name,
- norm_diff,
- torch.norm(value_from_module),
- torch.norm(value_from_outside),
- code_class,
- code_pos
- )
- )
- # end if
- else:
- print(fatal_size.format(name, value_from_module.size(), value_from_outside.size(), code_class, code_pos))
- raise Exception()
- # end if
- else:
- print(fatal_type.format(name, type(value_from_module), type(value_from_outside), code_class, code_pos))
- raise Exception()
- # end if
- # end if
- # end _call_debug_point
-
- # Hook which gets executed before the update state equation for every sample.
- def _pre_update_hook(self, inputs, forward_i, sample_i):
- """
- Hook which gets executed before the update equation for a batch
- :param inputs: Input signal.
- :param forward_i: Index of forward call
- :param sample_i: Position of the sample in the batch.
- """
- return inputs
- # end _pre_update_hook
-
- # Hook which gets executed before the update state equation for every timesteps.
- def _pre_step_update_hook(self, inputs, forward_i, sample_i, t):
- """
- Hook which gets executed before the update equation for every timesteps
- :param inputs: Input signal.
- :param forward_i: Index of forward call
- :param sample_i: Position of the sample in the batch.
- :param t: Timestep.
- """
- return inputs
- # end _pre_step_update_hook
-
- # Hook which gets executed after the update state equation for every sample.
- def _post_update_hook(self, states, inputs, forward_i, sample_i):
- """
- Hook which gets executed after the update equation for a batch
- :param states: Reservoir's states.
- :param inputs: Input signal
- :param forward_i: Index of forward call
- :param sample_i: Batch position
- """
- return states
- # end _post_update_hook
-
- # Hook which gets executed after the update state equation for every timesteps.
- def _post_step_update_hook(self, states, inputs, forward_i, sample_i, t):
- """
- Hook which gets executed after the update equation for every timesteps
- :param states: Reservoir's states.
- :param inputs: Input signal.
- :param forward_i: Index of forward call
- :param sample_i: Position of the sample in the batch
- :param t: Timestep
- """
- return states
- # end _post_step_update_hook
-
- # endregion PRIVATE
-
- # region OVERRIDE
-
- # Extra-information
- def extra_repr(self):
- """
- Extra-information
- :return: String
- """
- s = '{_input_dim}, {_output_dim}'
- return s.format(**self.__dict__)
- # end extra_repr
-
- # endregion OVERRIDE
-
-# end Node
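
The deleted `echotorch/nodes.py` shares its blob hash (1df3e12) with the
pre-change `echotorch/nn/Node.py`, so this removes an identical duplicate;
the class itself survives. A minimal subclass against the surviving module:

    from echotorch.nn import Node

    class PassThrough(Node):
        """Toy node that forwards its input unchanged."""
        def __init__(self, dim):
            super(PassThrough, self).__init__(input_dim=dim, output_dim=dim)
        # end __init__

        def forward(self, x):
            return x
        # end forward
    # end PassThrough
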
diff --git a/echotorch/series.py b/echotorch/series.py
deleted file mode 100644
index 28c99ef..0000000
--- a/echotorch/series.py
+++ /dev/null
@@ -1,629 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/series.py
-# Description : Utility functions to generate timeseries
-# Date : 25th of January, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-from typing import Union
-import torch
-import torchvision
-import echotorch.data as etds
-import echotorch.utils.evaluation as etev
-import echotorch.transforms.images as etim
-import echotorch.transforms.targets as etta
-
-
-# Generate Copy Task series
-def copytask(
- size: tuple, length_min: int, length_max: int, n_inputs: int, return_db: bool = False, dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Generate a dataset or series for the Copy task
- @param size: How many series to generate
- @param length_min: Minimum length
- @param length_max: Maximum length
- @param n_inputs: Number of inputs
- @param return_db: Return the dataset (True) or series (False)
- @param dtype: Data type of the output series
- @return: An EchoDataset or a tensor of series
- """
- # The dataset
- dataset = etds.CopyTaskDataset(
- n_samples=size,
- length_min=length_min,
- length_max=length_max,
- n_inputs=n_inputs,
- dtype=dtype
- )
-
- if return_db:
- return dataset
- else:
- return dataset.data
- # end if
-# end copytask
-
-
-# Compose a dataset
-def compose(
- datasets: list
-) -> etds.EchoDataset:
- """
- Compose a dataset from a list of datasets
- @param datasets: A list of datasets
- @return: A new EchoDataset composed of the dataset in the given list
- """
- return etds.DatasetComposer(
- datasets=datasets
- )
-# end compose
-
-
-# Create cross validation dataset
-def cross_eval(
- root_dataset, k=10, dev_ratio=0, shuffle=False, train_size=1.0, fold=0, mode='train', sample_indices=None,
- return_multiple_dataset=False
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Create a cross validation dataset from a root dataset
- @param root_dataset: Root dataset (EchoDataset)
- @param k: Number of folds
- @param dev_ratio: Ratio of the dev set
- @param shuffle:
- @param train_size:
- @param fold:
- @param mode:
- @param sample_indices:
- @param return_multiple_dataset:
- @return:
- """
- if not return_multiple_dataset:
- return etev.CrossValidationWithDev(
- root_dataset=root_dataset,
- k=k,
- mode=mode,
- samples_indices=sample_indices,
- fold=fold,
- train_size=train_size,
- dev_ratio=dev_ratio,
- shuffle=shuffle
- )
- else:
- cv10_datasets = dict()
- for dataset_type in ['train', 'dev', 'test']:
- cv10_datasets[dataset_type] = etev.CrossValidationWithDev(
- root_dataset=root_dataset,
- k=k,
- dev_ratio=dev_ratio,
- shuffle=shuffle,
- train_size=train_size,
- fold=fold,
- mode=dataset_type,
- samples_indices=sample_indices
- )
- # end for
- return cv10_datasets
- # end if
-# end cross_eval
-
-
-# Load Time series from a CSV file
-def csv_file(
- csv_file: str, delimiter: str, quotechar: str, columns: list, return_db: bool = False, dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Load Timeseries from a CSV file
- :param csv_file:
- :param delimiter:
- :param quotechar:
- :param columns:
- :param return_db:
- :param dtype:
- """
- if return_db:
- return etds.FromCSVDataset(
- csv_file=csv_file,
- columns=columns,
- delimiter=delimiter,
- quotechar=quotechar,
- dtype=dtype
- )
- else:
- return etds.FromCSVDataset.generate(
- csv_file=csv_file,
- delimiter=delimiter,
- quotechar=quotechar,
- columns=columns,
- dtype=dtype
- )
- # end if
-# end csv_file
-
-
-# Delay dataset
-def delaytask(
- root_dataset: etds.EchoDataset, delay: int, data_index: int = 0, keep_indices: bool = None
-) -> etds.EchoDataset:
- """
- Delay dataset
- """
- return etds.DelayDataset(
- root_dataset=root_dataset,
- n_delays=delay,
- data_index=data_index,
- keep_indices=keep_indices
- )
-# end delaytask
-
-
-# Generate Discrete Markov Chain dataset
-def discrete_markov_chain(
- size, length, n_states, probability_matrix, start_state=0, return_db=False, dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Generate series of Discrete Markov Chain directly or through a dataset
- @param size:
- @param length:
- @param n_states:
- @param probability_matrix:
- @param start_state:
- @param return_db:
- @param dtype:
- @return:
- """
- if return_db:
- return etds.DiscreteMarkovChainDataset(
- n_samples=size,
- sample_length=length,
- probability_matrix=probability_matrix
- )
- else:
- samples = list()
- for sample_i in range(size):
- samples.append(etds.DiscreteMarkovChainDataset.generate(
- length=length,
- n_states=n_states,
- probability_matrix=probability_matrix,
- start_state=start_state,
- dtype=dtype
- ))
- # end for
- return samples
- # end if
-# end discrete_markov_chain
-
-
-# Henon attractor
-def henon(
- size: int,
- length: int,
- xy: int,
- a: int,
- b: int,
- washout: int = 0,
- normalize: bool = False,
- return_db: bool = False,
- dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """Generate a series with the Hénon map dynamical system.
-
- The Hénon-Pomeau attractor is a dynamical system which exhibits chaotic behavior. Each point :math:`(x_n, y_n)` in
- the plane is mapped to the new point
-
- .. math::
- x_{n+1} = 1 - a x_n^2 + y_n, \\qquad y_{n+1} = b x_n
-
- :param size: How many samples to generate
- :type size: int
- :param length: Length of samples (time)
- :type length: int
- :param xy: Parameter
- :type xy: int
- :param a: Parameter
- :type a: int
- :param b: Parameter
- :type b: int
- :param washout: Time steps to remove at the beginning of samples
- :type washout: int
- :param normalize: Normalize samples
- :type normalize: bool
- :param return_db: Return the database object
- :type return_db: bool
- :param dtype: Tensor data type
- :type dtype: ``torch.dtype``
-
- Examples::
- >>> echotorch.henon(1, 100, 1, 2, 3)
- timetensor([...])
- """
- if return_db:
- return etds.HenonAttractor(
- sample_len=length,
- n_samples=size,
- xy=xy,
- a=a,
- b=b,
- washout=washout,
- normalize=normalize
- )
- else:
- return etds.HenonAttractor.generate(
- n_samples=size,
- sample_len=length,
- xy=xy,
- a=a,
- b=b,
- washout=washout,
- normalize=normalize,
- dtype=dtype
- )
- # end if
-# end henon
-
-
-# From images to time series
-def images(
- image_dataset: etds.EchoDataset,
- n_images: int,
- transpose: bool
-) -> Union[etds.EchoDataset, torch.Tensor]:
- return etds.ImageToTimeseries(
- image_dataset=image_dataset,
- n_images=n_images,
- transpose=transpose
- )
-# end images
-
-
-# Create series from a function
-def lambda_dataset(
- size: int, length: int, func: callable, start: int = 0, dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Create series from a function
- @param size:
- @param length:
- @param func:
- @param start:
- @param dtype:
- @return:
- """
- return etds.LambdaDataset(
- sample_len=length,
- n_samples=size,
- func=func,
- start=start,
- dtype=dtype
- )
-# end lambda_dataset
-
-
-# Latch task dataset
-def latch(
- size: int, length_min: int, length_max: int, n_pics: int, dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Latch task dataset
- @param size:
- @param length_min:
- @param length_max:
- @param n_pics:
- @param dtype:
- @return:
- """
- return etds.LatchTaskDataset(
- n_samples=size,
- length_min=length_min,
- length_max=length_max,
- n_pics=n_pics,
- dtype=dtype
- )
-# end latch
-
-
-# Dataset from the logistic map
-def logistic_map(
- size: int, length: int, alpha: float = 5, beta: float = 11, gamma: float = 13, c: float = 3.6, b: float = 0.13,
- seed: int = None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Dataset from the logistic map
- @param size:
- @param length:
- @param alpha:
- @param beta:
- @param gamma:
- @param c:
- @param b:
- @param seed:
- @return:
- """
- return etds.LogisticMapDataset(
- sample_len=length,
- n_samples=size,
- alpha=alpha,
- beta=beta,
- gamma=gamma,
- c=c,
- b=b,
- seed=seed
- )
-# end logistic_map
-
-
-# Mackey Glass time series
-def mackey_glass(
- size, length, tau=17, return_db=False, dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Mackey Glass timeseries
- """
- if return_db:
- return etds.MackeyGlassDataset(
- sample_len=length,
- n_samples=size,
- tau=tau
- )
- else:
- samples = list()
- for sample_i in range(size):
- samples.append(etds.MackeyGlassDataset.generate(
-
- ))
- # end for
- return samples
- # end if
-# end mackey_glass
-
-
-# Mackey Glass time series
-def mackey_glass_2d(
- size, length, subsample_rate, tau=17, normalize=False, seed=None, return_db=False, dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Mackey Glass timeseries
- """
- if return_db:
- return etds.MackeyGlass2DDataset(
- sample_len=length,
- n_samples=size,
- tau=tau,
- subsample_rate=subsample_rate,
- normalize=normalize
- )
- else:
- pass
- # end if
-# end mackey_glass
-
-
-# Markov Chain Dataset
-def markov_chain(
- size: int, length: int, datasets: list, states_length: int, morphing_length: int,
- probability_matrix: torch.Tensor, random_start: int = 0, *args, **kwargs
-) -> Union[etds.EchoDataset, torch.Tensor]:
- return etds.MarkovChainDataset(
- datasets=datasets,
- states_length=states_length,
- morphing_length=morphing_length,
- n_samples=size,
- sample_length=length,
- probability_matrix=probability_matrix,
- random_start=random_start,
- *args,
- **kwargs
- )
-# end markov_chain
-
-
-# MemTest dataset
-def memtest(
- size: int, length: int, n_delays: int = 10, seed: int = None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- MemTest dataset
- @param size:
- @param length:
- @param n_delays:
- @param seed:
- @return:
- """
- return etds.MemTestDataset(
- sample_len=length,
- n_samples=size,
- n_delays=n_delays,
- seed=seed
- )
-# end memtest
-
-
-# NARMA
-def narma(
- size: int, length: int, order: int = 10, return_db: bool = False, dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Generate NARMA-x series or dataset
- @param size:
- @param length:
- @param order:
- @param return_db:
- @param dtype:
- @return:
- """
- if return_db:
- return etds.NARMADataset(
- sample_len=length,
- n_samples=size,
- system_order=order
- )
- else:
- return etds.NARMADataset.generate(
- sample_len=length,
- n_samples=size,
- system_order=order,
- dtype=dtype
- )
- # end if
-# end narma
-
-
-# NARMA-10
-def narma10(
- size: int, length: int, return_db: bool = False, dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- NARMA-10
- """
- if return_db:
- return etds.NARMADataset(
- sample_len=length,
- n_samples=size,
- system_order=10
- )
- else:
- return etds.NARMADataset.generate(
- sample_len=length,
- n_samples=size,
- system_order=10,
- dtype=dtype
- )
- # end if
-# end narma10
-
-
-# NARMA-30
-def narma30(
- size: int, length: int, return_db: bool = False, dtype=None
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- NARMA-30
- """
- if return_db:
- return etds.NARMADataset(
- sample_len=length,
- n_samples=size,
- system_order=30
- )
- else:
- return etds.NARMADataset.generate(
- sample_len=length,
- n_samples=size,
- system_order=30,
- dtype=dtype
- )
- # end if
-# end narma30
-
-
-# Segment series
-def segment_series(
- root_dataset: etds.EchoDataset, window_size: int, data_indices: list, stride: int, remove_indices: list,
- time_axis: int = 0, dataset_in_memory: bool = False, *args, **kwargs
-) -> etds.EchoDataset:
- """
- Segment the series in a dataset with a sliding window
- @param root_dataset:
- @param window_size:
- @param data_indices:
- @param stride:
- @param remove_indices:
- @param time_axis:
- @param dataset_in_memory:
- @param args:
- @param kwargs:
- @return:
- """
- return etds.TimeseriesBatchSequencesDataset(
- root_dataset=root_dataset,
- window_size=window_size,
- data_indices=data_indices,
- stride=stride,
- remove_indices=remove_indices,
- time_axis=time_axis,
- dataset_in_memory=dataset_in_memory,
- *args,
- **kwargs
- )
-# end segment_series
-
-
-# MNIST series and dataset
-def mnist(
- image_size: int, degrees: list, root: str = ".", download: bool = True, block_size: int = 100, return_db=False
-) -> Union[etds.EchoDataset, torch.Tensor]:
- """
- Load (and download) the MNIST dataset
- @param image_size: Final image size (after crop and resize)
- @param degrees: List of rotation degrees to apply
- @param root: Root directory for the dataset (if downloaded)
- @param download: Download the dataset if not present ?
- @param block_size: The number of images per block
- @param return_db: True to return the dataset, False otherwise
- @return: Dataset or Tensor
- """
- # Concat rotation and crop
- transforms = [etim.CropResize(size=image_size)]
-
- # Add each composition
- for degree in degrees:
- transforms.append(
- torchvision.transforms.Compose([
- etim.Rotate(degree=degree),
- etim.CropResize(size=image_size)
- ])
- )
- # end for
-
- # Create the dataset (train)
- train_dataset = etds.ImageToTimeseries(
- torchvision.datasets.MNIST(
- root=root,
- train=True,
- download=download,
- transform=torchvision.transforms.Compose([
- etim.Concat(transforms, sequential=True),
- torchvision.transforms.ToTensor()
- ]),
- target_transform=etta.ToOneHot(class_size=10)
- ),
- n_images=block_size
- )
-
- # Create the dataset (test)
- test_dataset = etds.ImageToTimeseries(
- torchvision.datasets.MNIST(
- root=root,
- train=False,
- download=download,
- transform=torchvision.transforms.Compose([
- etim.Concat(transforms, sequential=True),
- torchvision.transforms.ToTensor()
- ]),
- target_transform=etta.ToOneHot(class_size=10)
- ),
- n_images=block_size
- )
-
- # If db or tensor
- if return_db:
- return train_dataset, test_dataset
- else:
- return train_dataset.generate(), test_dataset.generate()
- # end if
-# end mnist
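
Among the helpers removed above, the `narma*` functions wrap a standard
benchmark recurrence. For reference, the usual NARMA-10 system such datasets
generate (the exact constants used by `NARMADataset` may differ):

    import torch

    length, order = 200, 10
    u = torch.rand(length) * 0.5            # input driving sequence
    y = torch.zeros(length)
    for t in range(order, length - 1):
        # y(t+1) = 0.3 y(t) + 0.05 y(t) sum_{i=0..9} y(t-i)
        #          + 1.5 u(t-9) u(t) + 0.1
        y[t + 1] = 0.3 * y[t] \
            + 0.05 * y[t] * y[t - order + 1:t + 1].sum() \
            + 1.5 * u[t - order + 1] * u[t] + 0.1
    # end for
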
diff --git a/echotorch/series_ops.py b/echotorch/series_ops.py
deleted file mode 100644
index a201a28..0000000
--- a/echotorch/series_ops.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/series_ops.py
-# Description : Series transformation operations
-# Date : 18th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel
-# University of Geneva
-
-# Imports
-from .timetensors import TimeTensor
-
-
-# Difference operator
-def diff(
- input: TimeTensor
-) -> TimeTensor:
- r"""The difference operator.
-
- The difference operator computes the difference between the time series :math:`x(t)` at times :math:`t` and :math:`t+1`.
-
- .. math::
- diff(x) = x(t+1) - x(t)
-
- :param input: input timeseries.
- :type input: ``TimeTensor``
- :return: The difference between :math:`x` at time :math:`t` and :math:`t+1` as a ``TimeTensor``.
-
- Example:
-
- >>> x = echotorch.rand(5, time_length=100)
- >>> df = echotorch.diff(x)
- """
- # Time length must be > 1
- if input.tlen <= 1:
- raise ValueError("The input timeseries must have a length of at least 2 (here {})".format(input.tlen))
- # end if
-
- # Construct the indexer
- t_index = [slice(None, None)] * input.bdim
- t_index += [slice(None, -1)]
-
- # Indexers
- tp_index = list(t_index)
- tp_index[input.time_dim] = slice(1, None)
-
- return input[tuple(tp_index)] - input[tuple(t_index)]
-# end diff
-
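
The removed `echotorch.diff` computed the first difference along a
`TimeTensor`'s time axis. The same operation on a plain tensor with time as
the leading dimension:

    import torch

    x = torch.rand(100, 5)     # 100 time steps, 5 channels
    dx = x[1:] - x[:-1]        # x(t+1) - x(t), shape (99, 5)
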
diff --git a/echotorch/skecho/__init__.py b/echotorch/skecho/__init__.py
deleted file mode 100644
index 6d5fb68..0000000
--- a/echotorch/skecho/__init__.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/utils/esko/__init__.py
-# Description : EchoTorch to Sklearn subpackage init file.
-# Date : 3th of May, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-"""
-Created on 3 May 2021
-@author: Nils Schaetti
-"""
-
-# Imports
-from .esn_regressor import ESNRegressor
-from .esn_classifier import ESNClassifier
-from .esn_predictor import ESNPredictor
-
-# All
-__all__ = [
- 'ESNPredictor', 'ESNClassifier', 'ESNRegressor'
-]
diff --git a/echotorch/skecho/callbacks/__init__.py b/echotorch/skecho/callbacks/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/skecho/esn.py b/echotorch/skecho/esn.py
deleted file mode 100644
index f8360c9..0000000
--- a/echotorch/skecho/esn.py
+++ /dev/null
@@ -1,308 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : utils/helpers/ESN.py
-# Description : Helper class for ESN classifier
-# Date : 27th of April, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-
-
-# ESN base class
-class ESN:
- """ESN base class
-
- ESN base class
-
- Parameters
- ----------
-
- Attributes
- ----------
- """
-
- # region CONSTRUCTORS
-
- def __init__(
- self
- ):
- pass
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
- # History
- @property
- def history(self):
- """History (getter)
- TODO
- """
- return None
- # end history
-
- # History (setter)
- @history.setter
- def history(self, value):
- """History (setter)
- TODO
- """
- pass
- # end history
-
- # Default callbacks (getter)
- @property
- def _default_callbacks(self):
- """Default callbacks (getter)
- TODO
- """
- return None
- # end _default_callbacks
-
- # endregion PROPERTIES
-
- # region PUBLIC
-
- # Get default callbacks
- def get_default_callbacks(self):
- """Get default callbacks
- TODO
- """
- return None
- # end get_default_callbacks
-
- # Notify
- def notify(self, method_name, **cb_kwargs):
- """Call the callback method specified in ``method_name`` with
- parameters specified in ``cb_kwargs``.
-
- Method names can be one of:
- * on_train_begin
- * on_train_end
- * on_batch_begin
- * on_batch_end
-
- """
- # Call the method
- getattr(self, method_name)(self, **cb_kwargs)
-
- # Call each callback
- for _, cb in self.callbacks_:
- getattr(cb, method_name)(self, **cb_kwargs)
- # end for
- # end notify
-
- # On train begin
- def on_train_begin(self, net, X=None, y=None, **kwargs):
- """On train begin
- TODO
- """
- pass
- # end on_train_begin
-
- # On train end
- def on_train_end(self, net, X=None, y=None, **kwargs):
- """On train end
- TODO
- """
- pass
- # end on_train_end
-
- # On batch begin
- def on_batch_begin(self, net, Xi=None, yi=None, training=False, **kwargs):
- """On batch begin
- TODO
- """
- pass
- # end on_batch_begin
-
- # On batch end
- def on_batch_end(self, net, Xi=None, yi=None, training=False, **kwargs):
- """On batch end
- TODO
- """
- pass
- # end on_batch_end
-
- # Initialize callbacks
- def initialize_callbacks(self):
- """Initializes all callbacks and save the results in the
- ``callbacks_`` attribute.
- TODO
- """
- pass
- # end initialize_callbacks
-
- # Initialize module
- def initialize_module(self):
- """Initialize the module.
-
- Note that if the module has learned parameters, those will be
- reset.
- TODO
- """
- pass
- # end initialize_module
-
- # Initialize virtual parameters
- def initialize_virtual_params(self):
- """Initialize virtual parameters
- TODO"""
- pass
- # end initialize_virtual_param
-
- # Initialize history
- def initialize_history(self):
- """Initializes the history.
- TODO"""
- pass
- # end initialize_history
-
- # Initialize
- def initialize(self):
- """Initialize
- TODO"""
- pass
- # end initialize
-
- # Check data
- def check_data(self, X, y=None):
- """Check data
- TODO"""
- pass
- # end check_data
-
- # Train step
- def train_step(self, Xi, yi, **fit_params):
- """TODO"""
- pass
- # end train_step
-
- # Evaluation step
- def evaluation_step(self, Xi, training=False):
- """TODO"""
- pass
- # end evaluation_step
-
- # Fit loop
- def fit_loop(self, X, y=None, **fit_params):
- """Fit loop.
- TODO"""
- pass
- # end fit_loop
-
- # Partial fit
- def partial_fit(self, X, y=None, classes=None, **fit_params):
- """Fit the module. TODO"""
- pass
- # end partial_loop
-
- # Fit
- def fit(self, X, y=None, **fit_params):
- """Initialize and fit the module. TODO"""
- pass
- # end fit
-
- # Check is fitted
- def check_is_fitted(self, attributes=None, *args, **kwargs):
- """Check is fitted. TODO"""
- pass
- # end check_is_fitted
-
- # Forward iter
- def forward_iter(self, X, training=False, device='cpu'):
- """TODO"""
- pass
- # end forward_iter
-
- # Forward
- def forward(self, X, training=False, device='cpu'):
- """TODO"""
- pass
- # end forward
-
- # Infer
- def infer(self, x, **fit_params):
- """Perform a single inference step on a batch of data. TODO"""
- pass
- # end infer
-
- # Predict probabilities
- def predict_proba(self, X):
- """Predict proba. TODO"""
- pass
- # end predict_proba
-
- # Predict
- def predict(self, X):
- """TODO"""
- pass
- # end predict
-
- # Get dataset
- def get_dataset(self, X, y=None):
- """TODO"""
- pass
- # end get_dataset
-
- # endregion PUBLIC
-
- # region PRIVATE
-
- # Yield callbacks
- def _yield_callbacks(self):
- """Yield all callbacks
- TODO
- """
- pass
- # end _yield_callbacks
-
- # Callback grouped by name
- def _callbacks_grouped_by_name(self):
- """Callback grouped by name
- TODO
- """
- pass
- # end _callbacks_grouped_by_name
-
- # Uniquely named callbacks
- def _uniquely_named_callbacks(self):
- """Make sure that the returned dict of named callbacks is unique...
- TODO
- """
- pass
- # end _uniquely_named_callbacks
-
- # Format reinitialisation message
- def _format_reinit_msg(self, name, kwargs=None, triggered_directly=True):
- """Format reinitialisation message
- TODO
- """
- pass
- # end _format_reinit_msg
-
- # Is virtual parameter
- def _is_virtual_param(self, key):
- """Is virtual parameter
- TODO"""
- pass
- # end _is_virtual_param
-
- # endregion PRIVATE
-
-# end ESN
diff --git a/echotorch/skecho/esn_classifier.py b/echotorch/skecho/esn_classifier.py
deleted file mode 100644
index 98a3e80..0000000
--- a/echotorch/skecho/esn_classifier.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/skecho/esn_classifier.py
-# Description : Helper class for ESN classifier
-# Date : 27th of April, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import Union, List, Dict
-import torch
-import numpy as np
-import echotorch.nn.reservoir
-import echotorch.datasets
-from sklearn.base import ClassifierMixin
-
-# Imports local
-from .esn import ESN
-
-
-# ESN classifier, documentation start
-esn_clf_doc_start = """TODO"""
-
-# ESN classifier, additional text
-esn_clf_additional_text = """TODO"""
-
-# ESN classifier, additional attribute
-esn_clf_additional_attribute = """TODO"""
-
-
-# ESN Helper class for classification
-class ESNClassifier(ESN, ClassifierMixin):
- """ESN helper class for classification
-
- ESNClassifier description.
-
- Parameters
- ----------
- input_dim : int
- The size of the input layer.
-
- reservoir_size : int
- The size of the reservoir recurrent layer.
-
- leaky_rate : float
- The leaky integrator parameter.
-
- Attributes
- ----------
- module: torch module (instance)
- The instantiated module.
-
- """
-
- # region CONSTRUCTORS
-
- # Constructors
- def __init__(
- self, input_dim, reservoir_size, *args, leaky_rate=1.0, input_scaling=1.0, nonlin_func=torch.tanh,
- w=None, w_in=None, w_bias=None, learning_algo='inv', ridge_param=0.0, with_bias=True, softmax_output=False,
- washout=0, **kwargs
- ):
- # Create the ESN model
- self.module = echotorch.nn.reservoir.LiESN(
- input_dim=input_dim,
- hidden_dim=reservoir_size,
- leaky_rate=leaky_rate
- )
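-        # NOTE: the remaining arguments (input_scaling, nonlin_func, w, w_in,
-        # w_bias, learning_algo, ridge_param, ...) are accepted but not yet
-        # forwarded to the LiESN module; only the three parameters above are
-        # wired up in this stub.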
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
-    # Default callbacks (getter)
-    @property
-    def _default_callbacks(self):
- """
- TODO
- """
- pass
- # end _default_callbacks
-
- # endregion PROPERTIES
-
- # region PUBLIC
-
- # Initialize and fit the module
- def fit(self, X, y=None, **fit_params):
- """Initialize and fit the module.
-
-        If the module was already initialized, calling ``fit`` will
-        re-initialize it (unless ``warm_start`` is True).
-
- Parameters
- ----------
- X : input data, compatible with echotorch.datasets.EchoDataset
- By default, you should be able to pass:
-
- * numpy arrays
- * torch tensors
- * echotorch timetensors
- * pandas DataFrame or Series
- * scipy sparse CSR matrices
- * a dictionary of the former three
- * a list/tuple of the former three
- * a Dataset
-
- If this doesn't work with your data, you have to pass a
- ``Dataset`` that can deal with the data.
-
- y : target data, compatible with echotorch.datasets.EchoDataset
- The same data types as for ``X`` are supported. If your X is
- a Dataset that contains the target, ``y`` may be set to
- None.
-
- **fit_params : dict
- @todo
-
- """
- pass
- # end fit
-
- # Return the class labels for samples in X.
-    def predict(self, X: Union[np.ndarray, torch.Tensor, List, echotorch.datasets.EchoDataset, Dict]):
- """Where applicable, return class labels for samples in X.
-
- If the module's forward method returns multiple outputs as a
- tuple, it is assumed that the first output contains the
- relevant information and the other values are ignored. If all
- values are relevant, consider using
- :func:`~skorch.NeuralNet.forward` instead.
-
- Parameters
- ----------
- X : input data, compatible with skorch.dataset.Dataset
- By default, you should be able to pass:
-
- * numpy arrays
- * torch tensors
- * pandas DataFrame or Series
- * scipy sparse CSR matrices
- * a dictionary of the former three
- * a list/tuple of the former three
- * a Dataset
-
- If this doesn't work with your data, you have to pass a
- ``Dataset`` that can deal with the data.
-
- Returns
- -------
- y_pred : numpy ndarray
-
- """
- pass
- # end predict
-
- # endregion PUBLIC
-
- # region PRIVATE
-
- # endregion PRIVATE
-
-# end ESNClassifier
diff --git a/echotorch/skecho/esn_predictor.py b/echotorch/skecho/esn_predictor.py
deleted file mode 100644
index 0c019c6..0000000
--- a/echotorch/skecho/esn_predictor.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/skecho/esn_predictor.py
-# Description : Helper class for ESN prediction
-# Date : 3rd of May, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import Union, List, Dict
-import torch
-import numpy as np
-import echotorch.nn.reservoir
-import echotorch.datasets
-from sklearn.base import RegressorMixin
-
-
-# ESN prediction, documentation start
-esn_pred_doc_start = """TODO"""
-
-# ESN prediction, additional text
-esn_pred_additional_text = """TODO"""
-
-# ESN prediction, additional attribute
-esn_pred_additional_attribute = """TODO"""
-
-
-# ESN Helper class for prediction
-class ESNPredictor(RegressorMixin):
- """ESN helper class for prediction
-
- ESNPredictor description.
-
- Parameters
- ----------
- input_dim : int
- The size of the input layer.
-
- reservoir_size : int
- The size of the reservoir recurrent layer.
-
- leaky_rate : float
- The leaky integrator parameter.
-
- Attributes
- ----------
- module: torch module (instance)
- The instantiated module.
-
- """
-
- # region CONSTRUCTORS
-
- # Constructors
- def __init__(
- self, input_dim, reservoir_size, *args, leaky_rate=1.0, input_scaling=1.0, nonlin_func=torch.tanh,
- w=None, w_in=None, w_bias=None, learning_algo='inv', ridge_param=0.0, with_bias=True, softmax_output=False,
- washout=0, **kwargs
- ):
- # Create the ESN model
- self.module = echotorch.nn.reservoir.LiESN(
- input_dim=input_dim,
- hidden_dim=reservoir_size,
- leaky_rate=leaky_rate
- )
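-        # NOTE: as in ESNClassifier, the remaining constructor arguments are
-        # accepted but not yet forwarded to the LiESN module.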
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PUBLIC
-
- # Initialize and fit the module
- def fit(self, X, y=None, **fit_params):
- """Initialize and fit the module.
-
-        If the module was already initialized, calling ``fit`` will
-        re-initialize it (unless ``warm_start`` is True).
-
- Parameters
- ----------
- X : input data, compatible with echotorch.datasets.EchoDataset
- By default, you should be able to pass:
-
- * numpy arrays
- * torch tensors
- * echotorch timetensors
- * pandas DataFrame or Series
- * scipy sparse CSR matrices
- * a dictionary of the former three
- * a list/tuple of the former three
- * a Dataset
-
- If this doesn't work with your data, you have to pass a
- ``Dataset`` that can deal with the data.
-
- y : target data, compatible with echotorch.datasets.EchoDataset
- The same data types as for ``X`` are supported. If your X is
- a Dataset that contains the target, ``y`` may be set to
- None.
-
- **fit_params : dict
- @todo
-
- """
- pass
- # end fit
-
-    # Return predictions for samples in X.
-    def predict(self, X: Union[np.ndarray, torch.Tensor, List, echotorch.datasets.EchoDataset, Dict]):
-        """Where applicable, return predictions for samples in X.
-
- If the module's forward method returns multiple outputs as a
- tuple, it is assumed that the first output contains the
- relevant information and the other values are ignored. If all
- values are relevant, consider using
- :func:`~skorch.NeuralNet.forward` instead.
-
- Parameters
- ----------
- X : input data, compatible with skorch.dataset.Dataset
- By default, you should be able to pass:
-
- * numpy arrays
- * torch tensors
- * pandas DataFrame or Series
- * scipy sparse CSR matrices
- * a dictionary of the former three
- * a list/tuple of the former three
- * a Dataset
-
- If this doesn't work with your data, you have to pass a
- ``Dataset`` that can deal with the data.
-
- Returns
- -------
- y_pred : numpy ndarray
-
- """
- pass
- # end predict
-
- # endregion PUBLIC
-
- # region PRIVATE
-
- # endregion PRIVATE
-
-# end ESNPredictor
diff --git a/echotorch/skecho/esn_regressor.py b/echotorch/skecho/esn_regressor.py
deleted file mode 100644
index 6213dcf..0000000
--- a/echotorch/skecho/esn_regressor.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/skecho/esn_regressor.py
-# Description : Helper class for ESN regressor
-# Date : 3rd of May, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import Union, List, Dict
-import torch
-import numpy as np
-import echotorch.nn.reservoir
-import echotorch.datasets
-from sklearn.base import RegressorMixin
-
-
-# ESN regressor, documentation start
-esn_rgs_doc_start = """NeuralNet for regression tasks.
-
-    Use this specifically if you have a standard regression task,
-    with input data X and target y.
-"""
-
-# ESN regressor, additional text
-esn_rgs_additional_text = """
- criterion : torch criterion (class, default=torch.nn.NLLLoss)
- Negative log likelihood loss. Note that the module should return
- probabilities, the log is applied during ``get_loss``.
- classes : None or list (default=None)
- If None, the ``classes_`` attribute will be inferred from the
- ``y`` data passed to ``fit``. If a non-empty list is passed,
- that list will be returned as ``classes_``. If the initial
- skorch behavior should be restored, i.e. raising an
- ``AttributeError``, pass an empty list."""
-
-# ESN regressor, additional attribute
-esn_rgs_additional_attribute = """classes_ : array, shape (n_classes, )
- A list of class labels known to the regressor.
-"""
-
-
-# ESN Helper class for regression
-class ESNRegressor(RegressorMixin):
- """ESN helper class for regression
-
- ESNRegressor description.
-
- Parameters
- ----------
- input_dim : int
- The size of the input layer.
-
- reservoir_size : int
- The size of the reservoir recurrent layer.
-
- leaky_rate : float
- The leaky integrator parameter.
-
- Attributes
- ----------
- module: torch module (instance)
- The instantiated module.
-
- """
-
- # region CONSTRUCTORS
-
- # Constructors
- def __init__(
- self, input_dim, reservoir_size, *args, leaky_rate=1.0, input_scaling=1.0, nonlin_func=torch.tanh,
- w=None, w_in=None, w_bias=None, learning_algo='inv', ridge_param=0.0, with_bias=True, softmax_output=False,
- washout=0, **kwargs
- ):
- # Create the ESN model
- self.module = echotorch.nn.reservoir.LiESN(
- input_dim=input_dim,
- hidden_dim=reservoir_size,
- leaky_rate=leaky_rate
- )
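-        # NOTE: as in the other skecho helpers, the remaining constructor
-        # arguments are accepted but not yet forwarded to the LiESN module.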
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PUBLIC
-
- # Initialize and fit the module
- def fit(self, X, y=None, **fit_params):
- """Initialize and fit the module.
-
-        If the module was already initialized, calling ``fit`` will
-        re-initialize it (unless ``warm_start`` is True).
-
- Parameters
- ----------
- X : input data, compatible with echotorch.datasets.EchoDataset
- By default, you should be able to pass:
-
- * numpy arrays
- * torch tensors
- * echotorch timetensors
- * pandas DataFrame or Series
- * scipy sparse CSR matrices
- * a dictionary of the former three
- * a list/tuple of the former three
- * a Dataset
-
- If this doesn't work with your data, you have to pass a
- ``Dataset`` that can deal with the data.
-
- y : target data, compatible with echotorch.datasets.EchoDataset
- The same data types as for ``X`` are supported. If your X is
- a Dataset that contains the target, ``y`` may be set to
- None.
-
- **fit_params : dict
- @todo
-
- """
- pass
- # end fit
-
-    # Return predictions for samples in X.
-    def predict(self, X: Union[np.ndarray, torch.Tensor, List, echotorch.datasets.EchoDataset, Dict]):
-        """Where applicable, return predictions for samples in X.
-
- If the module's forward method returns multiple outputs as a
- tuple, it is assumed that the first output contains the
- relevant information and the other values are ignored. If all
- values are relevant, consider using
- :func:`~skorch.NeuralNet.forward` instead.
-
- Parameters
- ----------
- X : input data, compatible with skorch.dataset.Dataset
- By default, you should be able to pass:
-
- * numpy arrays
- * torch tensors
- * pandas DataFrame or Series
- * scipy sparse CSR matrices
- * a dictionary of the former three
- * a list/tuple of the former three
- * a Dataset
-
- If this doesn't work with your data, you have to pass a
- ``Dataset`` that can deal with the data.
-
- Returns
- -------
- y_pred : numpy ndarray
-
- """
- pass
- # end predict
-
- # endregion PUBLIC
-
- # region PRIVATE
-
- # endregion PRIVATE
-
-# end ESNRegressor
diff --git a/echotorch/stat_ops.py b/echotorch/stat_ops.py
deleted file mode 100644
index dcebf62..0000000
--- a/echotorch/stat_ops.py
+++ /dev/null
@@ -1,312 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/stat_ops.py
-# Description : Statistical operations on (Time/Data/*)Tensors
-# Date : 16th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-from typing import Optional, Tuple, Union
-import math
-from scipy.stats import t
-
-# Torch
-import torch
-from torch import Tensor, mean, mm, std, var, sum
-
-# Import local
-from .base_ops import zeros
-from .timetensors import TimeTensor
-
-
-# Summation over time dimension
-def tsum(
- input: TimeTensor
-) -> Tensor:
- r"""Returns the sum over time dimension of all elements in the ``input`` timetensor.
-
- :param input: the input timetensor.
- :type input: ``TimeTensor``
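-
-    Example (illustrative):
-
-    >>> x = echotorch.randn(5, time_length=100)
-    >>> echotorch.tsum(x).size()
-    torch.Size([5])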
- """
- return sum(input, dim=input.time_dim)
-# end tsum
-
-
-# Quantile
-def tquantile(
- input: TimeTensor,
- q
-) -> Tensor:
- r"""Computes the :math:`q`-th quantiles of each row of the ``input`` timetensor along the time dimension.
-
- .. note::
- From Torch documentation:
-        To compute the quantile, we map q in [0, 1] to the range of indices
-        [0, n] to find the location of the quantile in the sorted input. If
-        the quantile lies between two data points a < b with indices i and j
-        in the sorted order, the result is computed using linear interpolation:
-
-        a + (b - a) * fraction, where fraction is the fractional part of the
-        computed quantile index.
-
-        If q is a 1D tensor, the first dimension of the output represents the
-        quantiles and has size equal to the size of q; the remaining
-        dimensions are what remains from the reduction.
-
- :param input: the input timetensor.
- :type input: ``TimeTensor``
-
- Example:
-
-    >>> x = echotorch.randn(5, time_length=100)
-    >>> echotorch.tquantile(x, 0.5).size()
-    torch.Size([5])
-    """
-    # Reduce over the time dimension
-    return torch.quantile(input, q, dim=input.time_dim)
-# end tquantile
-
-
-# Average over time dimension
-def tmean(
- input: TimeTensor
-) -> Tensor:
- r"""Returns the mean value over time dimension of all elements in the ``input`` timetensor.
-
- :param input: the input timetensor.
- :type input: ``TimeTensor``
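-
-    Example (illustrative):
-
-    >>> x = echotorch.randn(5, time_length=100)
-    >>> echotorch.tmean(x).size()
-    torch.Size([5])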
- """
- return mean(input, dim=input.time_dim)
-# end tmean
-
-
-# Standard deviation over time dimension
-def tstd(
- input: TimeTensor,
- unbiased: bool = True
-) -> Tensor:
- r"""Returns the standard deviation over time dimension of all elements in the ``input`` timetensor.
-
- :param input: the input timetensor.
- :type input: ``TimeTensor``
-    :param unbiased: whether to use Bessel's correction (:math:`\delta N = 1`)
- :type unbiased: bool
-
- Example:
-
- >>> x = echotorch.rand(5, time_length=10)
- >>> echotorch.tstd(x)
- tensor([0.2756, 0.2197, 0.2963, 0.2962, 0.2853])
- """
- return std(input, dim=input.time_dim, unbiased=unbiased)
-# end tstd
-
-
-# Variance over time dimension
-def tvar(
- input: TimeTensor,
- unbiased: bool = True
-) -> Tensor:
- r"""Returns the variance over time dimension of all elements in the ``input`` timetensor.
-
- :param input: the input timetensor.
- :type input: ``TimeTensor``
-    :param unbiased: whether to use Bessel's correction (:math:`\delta N = 1`)
- :type unbiased: bool
-
- Example:
-
- >>> x = echotorch.rand(5, time_length=10)
- >>> echotorch.tvar(x)
- tensor([0.0726, 0.0542, 0.0754, 0.0667, 0.0675])
-
- """
- return var(input, dim=input.time_dim, unbiased=unbiased)
-# end tvar
-
-
-# Correlation matrix
-def cor(
- t1: TimeTensor,
- t2: Optional[TimeTensor] = None,
- bias: Optional[bool] = False,
- ddof: Optional[int] = None,
- pvalue: Optional[bool] = False
-) -> Union[Tensor, Tuple[Tensor, Tensor]]:
- r"""Returns the correlation matrix between two 1-D timeseries, ``x`` and ``y``, with the same number of channels.
-
- As the size of the two timetensors is :math:`(T, p)`, the returned
- matrix :math:`R` has a size :math:`(p, p)`. Each element :math:`R_{ij}` of
-    matrix :math:`R` is the correlation :math:`Cor(x_i, y_j)` between :math:`x_i` and :math:`y_j`,
- such that,
-
- .. math::
- :nowrap:
-
- $$
- R =
- \begin{pmatrix}
- Cor(x_{1}, y_{1}) & Cor(x_{1}, y_{2}) & \cdots & Cor(x_{1}, y_{p}) \\
- Cor(x_{2}, y_{1}) & \ddots & \cdots & \vdots \\
- \vdots & \vdots & \ddots & \vdots \\
- Cor(x_{p}, y_{1}) & \cdots & \cdots & Cor(x_{p}, y_{p})
- \end{pmatrix}
- $$
-
- where :math:`p` is the number of channels.
-
- :param t1: first timetensor containing the uni or multivariate timeseries. The time dimension should be at position 0.
- :type t1: ``TimeTensor``
- :param t2: An additional ``TimeTensor`` with same shape and time length. If ``None``, the auto-correlation of *t1* is returned.
- :type t2: ``TimeTensor``, optional
-    :param bias: Default normalization (False) is by :math:`(N - 1)`, where :math:`N` is the number of observations given (unbiased) or length of the timeseries. If *bias* is True, then normalization is by :math:`N`. These values can be overridden by using the keyword *ddof*.
- :type bias: ``bool``, optional
-    :param ddof: If not *None*, the default value implied by *bias* is overridden. Note that ``ddof=1`` will return the unbiased estimate and ``ddof=0`` will return the simple average.
- :type ddof: ``int``
- :param pvalue: Return also the p-value from a Pearson significant test.
- :type pvalue: ``bool``
- :return: The correlation matrix of the two timeseries with the time dimension as samples.
- :rtype: ``Tensor``
-
- Example:
-
- >>> x = echotorch.randn(5, time_length=100)
- >>> y = echotorch.randn(5, time_length=100)
- >>> echotorch.cor(x, y)
- tensor([[-0.1257, 0.3849, -0.2094, -0.2107, 0.1781],
- [-0.0990, -0.5916, 0.3169, -0.1333, -0.0315],
- [ 0.0443, -0.0571, -0.2228, -0.3075, 0.0995],
- [ 0.2477, -0.5867, 0.4337, -0.2673, 0.0725],
- [ 0.2607, 0.4544, 0.5199, 0.2562, 0.4110]])
- """
-    # Auto-correlation case: compare the series with itself
-    if t2 is None:
-        t2 = t1
-    # end if
-
-    # Get covariance matrix
-    cov_m = cov(t1, t2, bias, ddof)
-
- # Get sigma for t1 and t2
- t1_std = torch.unsqueeze(tstd(t1), dim=1)
- t2_std = torch.unsqueeze(tstd(t2), dim=0)
-
- # Inner product of s(t1) and s(t2)
- t_inner = torch.mm(t1_std, t2_std)
-
- # Correlation coefficients
- corr_coefs = torch.divide(cov_m, t_inner)
-
- # Return coef (and p-value)
- if pvalue:
- # Compute t-value
- t_values = torch.divide(
- corr_coefs * math.sqrt(t1.tlen - 2),
- torch.sqrt(1 - torch.pow(corr_coefs, 2))
- )
-
- # Compute p-values
- # TODO: replace with in-house student distrib
- pvals = 2.0 * t.cdf(-torch.abs(t_values).numpy(), df=t1.tlen-2)
-
- # Return coef + pvalues
- return corr_coefs, torch.tensor(pvals)
- else:
- return corr_coefs
- # end if
-# end cor
-
-
-# Covariance matrix
-def cov(
- t1: TimeTensor,
- t2: Optional[TimeTensor] = None,
- bias: Optional[bool] = False,
- ddof: Optional[int] = None
-) -> Tensor:
- r"""Returns the covariance matrix of two 1-D or 0-D timeseries with the same number of channels.
-
- As the size of the two timetensors is :math:`(T, p)`, the returned
- matrix :math:`C` has a size :math:`(p, p)`. Each element :math:`C_{ij}` of
-    matrix :math:`C` is the covariance :math:`Cov(x_i, y_j)` between :math:`x_i` and :math:`y_j`,
- such that,
-
- .. math::
- :nowrap:
-
- $$
- C =
- \begin{pmatrix}
- \sigma_{x_{1}y_{1}} & \sigma_{x_{1}y_{2}} & \cdots & \sigma_{x_{1}y_{p}} \\
- \sigma_{x_{2}y_{1}} & \ddots & \cdots & \vdots \\
- \vdots & \vdots & \ddots & \vdots \\
- \sigma_{x_{p}y_{1}} & \cdots & \cdots & \sigma_{x_{p}y_{p}}
- \end{pmatrix}
- $$
-
- where :math:`p` is the number of channels.
-
- :param t1: first timetensor containing the uni or multivariate timeseries. The time dimension should be at position 0.
- :type t1: ``TimeTensor``
- :param t2: An additional ``TimeTensor`` with same shape and time length. If ``None``, the auto-covariance matrix of *t1* is returned.
- :type t2: ``TimeTensor``, optional
-    :param bias: Default normalization (False) is by :math:`(N - 1)`, where :math:`N` is the number of observations given (unbiased) or length of the timeseries. If *bias* is True, then normalization is by :math:`N`. These values can be overridden by using the keyword *ddof*.
- :type bias: ``bool``, optional
-    :param ddof: If not *None*, the default value implied by *bias* is overridden. Note that ``ddof=1`` will return the unbiased estimate and ``ddof=0`` will return the simple average.
- :return: The covariance matrix of the two timeseries with the time dimension as samples.
- :rtype: ``Tensor``
-
-    Example:
-
- >>> x = echotorch.randn(5, time_length=100)
- >>> y = echotorch.randn(5, time_length=100)
- >>> echotorch.cov(x, y)
- tensor([[-0.0754, -0.0818, -0.0063, -0.0484, 0.0499],
- [ 0.0290, 0.2155, 0.0735, 0.2179, -0.0991],
- [ 0.0117, 0.0356, -0.0438, 0.0088, -0.0487],
- [ 0.0080, 0.0390, -0.0212, 0.0773, 0.1014],
- [-0.1000, -0.0774, 0.0011, 0.0819, -0.0735]])
- """
-    # Auto-covariance case: compare the series with itself
-    if t2 is None:
-        t2 = t1
-    # end if
-
-    # Check that t1 and t2 have the time dim at pos 0
-    if t1.time_dim != 0 or t2.time_dim != 0:
- raise ValueError(
-            "Expected two timeseries with time dimension first (here {} and {})".format(t1.time_dim, t2.time_dim)
- )
- # end if
-
- # Check that t1 and t2 have the same time length
- if t1.tlen != t2.tlen:
- raise ValueError(
- "Expected two timeseries with same time lengths (here {} != {})".format(t1.tlen, t2.tlen)
- )
- # end if
-
- # Only 1-D or 0-D timetensors
- if t1.cdim > 1 or t2.cdim > 1 or t1.cdim != t2.cdim:
- raise ValueError(
- "Expected 1-D or 0-D timeseries, with same shape, but got {} and {}".format(t1.cdim, t2.cdim)
- )
- # end if
-
- # If 0-D, transform in 1-D
- if t1.cdim == 0:
-        t1 = torch.unsqueeze(t1, dim=t1.time_dim + 1)
-        t2 = torch.unsqueeze(t2, dim=t2.time_dim + 1)
- # end if
-
- # Compute means
- t1_mean = tmean(t1)
- t2_mean = tmean(t2)
-
- # Bias value
- if ddof is None:
- add_bias = 1
- if bias:
- add_bias = 0
- # end if
- else:
- add_bias = ddof
- # end if
-
- # Compute covariance
- return mm((t1 - t1_mean).t(), t2 - t2_mean) / (t1.tlen - add_bias)
-# end cov
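-
-# Sanity-check sketch (illustrative): with the default unbiased normalization,
-# the diagonal of the auto-covariance matrix equals the per-channel variance:
-#
-#   x = echotorch.randn(5, time_length=100)
-#   assert torch.allclose(torch.diagonal(echotorch.cov(x, x)), echotorch.tvar(x))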
-
diff --git a/echotorch/cuda/__init__.py b/echotorch/timetensor.py
similarity index 100%
rename from echotorch/cuda/__init__.py
rename to echotorch/timetensor.py
diff --git a/echotorch/timetensors.py b/echotorch/timetensors.py
deleted file mode 100644
index fefd74d..0000000
--- a/echotorch/timetensors.py
+++ /dev/null
@@ -1,1789 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/timetensors.py
-# Description : A special tensor with a time dimension
-# Date : 25th of January, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import Optional, Tuple, Union, List, Callable, Any
-import torch
-import numpy as np
-import warnings
-
-# EchoTorch imports
-import echotorch
-from .base_tensors import BaseTensor
-
-
-# Error
-ERROR_TENSOR_TO_SMALL = "Time dimension does not exist in the data tensor " \
-                        "(time dim at {}, {} dimensions in tensor). The minimum tensor size " \
-                        "is {}"
-ERROR_TIME_LENGTHS_TOO_BIG = "There are time lengths bigger than the actual tensor data"
-ERROR_WRONG_TIME_LENGTHS_SIZES = "The sizes of the time lengths tensor should be {}"
-ERROR_TIME_DIM_NEGATIVE = "The index of the time-dimension cannot be negative"
-
-# Torch overridable methods
-# TORCH_OPS = [
-# 'abs', 'absolute', 'adaptive_avg_pool1d', 'adaptive_max_pool1d', 'acos', 'arccos', 'acosh', 'arccosh', 'add',
-# 'addbmm', 'addcdiv', 'addcmul', 'addmm', 'addmv', 'addr', 'affine_grid_generator', 'all', 'allclose',
-# 'alpha_dropout', 'amax', 'amin', 'angle', 'any', 'argmax', 'argmin', 'argsort', 'asin', '_assert_async', 'arcsin',
-# 'asinh', 'arcsinh', 'atan', 'arctan', 'atan2', 'atanh', 'arctanh', 'atleast_1d', 'atleast_2d', 'atleast_3d',
-# 'avg_pool1d', 'baddbmm', 'batch_norm', 'batch_norm_backward_elemt', 'batch_norm_backward_reduce',
-# 'batch_norm_elemt', 'batch_norm_gather_stats', 'batch_norm_gather_stats_with_counts', 'batch_norm_stats',
-# 'batch_norm_update_stats', 'bernoulli', 'bilinear', 'binary_cross_entropy_with_logits', 'bincount', 'binomial',
-# 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'block_diag', 'bmm', 'broadcast_tensors',
-# 'broadcast_to', 'bucketize', 'cartesian_prod', 'cat', 'cdist', 'ceil', 'celu', 'chain_matmul', 'channel_shuffle',
-# 'cholesky', 'linalg_cholesky', 'linalg_cholesky_ex', 'cholesky_inverse', 'cholesky_solve',
-# 'choose_qparams_optimized', 'chunk', 'clamp', 'clip', 'clamp_min', 'clamp_max', 'column_stack', 'clone',
-# 'combinations', 'complex', 'copysign', 'polar', 'linalg_cond', 'conj', 'constant_pad_nd', 'conv1d', 'conv2d',
-# 'conv3d', 'convolution', 'conv_tbc', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d', 'cos',
-# 'cosine_embedding_loss', 'cosh', 'cosine_similarity', 'count_nonzero', 'cross', 'ctc_loss', 'cummax', 'cummin',
-# 'cumprod', 'cumsum', 'logcumsumexp', 'deg2rad', 'dequantize', 'det', 'linalg_det', 'detach', 'diag', 'diag_embed',
-# 'diagflat', 'diff', 'diagonal', 'digamma', 'dist', 'div', 'divide', 'dot', 'dropout', 'dsmm', 'hsmm', 'dsplit',
-# 'dstack', 'eig', 'linalg_eig', 'linalg_eigvals', 'linalg_eigh', 'linalg_eigvalsh', 'einsum', 'embedding',
-# 'embedding_bag', 'empty_like', 'eq', 'equal', 'erf', 'erfc', 'erfinv', 'exp', 'exp2', 'expm1',
-# 'fake_quantize_per_channel_affine', 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight',
-# 'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight',
-# 'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight', 'fbgemm_pack_gemm_matrix_fp16',
-# 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout', 'feature_dropout', 'fft_fft', 'fft_ifft', 'fft_rfft',
-# 'fft_irfft', 'fft_hfft', 'fft_ihfft', 'fft_fftn', 'fft_ifftn', 'fft_rfftn', 'fft_irfftn', 'fft_fft2', 'fft_ifft2',
-# 'fft_rfft2', 'fft_irfft2', 'fft_fftshift', 'fft_ifftshift', 'fix', 'flatten', 'flip', 'fliplr', 'flipud',
-# 'frobenius_norm', 'floor', 'floor_divide', 'float_power', 'fmod', 'frac', 'frexp', 'full_like', 'lu_unpack',
-# 'gather', 'gcd', 'ge', 'greater_equal', 'geqrf', 'i0', 'inner', 'outer', 'ger', 'gradient', 'grid_sampler',
-# 'grid_sampler_2d', 'grid_sampler_3d', 'group_norm', 'gru', 'gru_cell', 'gt', 'greater', 'hardshrink', 'heaviside',
-# 'hinge_embedding_loss', 'histc', 'linalg_householder_product', 'hsplit', 'hstack', 'hypot', 'igamma', 'igammac',
-# 'imag', 'index_add', 'index_copy', 'index_put', 'index_select', 'index_fill', 'isfinite', 'isinf', 'isreal',
-# 'isposinf', 'isneginf', 'instance_norm', 'int_repr', 'inverse', 'linalg_inv', 'linalg_inv_ex', 'is_complex',
-# 'is_distributed', 'is_floating_point', 'is_nonzero', 'is_same_size', 'is_signed', 'isclose', 'isnan', 'istft',
-# 'kl_div', 'kron', 'kthvalue', 'layer_norm', 'lcm', 'ldexp', 'le', 'less_equal', 'lerp', 'lgamma', 'lobpcg', 'log',
-# 'log_softmax', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logdet', 'xlogy', 'logical_and',
-# 'logical_not', 'logical_or', 'logical_xor', 'logsumexp', 'logit', 'lstm', 'lstm_cell', 'lstsq', 'lt', 'less',
-# 'lu', 'lu_solve', 'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul',
-# 'matrix_power', 'linalg_matrix_power', 'matrix_rank', 'linalg_matrix_rank', 'linalg_multi_dot', 'matrix_exp',
-# 'max', 'maximum', 'fmax', 'max_pool1d', 'max_pool2d', 'max_pool3d', 'max_pool1d_with_indices', 'mean', 'median',
-# 'nanmedian', 'meshgrid', 'min', 'minimum', 'fmin', 'miopen_batch_norm', 'miopen_convolution',
-# 'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn', 'mode', 'movedim', 'moveaxis',
-# 'msort', 'mul', 'multiply', 'multinomial', 'mv', 'mvlgamma', 'narrow', 'narrow_copy', 'nan_to_num',
-# 'native_batch_norm', 'native_layer_norm', 'native_group_norm', 'native_norm', 'ne', 'not_equal', 'neg',
-# 'negative', 'nextafter', 'adaptive_avg_pool2d', 'adaptive_avg_pool3d', 'adaptive_max_pool1d',
-# 'adaptive_max_pool1d_with_indices', 'adaptive_max_pool2d', 'adaptive_max_pool2d_with_indices',
-# 'adaptive_max_pool3d', 'adaptive_max_pool3d_with_indices', 'affine_grid', 'alpha_dropout', 'avg_pool2d',
-# 'avg_pool3d', 'batch_norm', 'bilinear', 'binary_cross_entropy', 'binary_cross_entropy_with_logits', 'celu',
-# 'cosine_embedding_loss', 'cross_entropy', 'ctc_loss', 'dropout', 'dropout2d', 'dropout3d', 'elu', 'embedding',
-# 'embedding_bag', 'feature_alpha_dropout', 'fold', 'fractional_max_pool2d', 'fractional_max_pool2d_with_indices',
-# 'fractional_max_pool3d', 'fractional_max_pool3d_with_indices', 'gaussian_nll_loss', 'gelu', 'glu', 'grid_sample',
-# 'group_norm', 'gumbel_softmax', 'hardshrink', 'hardtanh', 'hinge_embedding_loss', 'instance_norm', 'interpolate',
-# 'kl_div', 'l1_loss', 'layer_norm', 'leaky_relu', 'linear', 'local_response_norm', 'log_softmax', 'log_sigmoid',
-# 'lp_pool1d', 'lp_pool2d', 'margin_ranking_loss', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d',
-# 'max_pool2d_with_indices', 'max_pool3d', 'max_pool3d_with_indices', 'max_unpool1d', 'max_unpool2d', 'max_unpool3d',
-# 'mse_loss', 'multi_head_attention_forward', 'multi_margin_loss', 'multilabel_margin_loss',
-# 'multilabel_soft_margin_loss', 'nll_loss', 'normalize', 'one_hot', '_pad', 'pairwise_distance',
-# 'poisson_nll_loss', 'prelu', 'relu', 'relu6', 'rrelu', 'selu', 'silu', 'mish', 'smooth_l1_loss', 'huber_loss',
-# 'soft_margin_loss', 'softmax', 'softmin', 'softplus', 'softshrink', 'softsign', 'tanhshrink', '_threshold',
-# 'triplet_margin_loss', 'triplet_margin_with_distance_loss', 'unfold', 'nonzero', 'norm', 'linalg_norm',
-# 'linalg_vector_norm', 'linalg_matrix_norm', 'norm_except_dim', 'nuclear_norm', 'numel', 'orgqr', 'ormqr',
-# 'pairwise_distance', 'permute', 'pca_lowrank', 'pdist', 'pinverse', 'linalg_pinv', 'pixel_shuffle',
-# 'pixel_unshuffle', 'poisson', 'poisson_nll_loss', 'polygamma', 'positive', 'prelu', 'ones_like', 'pow', 'prod',
-# 'put', 'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale', 'q_zero_point', 'qr',
-# 'linalg_qr', 'quantile', 'nanquantile', 'quantize_per_channel', 'quantize_per_tensor', 'quantized_batch_norm',
-# 'quantized_gru_cell', 'quantized_lstm_cell', 'quantized_max_pool1d', 'quantized_max_pool2d',
-# 'quantized_rnn_relu_cell', 'quantized_rnn_tanh_cell', 'rad2deg', 'rand_like', 'randint_like', 'randn_like',
-# 'ravel', 'real', 'vdot', 'view_as_real', 'view_as_complex', 'reciprocal', 'relu', 'remainder', 'renorm',
-# 'repeat_interleave', 'reshape', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh', 'rnn_tanh_cell', 'roll', 'rot90',
-# 'round', 'row_stack', '_rowwise_prune', 'rrelu', 'rsqrt', 'rsub', 'saddmm', 'scatter', 'scatter_add',
-# 'searchsorted', 'segment_reduce', 'select', 'selu', 'sigmoid', 'sign', 'signbit', 'sgn', 'sin', 'sinc', 'sinh',
-# 'slogdet', 'linalg_slogdet', 'smm', 'softmax', 'solve', 'linalg_solve', 'sort', 'split', 'split_with_sizes',
-# 'sqrt', 'square', 'squeeze', 'stack', 'std', 'std_mean', 'stft', 'sub', 'subtract', 'sum', 'nansum', 'svd',
-# 'svd_lowrank', 'linalg_svd', 'linalg_svdvals', 'symeig', 'swapaxes', 'swapdims', 'special_entr', 'special_erf',
-# 'special_erfc', 'special_erfinv', 'special_exp2', 'special_expm1', 'special_expit', 'special_gammaln',
-# 'special_i0e', 'special_logit', 'special_xlog1py', 't', 'take', 'take_along_dim', 'tan', 'tanh',
-# 'linalg_tensorinv', 'linalg_tensorsolve', 'tensordot', 'tensor_split', 'threshold', 'tile', 'topk', 'trace',
-# 'transpose', 'trapz', 'triangular_solve', 'tril', 'triplet_margin_loss', 'triu', 'true_divide', 'trunc', 'unbind',
-# 'unique', 'unique_consecutive', 'unsafe_chunk', 'unsafe_split', 'unsafe_split_with_sizes', 'unsqueeze', 'var',
-# 'var_mean', 'vsplit', 'vstack', 'where', 'zeros_like', '__floordiv__', '__rfloordiv__', '__ifloordiv__',
-# '__truediv__', '__rdiv__', '__idiv__', '__lshift__', '__ilshift__', '__rshift__', '__irshift__', '__float__',
-# '__complex__', '__array__', '__bool__', '__contains__', 'neg', '__invert__', '__mod__', '__imod__',
-# '__array_wrap__', '__getitem__', '__deepcopy__', '__int__', '__long__', '__hash__', '__index__', '__len__',
-# '__format__', '__reduce_ex__', '__reversed__', '__repr__', '__setitem__', '__setstate__', '__get__', 'type',
-# '_coalesced_', '_dimI', '_dimV', '_indices', '_is_view', '_nnz', 'crow_indices', 'col_indices',
-# '_update_names', '_values', 'align_as', 'align_to', 'apply_', 'as_strided', 'as_strided_', 'backward', 'bfloat16',
-# 'bool', 'byte', 'char', 'cauchy_', 'coalesce', 'contiguous', 'copy_', 'cpu', 'cuda', 'xpu', 'data_ptr',
-# 'dense_dim', 'dim', 'double', 'cdouble', 'element_size', 'expand', 'expand_as', 'exponential_', 'fill_',
-# 'fill_diagonal_', 'float', 'cfloat', 'geometric_', 'get_device', 'half', 'has_names', 'indices', 'int',
-# 'is_coalesced', 'is_contiguous', 'is_pinned', 'is_set_to', 'is_shared', 'item', 'log_normal_', 'log_softmax',
-# 'long', 'map_', 'map2_', 'mm', 'narrow_copy', 'ndimension', 'nelement', 'normal_', 'numpy', 'permute',
-# 'pin_memory', 'put_', 'qscheme', 'random_', 'record_stream', 'refine_names', 'register_hook', 'rename', 'repeat',
-# 'requires_grad_', 'reshape_as', 'resize', 'resize_', 'resize_as', 'retain_grad', 'set_', 'share_memory_',
-# 'short', 'size', 'sparse_dim', 'sparse_mask', 'sparse_resize_', 'sparse_resize_and_clear_', 'sspaddmm', 'storage',
-# 'storage_offset', 'storage_type', 'sum_to_size', 'tile', 'to', 'to_dense', 'to_sparse', 'tolist', 'to_mkldnn',
-# 'type_as', 'unfold', 'uniform_', 'values', 'view', 'view_as', 'zero_', 'linalg_lstsq', 'abs', 'abs_', 'absolute',
-# 'absolute_', 'acos', 'acos_', 'arccos', 'arccos_', 'acosh', 'acosh_', 'arccosh', 'arccosh_', 'add', 'add_',
-# '__add__', '__iadd__', '__radd__', 'addbmm', 'addbmm_', 'addcdiv', 'addcdiv_', 'addcmul', 'addcmul_', 'addmm',
-# 'addmm_', 'addmv', 'addmv_', 'addr', 'addr_', 'all', 'allclose', 'amax', 'amin', 'angle', 'any', 'argmax',
-# 'argmin', 'argsort', 'asin', 'asin_', 'arcsin', 'arcsin_', 'asinh', 'asinh_', 'arcsinh', 'arcsinh_', 'atan',
-# 'atan_', 'arctan', 'arctan_', 'atan2', 'atan2_', 'atanh', 'atanh_', 'arctanh', 'arctanh_', 'baddbmm', 'baddbmm_',
-# 'bernoulli', 'bernoulli_', 'bincount', 'bitwise_and', 'bitwise_and_', '__and__', '__iand__', 'bitwise_not',
-# 'bitwise_not_', 'bitwise_or', 'bitwise_or_', '__or__', '__ior__', 'bitwise_xor', 'bitwise_xor_', '__xor__',
-# '__ixor__', 'bmm', 'broadcast_to', 'ceil', 'ceil_', 'cholesky', 'cholesky_inverse', 'cholesky_solve', 'chunk',
-# 'clamp', 'clamp_', 'clip', 'clip_', 'clamp_min', 'clamp_min_', 'clamp_max', 'clamp_max_', 'clone', 'copysign',
-# 'copysign_', 'conj', 'cos', 'cos_', 'cosh', 'cosh_', 'count_nonzero', 'cross', 'cummax', 'cummin', 'cumprod',
-# 'cumprod_', 'cumsum', 'cumsum_', 'logcumsumexp', 'deg2rad', 'deg2rad_', 'dequantize', 'det', 'detach', 'detach_',
-# 'diag', 'diag_embed', 'diagflat', 'diff', 'diagonal', 'digamma', 'digamma_', 'dist', 'div', 'div_', '__div__',
-# 'divide', 'divide_', 'dot', 'dsplit', 'eig', 'eq', 'eq_', '__eq__', 'equal', 'erf', 'erf_', 'erfc', 'erfc_',
-# 'erfinv', 'erfinv_', 'exp', 'exp_', 'exp2', 'exp2_', 'expm1', 'expm1_', 'fix', 'fix_', 'flatten', 'flip', 'fliplr',
-# 'flipud', 'floor', 'floor_', 'floor_divide', 'floor_divide_', 'float_power', 'float_power_', 'fmod', 'fmod_',
-# 'frac', 'frac_', 'frexp', 'gather', 'gcd', 'gcd_', 'ge', 'ge_', '__ge__', 'greater_equal', 'greater_equal_',
-# 'geqrf', 'i0', 'i0_', 'inner', 'outer', 'ger', 'gt', 'gt_', '__gt__', 'greater', 'greater_', 'hardshrink',
-# 'heaviside', 'heaviside_', 'histc', 'hsplit', 'hypot', 'hypot_', 'igamma', 'igamma_', 'igammac', 'igammac_',
-# 'index_add', 'index_add_', 'index_copy', 'index_copy_', 'index_put', 'index_put_', 'index_select', 'index_fill',
-# 'index_fill_', 'isfinite', 'isinf', 'isreal', 'isposinf', 'isneginf', 'int_repr', 'inverse', 'is_complex',
-# 'is_distributed', 'is_floating_point', 'is_nonzero', 'is_same_size', 'is_signed', 'isclose', 'isnan', 'istft',
-# 'kron', 'kthvalue', 'lcm', 'lcm_', 'ldexp', 'ldexp_', 'le', 'le_', '__le__', 'less_equal', 'less_equal_',
-# 'lerp', 'lerp_', 'lgamma', 'lgamma_', 'log', 'log_', 'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_',
-# 'logaddexp', 'logaddexp2', 'logdet', 'xlogy', 'xlogy_', 'logical_and', 'logical_and_', 'logical_not',
-# 'logical_not_', 'logical_or', 'logical_or_', 'logical_xor', 'logical_xor_', 'logsumexp', 'logit', 'logit_',
-# 'lstsq', 'lt', 'lt_', '__lt__', 'less', 'less_', 'lu', 'lu_solve', 'masked_fill', 'masked_fill_',
-# 'masked_scatter', 'masked_scatter_', 'masked_select', 'matmul', '__matmul__', 'matrix_power', 'matrix_exp',
-# 'max', 'maximum', 'fmax', 'mean', 'median', 'nanmedian', 'min', 'minimum', 'fmin', 'mode', 'movedim', 'moveaxis',
-# 'msort', 'mul', 'mul_', '__mul__', '__imul__', '__rmul__', 'multiply', 'multiply_', 'multinomial', 'mv',
-# 'mvlgamma', 'mvlgamma_', 'narrow', 'nan_to_num', 'nan_to_num_', 'ne', 'ne_', '__ne__', 'not_equal', 'not_equal_',
-# 'neg_', 'negative', 'negative_', 'nextafter', 'nextafter_', 'prelu', 'relu', 'relu_', 'softmax', 'nonzero',
-# '__nonzero__', 'norm', 'numel', 'orgqr', 'ormqr', 'pinverse', 'polygamma', 'polygamma_', 'positive', 'pow',
-# 'pow_', '__ipow__', '__rpow__', 'prod', 'put', 'q_per_channel_axis', 'q_per_channel_scales',
-# 'q_per_channel_zero_points', 'q_scale', 'q_zero_point', 'qr', 'quantile', 'nanquantile', 'rad2deg', 'rad2deg_',
-# 'ravel', 'vdot', 'reciprocal', 'reciprocal_', 'remainder', 'remainder_', 'renorm', 'renorm_',
-# 'repeat_interleave', 'reshape', 'roll', 'rot90', 'round', 'round_', 'rsqrt', 'rsqrt_', '__rsub__', 'scatter',
-# 'scatter_', 'scatter_add', 'scatter_add_', 'select', 'sigmoid', 'sigmoid_', 'sign', 'sign_', 'signbit', 'sgn',
-# 'sgn_', 'sin', 'sin_', 'sinc', 'sinc_', 'sinh', 'sinh_', 'slogdet', 'smm', 'solve', 'sort', 'split',
-# 'split_with_sizes', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'squeeze_', 'std', 'stft', 'sub', 'sub_',
-# '__sub__', '__isub__', 'subtract', 'subtract_', 'sum', 'nansum', 'svd', 'symeig', 'swapaxes', 'swapaxes_',
-# 'swapdims', 'swapdims_', 't', 't_', 'take', 'take_along_dim', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor_split',
-# 'topk', 'trace', 'transpose', 'transpose_', 'triangular_solve', 'tril', 'tril_', 'triu', 'triu_', 'true_divide',
-# 'true_divide_', 'trunc', 'trunc_', 'unbind', 'unique', 'unique_consecutive', 'unsafe_chunk', 'unsafe_split',
-# 'unsafe_split_with_sizes', 'unsqueeze', 'unsqueeze_', 'var', 'vsplit', 'where', 'rename_', 'resize_as_'
-# ]
-
-# Torch ops which can be directly converted to timetensors
-TORCH_OPS_DIRECT = [
- # Indexing, etc
- 'cat', 'chunk', 'dsplit', 'column_stack', 'gather', 'hsplit', 'hstack', 'index_select', 'narrow', 'scatter',
- 'scatter_add', "split", 'tensor_split', 'tile', 'vsplit', 'where',
- # Pointwise operations,
- 'abs', 'absolute', 'acos', 'arccos', 'acosh', 'arccosh', 'add', 'addcdiv', 'addcmul', 'angle', 'asin', 'arcsin',
-    'asinh', 'arcsinh', 'atan', 'arctan', 'atanh', 'arctanh', 'atan2', 'bitwise_not', 'bitwise_and', 'bitwise_or',
- 'bitwise_xor', 'ceil', 'clamp', 'clip', 'conj', 'copysign', 'cos', 'cosh', 'deg2rad', 'div', 'divide', 'digamma',
- 'erf', 'erfc', 'erfinv', 'exp', 'exp2', 'expm1', # fake_quantize_per_channel_affine, fake_quantize_per_tensor_affine,
- 'fix', 'float_power', 'floor', 'floor_divide', 'fmod', 'frac', 'frexp', 'gradient', 'imag', 'ldexp', 'lerp',
- 'lgamma', 'log', 'log10', 'log1p', 'log2', 'logit', # hypot
- 'i0', 'igamma', 'igammac', 'mul', 'multiply', 'mvlgamma', 'nan_to_num', 'neg', 'negative', # nextafter
- # logaddexp, logaddexp2
- 'logical_and', 'logical_not', 'logical_or', 'logical_xor',
- # Other operation
- 'atleast_1d', 'block_diag', 'broadcast_to', 'bucketize', 'clone',
- # Reduction ops
-]
-
-# Torch reduction ops which can be converted to timetensor depending on the value of the parameter 'dim'
-TORCH_OPS_REDUCTION = [
- 'argmax', 'argmin', 'amax', 'amin', 'max', 'min', 'logsumexp', 'mean', 'median', 'nanmedian', 'mode', 'nansum',
- 'prod', 'quantile', 'nanquantile', 'std', 'std_mean', 'sum', 'unique', 'unique_consecutive', 'var', 'var_mean',
- 'count_nonzero', 'argsort',
-]
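-
-# For illustration (hypothetical behaviour sketch): for a timetensor x with
-# time_dim=0, torch.mean(x) reduces every dimension and yields a plain Tensor,
-# while torch.mean(x, dim=1) keeps the time dimension, so the result can stay
-# a TimeTensor; the ops above are converted depending on the 'dim' argument.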
-
-# List of torch ops implemented, if not in this list, we print a warning
-TORCH_OPS_IMPLEMENTED = [
- # Indexing, etc
- 'cat', 'chunk', 'dsplit', 'column_stack', 'dstack', 'gather', 'hsplit', 'hstack', 'index_select', 'masked_select',
- 'movedim', 'moveaxis', 'narrow', 'nonzero', 'reshape', 'row_stack', 'scatter', 'scatter_add', "split", 'squeeze',
- 'stack', 'swapaxes', 'swapdims', 't', 'atleast_3d', 'take', 'take_along_dim', 'tensor_split', 'tile', 'transpose',
- 'unbind', 'unsqueeze', 'vsplit', 'vstack', 'where',
- # Pointwise operations,
- 'abs', 'absolute', 'acos', 'arccos', 'acosh', 'arccosh', 'add', 'addcdiv', 'addcmul', 'angle', 'asin', 'arcsin',
-    'asinh', 'arcsinh', 'atan', 'arctan', 'atanh', 'arctanh', 'atan2', 'bitwise_not', 'bitwise_and', 'bitwise_or',
- 'bitwise_xor', 'ceil', 'clamp', 'clip', 'conj', 'copysign', 'cos', 'cosh', 'deg2rad', 'div', 'divide', 'digamma',
- 'erf', 'erfc', 'erfinv', 'exp', 'exp2', 'expm1', # fake_quantize_per_channel_affine, fake_quantize_per_tensor_affine,
- 'fix', 'float_power', 'floor', 'floor_divide', 'fmod', 'frac', 'frexp', 'gradient', 'imag', 'ldexp', 'lerp',
- 'lgamma', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_or', 'logical_not',
- 'logical_xor', 'logit', 'hypot', 'i0', 'igamma', 'igammac', 'mul', 'multiply', 'mvlgamma', 'nan_to_num', 'neg',
- 'negative', 'nextafter', 'polygamma', 'positive', 'pow', 'rad2deg', 'real', 'reciprocal', 'remainder', 'round',
- 'rsqrt', 'sigmoid', 'sign', 'sgn', 'signbit',
- # Other operations,
- 'atleast_1d', 'atleast_2d', 'atleast_3d', 'bincount', 'block_diag', 'broadcast_tensors', 'broadcast_to',
- 'bucketize', 'cartesian_prod', 'cdist', 'clone', 'combinations', 'cross', 'cummax', 'cummin',
- # Reduction ops
- 'argmax', 'argmin', 'amax', 'amin', 'all', 'any', 'max', 'min', 'logsumexp', 'mean', 'median', 'nanmedian',
- 'mode', 'nansum', 'prod', 'quantile', 'nanquantile', 'std', 'std_mean', 'sum', 'unique', 'unique_consecutive',
- 'var', 'var_mean', 'count_nonzero',
- # Comparison
-    'allclose', 'argsort', 'eq', 'equal', 'ge', 'greater_equal', 'gt', 'greater', 'isclose', 'isfinite', 'isinf',
- 'kthvalue', 'le', 'less_equal', 'lt', 'less', 'maximum', 'minimum', 'fmax', 'fmin', 'ne', 'not_equal', 'sort',
- 'topk', 'msort',
- # Spectral
- 'stft', 'istft',
- # BLAS and LAPACK
- 'mm',
- # Convolution
-    'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d', 'unfold', 'fold',
- # Pooling
- 'avg_pool1d', 'avg_pool2d', 'avg_pool3d', 'max_pool1d', 'max_pool1d_indices', 'max_pool2d', 'max_pool2d_indices',
- 'max_pool3d', 'max_pool3d_indices', 'max_unpool1d', 'max_unpool2d', 'max_unpool3d', 'lp_pool1d', 'lp_pool2d',
- 'lp_pool3d', 'adaptive_max_pool1d', 'adaptive_max_pool2d', 'adaptive_max_pool3d', 'adaptive_avg_pool1d',
- 'adaptive_avg_pool2d', 'adaptive_avg_pool3d', 'fractional_max_pool2d', 'fractional_max_pool3d',
- # Linear
- 'linear', 'bilinear',
- # Dropout
- 'dropout', 'alpha_dropout', 'feature_alpha_dropout', 'dropout2d', 'dropout3d',
- # Sparse
- 'embedding', 'embedding_bag', 'one_hot',
- # Distance
- 'pairwise_distance', 'cosine_similarity', 'pdist',
- # Vision
- 'pixel_shuffle', 'pixel_unshuffle', 'pad', 'interpolate', 'grid_sample',
-]
-
-# Rejected Torch operations
-TORCH_OPS_UNSUPPORTED = [
- 'affine_grid'
-]
-
-
-# region TIMETENSOR
-
-# TimeTensor
-def check_time_lengths(
- time_len: int,
- time_lengths: Optional[torch.LongTensor],
- batch_sizes: torch.Size
-):
-    r"""Check that a tensor of time lengths is consistent with the length of
-    the time dimension and the batch sizes.
-
-    :param time_len: length of the time dimension.
-    :param time_lengths: tensor of per-batch time lengths to validate.
-    :param batch_sizes: expected sizes of the batch dimensions.
-    :return: ``True`` if the time lengths are valid, raises ``ValueError`` otherwise.
-    """
-    # Nothing to check if no lengths were given
-    if time_lengths is None:
-        return True
-    # end if
-
-    # Check that the given lengths tensor has the right
-    # dimensions
- if time_lengths.size() != batch_sizes:
- raise ValueError(ERROR_WRONG_TIME_LENGTHS_SIZES.format(batch_sizes))
- # end if
-
- # Check that all lengths are not bigger
- # than the actual time-tensor
- if torch.any(time_lengths > time_len):
- raise ValueError(ERROR_TIME_LENGTHS_TOO_BIG)
- # end if
-
- return True
-# end check_time_lengths
-
-
-# TimeTensor
-class TimeTensor(BaseTensor):
- r"""A special tensor with a time dimension.
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'TimeTensor'],
- time_dim: Optional[int] = 0
- ) -> None:
- r"""TimeTensor constructor
-
- :param data: The data in a torch tensor to transform to timetensor.
- :param time_dim: The position of the time dimension.
- """
- # Copy if already a timetensor
- # transform otherwise
- if type(data) is TimeTensor:
- tensor_data = data.tensor
- else:
- tensor_data = data
- # end if
-
- # The tensor must have enough dimension
- # for the time dimension
- if tensor_data.ndim < time_dim + 1:
- # Error
- raise ValueError(
- ERROR_TENSOR_TO_SMALL.format(time_dim, tensor_data.ndim, time_dim + 1)
- )
- # end if
-
- # Set tensor and time index
- self._tensor = tensor_data
- self._time_dim = time_dim
- # end __init__
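-
-    # For illustration: wrapping a (100, 3) tensor with time_dim=0 gives a
-    # timeseries of length 100 with 3 channels:
-    #
-    #   x = TimeTensor(torch.randn(100, 3), time_dim=0)
-    #   x.tlen      # 100
-    #   x.csize()   # torch.Size([3])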
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
- # Time dimension (getter)
- @property
- def time_dim(self) -> int:
- r"""Get the index of the time dimension.
-
- :return: The index of the time dimension.
-        :rtype: ``int``
- """
- return self._time_dim
- # end time_dim
-
- # Time dimension (setter)
- @time_dim.setter
- def time_dim(
- self,
- value: int
- ) -> None:
- r"""Set the index of the time dimension if valid.
-
- :param value: New index of the time dimension.
-        :type value: ``int``
- """
- # Check time dim is valid
- if value >= self.tensor.ndim:
- # Error
-            raise ValueError(ERROR_TENSOR_TO_SMALL.format(value, self._tensor.ndim, value + 1))
- elif value < 0:
- raise ValueError(ERROR_TIME_DIM_NEGATIVE)
- # end if
-
- # Set new time dim
- self._time_dim = value
- # end time_dim
-
- # Time length
- @property
- def tlen(self) -> int:
- r"""Returns the length of the time dimension.
-
- :return: the length of the time dimension.
- :rtype: ``int``
- """
- return self._tensor.size()[self._time_dim]
- # end tlen
-
- # Number of channel dimensions
- @property
- def cdim(self) -> int:
- r"""Number of channel dimensions.
-
- :return: the number of channel dimensions.
-        :rtype: ``int``
- """
- return self._tensor.ndim - self._time_dim - 1
- # end cdim
-
- # Number of batch dimensions
- @property
- def bdim(self) -> int:
- r"""Number of batch dimensions.
-
- :return: the number of batch dimensions.
-        :rtype: ``int``
- """
- return self._tensor.ndim - self.cdim - 1
- # end bdim
-
- # endregion PROPERTIES
-
- # region PUBLIC
-
- # Size of channel dimensions
- def csize(self) -> torch.Size:
- r"""Size of channel dimensions.
- """
- if self._time_dim != self._tensor.ndim - 1:
- tensor_size = self._tensor.size()
- return tensor_size[self.time_dim+1:]
- else:
- return torch.Size([])
- # end if
- # end csize
-
- # Size of batch dimensions
- def bsize(self) -> torch.Size:
- r"""Size of batch dimensions.
- """
- if self._time_dim == 0:
- return torch.Size([])
- else:
- tensor_size = self._tensor.size()
- return tensor_size[:self._time_dim]
- # end if
- # end bsize
-
- # Number of channel elements
- def numelc(self):
- r"""Returns the number of elements in the channel dimensions.
- """
- # Multiply sizes
- num_el = 1
- for c_size in list(self.csize()):
- num_el *= c_size
- # end for
- return num_el
- # end numelc
-
- # Number of batch elements
- def numelb(self):
- r"""Returns the number of elements in the batch dimensions.
- """
- # Multiply sizes
- num_el = 1
- for b_size in list(self.bsize()):
- num_el *= b_size
- # end for
- return num_el
- # end numelb
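-
-    # For illustration (hypothetical shapes): with a tensor of size
-    # (batch=4, time=100, channels=3) and time_dim=1:
-    #   tlen == 100, bsize() == torch.Size([4]), csize() == torch.Size([3]),
-    #   numelb() == 4 and numelc() == 3.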
-
- # region CAST
-
- # To
- def to(self, *args, **kwargs) -> 'TimeTensor':
-        r"""Performs TimeTensor dtype and/or device conversion. A ``torch.dtype`` and ``torch.device`` are inferred
-        from the arguments of ``self.to(*args, **kwargs)``.
-
- .. note::
- From PyTorch documentation: if the ``self`` TimeTensor already has the correct ``torch.dtype`` and
- ``torch.device``, then ``self`` is returned. Otherwise, the returned timetensor is a copy of ``self``
- with the desired ``torch.dtype`` and ``torch.device``.
-
- Example::
-            >>> ttensor = echotorch.randn(2, time_length=20)
- >>> ttensor.to(torch.float64)
-
- """
- # New tensor
- ntensor = self._tensor.to(*args, **kwargs)
-
-        # Same tensor? (``Tensor.to`` returns the same object when no
-        # conversion is needed, so compare identities, not values)
-        if ntensor is self._tensor:
-            return self
- else:
- return TimeTensor(
- ntensor,
- time_dim=self._time_dim
- )
- # end if
- # end to
-
- # endregion CAST
-
- # Indexing time tensor
- def indexing_timetensor(
- self,
- item
- ) -> 'TimeTensor':
- r"""Return a view of a :class:`TimeTensor` according to an indexing item.
-
- :param item: Data item to recover.
- :rtype: :class:`TimeTensor`
- """
- return TimeTensor(
- self._tensor[item],
- time_dim=self._time_dim
- )
- # end indexing_timetensor
-
- # endregion PUBLIC
-
- # region TORCH_FUNCTION
-
- # region TORCH_INDEXING
-
- # After dstack
- def after_dstack(
- self,
- func_output: Any,
- *ops_inputs,
- dim: int = 0
- ) -> 'TimeTensor':
- r"""After :func:`torch.dstack`.
- """
- # 0-D timeseries
- if self.ndim == 1:
- return TimeTensor(
- data=func_output,
- time_dim=1
- )
- else:
- return TimeTensor(
- data=func_output,
- time_dim=self._time_dim
- )
- # end if
- # end after_dstack
-
- # After movedim
- def after_movedim(
- self,
- func_output: Any,
- ops_input,
- source,
- destination
- ) -> 'TimeTensor':
-        r"""After :func:`torch.movedim`, update the index of the time dimension if it was affected by the move.
- """
-        # Compute the new index of the time dimension: movedim removes the
-        # dim at ``source`` and re-inserts it at ``destination``, shifting
-        # the dimensions in between by one position.
-        if source == self.time_dim:
-            new_time_dim = destination
-        else:
-            new_time_dim = self.time_dim
-            if source < new_time_dim:
-                new_time_dim -= 1
-            # end if
-            if destination <= new_time_dim:
-                new_time_dim += 1
-            # end if
-        # end if
-
- # New timetensor
- return TimeTensor(
- data=func_output,
- time_dim=new_time_dim
- )
- # end after_movedim
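-
-    # For illustration: with x of size (4, 100, 3) and time_dim=1,
-    # torch.movedim(x, 0, 2) gives size (100, 3, 4) and the time dimension
-    # lands at index 0.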
-
- # After squeeze
- def after_squeeze(
- self,
- func_output,
- input,
- dim=None
- ) -> Union['TimeTensor', torch.Tensor]:
-        r"""After :func:`torch.squeeze`, drop the time dimension if it was
-        squeezed out, or shift its index if preceding dims were removed.
-        """
- if dim is None:
- if self.tlen == 1:
- return func_output
- else:
-                # How many size-one dims before the time dim were removed?
- removed_dim = torch.sum(torch.tensor(self.size()[:self.time_dim]) == 1)
-
- # Return with modified time dim
- return TimeTensor(
- data=func_output,
- time_dim=self.time_dim - removed_dim
- )
- # end if
- else:
- # Time dim targeted
- if dim == self.time_dim and self.tlen == 1:
- return func_output
- # end if
-
- # If dim removed and before time dim
- if self.size()[dim] == 1 and dim < self.time_dim:
- return TimeTensor(
- data=func_output,
- time_dim=self.time_dim - 1
- )
- else:
- return TimeTensor(
- data=func_output,
- time_dim=self.time_dim
- )
- # end if
- # end if
- # end after_squeeze
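-
- # Example (illustrative sketch): squeezing a size-one dim placed before
- # the time dim shifts time_dim to the left. For x of shape (1, 10, 3)
- # with time_dim=1:
- #
- # >>> y = torch.squeeze(x, 0)
- # >>> y.time_dim
- # 0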
-
- # After stack
- def after_stack(
- self,
- func_output,
- tensors,
- dim=0
- ) -> 'TimeTensor':
- r"""After :func:`torch.stack`, we increment time dim if needed.
- """
- if dim <= self.time_dim:
- return TimeTensor(
- data=func_output,
- time_dim=self.time_dim+1
- )
- else:
- return TimeTensor(
- data=func_output,
- time_dim=self.time_dim
- )
- # end if
- # end after_stack
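-
- # Example (illustrative sketch): stacking along a new leading dim shifts
- # the time dim to the right. For x with time_dim=0:
- #
- # >>> y = torch.stack([x, x], dim=0)
- # >>> y.time_dim
- # 1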
-
- # after t
- def after_t(
- self,
- func_output,
- input
- ) -> 'TimeTensor':
- r"""After :func:`torch.t`, swap time dim.
- """
- if self.ndim == 1:
- return TimeTensor(
- data=func_output,
- time_dim=self.time_dim
- )
- else:
- return TimeTensor(
- data=func_output,
- time_dim=1 - self.time_dim
- )
- # end if
- # end after_t
-
- # After transpose
- def after_transpose(
- self,
- func_output,
- input,
- dim0,
- dim1
- ) -> 'TimeTensor':
- r"""After :func:`torch.t`, swap time dim.
- """
- if self.time_dim in [dim0, dim1]:
- return TimeTensor(
- data=func_output,
- time_dim=dim0 if self.time_dim == dim1 else dim1
- )
- else:
- return TimeTensor(
- data=func_output,
- time_dim=self.time_dim
- )
- # end if
- # end after_transpose
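-
- # Example (illustrative sketch): for x of shape (10, 3) with time_dim=0:
- #
- # >>> y = torch.transpose(x, 0, 1)
- # >>> y.time_dim
- # 1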
-
- # After unbind
- def after_unbind(
- self,
- func_output,
- input,
- dim=0
- ) -> Tuple['TimeTensor', torch.Tensor]:
- r"""After :func:`torch.unbind`, remove time dim if needed.
- """
- if dim == self.time_dim:
- return func_output
- else:
- return tuple(self.transform_to_timetensors(func_output))
- # end if
- # end after_unbind
-
- # After unsqueeze
- def after_unsqueeze(
- self,
- func_output: Any,
- input: Any,
- dim: int
- ) -> 'TimeTensor':
- r"""After :func:`torch.unsqueeze`, remove time dim if needed.
-
- :param func_output: The output of the torch.unsqueeze function.
- :type func_output:
- :param dim: The request dimension from unsqueeze.
- :type dim:
- :return: The computed output.
- :rtype:
- """
- if dim <= self.time_dim:
- return TimeTensor(
- func_output,
- time_dim=self._time_dim + 1
- )
- else:
- return TimeTensor(
- func_output,
- time_dim=self._time_dim
- )
- # end if
- # end after_unsqueeze
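-
- # Example (illustrative sketch): unsqueezing at or before the time dim
- # shifts it to the right. For x with time_dim=0:
- #
- # >>> y = torch.unsqueeze(x, 0)
- # >>> y.time_dim
- # 1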
-
- # After vstack
- def after_vstack(
- self,
- func_output,
- tensors
- ) -> 'TimeTensor':
- r"""After :func:`torch.vstack, we add 1 to the index of time
- dim if it is a 0-D timeseries, other keep the same
- """
- if self.ndim == 1:
- return TimeTensor(
- data=func_output,
- time_dim=1
- )
- else:
- return TimeTensor(
- data=func_output,
- time_dim=self.time_dim
- )
- # end if
- # end after_vstack
-
- # endregion TORCH_INDEXING
-
- # region TORCH_COMPARISON
-
- # After kthvalue
- def after_kthvalue(
- self,
- func_ret,
- input,
- k,
- dim=None,
- keepdim=False
- ):
- r"""After :func:`torch.kthvalue`.
- """
- return (
- self.convert_after_reduction(func_ret.values, input, dim, keepdim),
- self.convert_after_reduction(func_ret.indices, input, dim, keepdim)
- )
- # end after_kthvalue
-
- # After topk
- def after_topk(
- self,
- func_ret,
- input,
- k,
- dim=None,
- largest=True,
- sorted=True
- ):
- r"""After :func:`torch.kthvalue`.
- """
- return (
- self.convert_after_reduction(func_ret.values, input, dim, True),
- self.convert_after_reduction(func_ret.indices, input, dim, True)
- )
- # end after_topk
-
- # endregion TORCH_COMPARISON
-
- # region TORCH_SPECTRAL
-
- # Short-time Fourier transform (STFT)
- def after_stft(
- self,
- func_ret,
- input,
- n_fft,
- hop_length=None,
- win_length=None,
- window=None,
- center=True,
- pad_mode='reflect',
- normalized=False,
- onesided=None,
- return_complex=None
- ) -> 'TimeTensor':
- r"""After :func:`torch.stft`.
- """
- if input.ndim == 1:
- return TimeTensor(
- data=func_ret,
- time_dim=1
- )
- else:
- return TimeTensor(
- data=func_ret,
- time_dim=2
- )
- # end if
- # end after_stft
-
- # Inverse Short-time Fourier transform (ISTFT)
- def after_istft(
- self,
- func_ret,
- input,
- n_fft,
- hop_length=None,
- win_length=None,
- window=None,
- center=True,
- normalized=False,
- onesided=None,
- length=None,
- return_complex=False
- ) -> 'TimeTensor':
- r"""After :func:`torch.istft`
- """
- if input.ndim == 3:
- return TimeTensor(
- data=func_ret,
- time_dim=0
- )
- else:
- return TimeTensor(
- data=func_ret,
- time_dim=1
- )
- # end if
- # end after_istft
-
- # endregion TORCH_SPECTRAL
-
- # region TORCH_OTHER
-
- # After atleast_2d
- def after_atleast_2d(
- self,
- func_output: Any,
- *ops_inputs,
- dim: int = 0
- ) -> 'TimeTensor':
- r"""After :func:`torch.atleast_2d`.
- """
- # 0-D timeseries
- if self.ndim == 1:
- return TimeTensor(
- data=func_output,
- time_dim=1
- )
- else:
- return TimeTensor(
- data=func_output,
- time_dim=self._time_dim
- )
- # end if
- # end after_atleast_2d
-
- # After atleast_3d
- def after_atleast_3d(
- self,
- func_output: Any,
- *ops_inputs,
- dim: int = 0
- ) -> 'TimeTensor':
- r"""After :func:`torch.atleast_3d`.
- """
- # 0-D timeseries
- if self.ndim == 1:
- return TimeTensor(
- data=func_output,
- time_dim=1
- )
- else:
- return TimeTensor(
- data=func_output,
- time_dim=self._time_dim
- )
- # end if
- # end after_atleast_3d
-
- # After broadcast_tensors
- def after_broadcast_tensors(
- self,
- func_output,
- *tensors
- ) -> Tuple['TimeTensor']:
- r"""After :func:`torch.broadcast_tensors`.
- """
- output_list = list()
- for t_i, tensor in enumerate(func_output):
- if isinstance(tensors[t_i], TimeTensor):
- output_list.append(
- TimeTensor(
- data=tensor,
- time_dim=tensors[t_i].time_dim
- )
- )
- else:
- output_list.append(tensor)
- # end if
- # end for
- return tuple(output_list)
- # end after_broadcast_tensors
-
- # After cartesian_prod
- def after_cartesian_prod(
- self,
- func_output,
- *tensors
- ) -> 'TimeTensor':
- r"""After :func:`torch.cartesian_prod`.
- """
- return TimeTensor(
- data=func_output,
- time_dim=0
- )
- # end after_cartesian_prod
-
- # After cdist
- def after_cdist(
- self,
- func_output,
- *tensors,
- p=2.0,
- compute_mode='use_mm_for_euclid_dist_if_necessary'
- ) -> Union[torch.Tensor, 'TimeTensor']:
- r"""After :func:`torch.cdist`.
- """
- if self is tensors[0]:
- if self.time_dim in [0, 1]:
- return TimeTensor(
- data=func_output,
- time_dim=self.time_dim
- )
- else:
- return func_output
- # end if
- else:
- if self.time_dim == 0:
- return TimeTensor(
- data=func_output,
- time_dim=0
- )
- elif self.time_dim == 1:
- return TimeTensor(
- data=func_output,
- time_dim=2
- )
- else:
- return func_output
- # end if
- # end if
- # end after_cdist
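-
- # Example (illustrative): for x1 of shape (10, 3) with time_dim=0 and a
- # plain tensor x2 of shape (5, 3), torch.cdist(x1, x2) has shape (10, 5)
- # with time still on dim 0, so the result keeps time_dim=0.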
-
- # After combinations
- def after_combinations(
- self,
- func_ret,
- input,
- r=2,
- with_replacement=False
- ) -> 'TimeTensor':
- r"""After :func:`torch.combinations`.
- """
- return TimeTensor(
- data=func_ret,
- time_dim=0
- )
- # end after_combinations
-
- # After cummax
- def after_cummax(
- self,
- func_ret,
- input,
- dim=None
- ):
- r"""After :func:`torch.cummax`.
- """
- return (
- self.convert_after_reduction(func_ret.values, input, dim, True),
- self.convert_after_reduction(func_ret.indices, input, dim, True)
- )
- # end after_cummax
-
- # After cummin
- def after_cummin(
- self,
- func_ret,
- input,
- dim=None
- ):
- r"""After :func:`torch.cummax`.
- """
- return (
- self.convert_after_reduction(func_ret.values, input, dim, True),
- self.convert_after_reduction(func_ret.indices, input, dim, True)
- )
- # end after_cummin
-
- # endregion TORCH_OTHER
-
- # region TORCH_BLAS_LAPACK
-
- # After mm (matrix multiplication)
- def after_mm(
- self,
- func_output: Any,
- m1,
- m2
- ) -> Union['TimeTensor', torch.Tensor]:
- r"""After mm (matrix multiplication)
-
- :param m1: first tensor.
- :type m1: :class:`TimeTensor` or ``torch.Tensor``
- :param m2: second tensor.
- :type m2: :class:`TimeTensor` or ``torch.Tensor``
- """
- return func_output
- # end after_mm
-
- # endregion TORCH_BLAS_LAPACK
-
- # region TORCH_CONVOLUTION
-
- # After fold
- def after_fold(
- self,
- func_output: Any,
- output_size,
- kernel_size,
- dilation=1,
- padding=0,
- stride=1
- ) -> 'TimeTensor':
- r"""After :func:`torch.fold`.
- """
- return echotorch.as_timetensor(
- data=func_output,
- time_dim=2
- )
- # end after_fold
-
- # endregion TORCH_CONVOLUTION
-
- # region TORCH_SPARSE
-
- # After embedding_bag
- def after_embedding_bag(
- self,
- func_output,
- input,
- weight,
- offsets=None,
- max_norm=None,
- norm_type=2,
- scale_grad_by_freq=False,
- mode='mean',
- sparse=False,
- per_sample_weights=None,
- include_last_offset=False,
- padding_idx=None
- ) -> Union['TimeTensor', torch.Tensor]:
- r"""After :func:`torch.embedding_bag`.
- """
- if input.ndim == 1:
- return echotorch.as_timetensor(
- data=func_output,
- time_dim=0
- )
- else:
- return func_output
- # end if
- # end after_embedding_bag
-
- # endregion TORCH_SPARSE
-
- # region TORCH_DISTANCE
-
- # After pairwise_distance
- def after_pairwise_distance(
- self,
- func_output,
- x1,
- x2,
- p=2.0,
- eps=1e-06,
- keepdim=False
- ) -> Union['TimeTensor', torch.Tensor]:
- r"""After :func:`torch.pairwise_distance`.
- """
- if self.time_dim == 0:
- return echotorch.as_timetensor(
- data=func_output,
- time_dim=0
- )
- else:
- return func_output
- # end if
- # end after_pairwise_distance
-
- # endregion TORCH_DISTANCE
-
- # Transform to timetensor
- def transform_to_timetensors(
- self,
- tensors: Any
- ):
- r"""Transform :class:`torch.Tensor` to :class:`TimeTensor`.
- """
- return [echotorch.as_timetensor(o, time_dim=self.time_dim) for o in tensors]
- # end transform_to_timetensors
-
- # Transform object to timetensor if possible
- def transform_object_to_timetensor(
- self,
- obj
- ):
- r"""Transform object to timetensor if possible
- """
- if isinstance(obj, torch.Tensor):
- return echotorch.as_timetensor(
- data=obj,
- time_dim=self.time_dim
- )
- else:
- return obj
- # end if
- # end transform_object_to_timetensor
-
- # Convert to timetensor
- def convert_to_timetensor(self, func_ret):
- r"""Convert to timetensor.
- """
- if isinstance(func_ret, torch.Tensor):
- return TimeTensor(
- data=func_ret,
- time_dim=self.time_dim
- )
- elif isinstance(func_ret, list):
- return [self.transform_object_to_timetensor(x) for x in func_ret]
- elif isinstance(func_ret, tuple):
- return tuple([self.transform_object_to_timetensor(x) for x in func_ret])
- else:
- return func_ret
- # end if
- # end convert_to_timetensor
-
- # Transform to timetensor if coherent
- def transform_similar_tensors(self, inpt):
- r"""Convert input to timetensors if elements are tensor with the same number of dimension.
- """
- if isinstance(inpt, torch.Tensor) and inpt.ndim == self.ndim:
- return echotorch.as_timetensor(
- data=inpt,
- time_dim=self.time_dim
- )
- else:
- return inpt
- # end if
- # end transform_similar_tensors
-
- # Convert to timetensor if coherent
- def convert_similar_tensors(self, inpt):
- r"""Convert input to timetensors if elements are tensor with the same number of dimension.
- """
- if isinstance(inpt, torch.Tensor) and inpt.ndim == self.ndim:
- return echotorch.as_timetensor(
- data=inpt,
- time_dim=self.time_dim
- )
- elif isinstance(inpt, list):
- return [self.transform_similar_tensors(x) for x in inpt]
- elif isinstance(inpt, tuple):
- return tuple([self.transform_similar_tensors(x) for x in inpt])
- else:
- return inpt
- # end if
- # end convert_similar_tensors
-
- # Check that all timetensors have the right time dimension index
- def check_time_dim(
- self,
- tensors: Any
- ) -> None:
- r"""Check that all timetensors have the right time dimension index.
- """
- for tensor in tensors:
- if isinstance(tensor, TimeTensor) and self.time_dim != tensor.time_dim:
- raise RuntimeError(
- "Expected timetensors with the same time dimension index, got {} and {}".format(
- self.time_dim,
- tensor.time_dim
- )
- )
- # end if
- # end for
- # end check_time_dim
-
- # Convert the output of a reduction operation
- def convert_after_reduction(
- self,
- func_ret,
- input,
- dim: int = None,
- keepdim: bool = False,
- **kwargs
- ) -> Union['TimeTensor', torch.Tensor]:
- r"""Convert the output of a reduction operation.
- """
- if (dim is None or dim == self.time_dim) and not keepdim:
- return func_ret
- else:
- return self.convert_to_timetensor(func_ret)
- # end if
- # end convert_after_reduction
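-
- # Example (illustrative, assuming the op is listed in
- # TORCH_OPS_REDUCTION): a full reduction, or a reduction over the time
- # dim without keepdim, loses the time axis and yields a plain tensor;
- # any other reduction keeps the timetensor wrapper:
- #
- # >>> torch.sum(x) # plain torch.Tensor
- # >>> torch.sum(x, dim=x.time_dim, keepdim=True) # still a TimeTensor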
-
- # Transpose
- # def t(self) -> 'TimeTensor':
- # r"""Expects the timetensor to be <= 1-D timetensor and transposes dimensions 0 and 1.
- #
- # If time dimension is in position 0, then it is switched to 1, and vice versa.
- #
- # TODO: complete doc
- # """
- # if self.ndim == 2:
- # return TimeTensor(
- # data=self._tensor.t(),
- # time_dim=1-self._time_dim
- # )
- # elif self.ndim < 2:
- # return self
- # else:
- # # Inverse time dim if targeted
- # if self._time_dim in [0, 1]:
- # return TimeTensor(
- # data=self._tensor.t(),
- # time_dim=1 - self._time_dim
- # )
- # else:
- # return TimeTensor(
- # data=self._tensor.t(),
- # time_dim=self._time_dim
- # )
- # # end if
- # # end if
- # # end t
-
- # As strided
- def as_strided(
- self,
- size,
- stride,
- storage_offset=0,
- time_dim=None
- ) -> 'TimeTensor':
- r"""TODO: document
-
- :param size:
- :param stride:
- :param storage_offset:
- :return:
- :rtype:
- """
- # Strided tensor
- data_tensor = self._tensor.as_strided(size, stride, storage_offset)
-
- # Time dim still present
- if len(size) >= self._time_dim + 1:
- # Return timetensor
- return TimeTensor.new_timetensor(
- data=data_tensor,
- time_dim=self._time_dim if time_dim is None else time_dim
- )
- elif time_dim is not None:
- # Time dim given explicitly
- return TimeTensor.new_timetensor(
- data=data_tensor,
- time_dim=time_dim
- )
- else:
- return data_tensor
- # end if
- # end as_strided
-
- # Torch functions
- def __torch_function__(
- self,
- func,
- types,
- args=(),
- kwargs=None
- ):
- """
- Torch functions
- """
- # Dict if None
- if kwargs is None:
- kwargs = {}
-
- # end if
-
- # Convert timetensor to tensors
- def convert(args):
- if type(args) is TimeTensor:
- return args.tensor
- elif type(args) is tuple:
- return tuple([convert(a) for a in args])
- elif type(args) is list:
- return [convert(a) for a in args]
- else:
- return args
- # end if
-
- # end convert
-
- # Raise error if unsupported operation
- if func.__name__ in TORCH_OPS_UNSUPPORTED:
- raise RuntimeError(
- "Operation {} is not supported for timetensors".format(func.__name__)
- )
- # end if
-
- # Print warning if not implemented
- if func.__name__ not in TORCH_OPS_IMPLEMENTED:
- warnings.warn(
- "Operation {} not implemented for timetensors, unpredictable behaviors here!".format(func.__name__)
- )
- # end if
-
- # Validate ops inputs
- if hasattr(self, 'validate_' + func.__name__): getattr(self, 'validate_' + func.__name__)(*args, **kwargs)
-
- # Before callback
- if hasattr(self, 'before_' + func.__name__): args = getattr(self, 'before_' + func.__name__)(*args, **kwargs)
-
- # Get the tensor in the arguments
- conv_args = [convert(a) for a in args]
-
- # Middle callback
- if hasattr(self, 'middle_' + func.__name__): args = getattr(self, 'middle_' + func.__name__)(*args, **kwargs)
-
- # Execute function
- ret = func(*conv_args, **kwargs)
-
- # If output can be directly converted to timetensor
- if func.__name__ in TORCH_OPS_DIRECT:
- ret = self.convert_to_timetensor(ret)
- elif func.__name__ in TORCH_OPS_REDUCTION:
- ret = self.convert_after_reduction(ret, *args, **kwargs)
- # end if
-
- # Create TimeTensor and returns or returns directly
- if hasattr(self, 'after_' + func.__name__):
- return getattr(self, 'after_' + func.__name__)(ret, *args, **kwargs)
- else:
- return self.convert_similar_tensors(ret)
- # end if
- # end __torch_function__
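-
- # Dispatch summary (for clarity): for a call such as torch.cat([x, y])
- # on timetensors, __torch_function__ 1) rejects ops listed in
- # TORCH_OPS_UNSUPPORTED, 2) warns on ops missing from
- # TORCH_OPS_IMPLEMENTED, 3) runs the optional validate_/before_/middle_
- # hooks, 4) executes the real torch op on the unwrapped tensors, and
- # 5) post-processes the result through after_<op>, the
- # TORCH_OPS_DIRECT/TORCH_OPS_REDUCTION rules, or
- # convert_similar_tensors as a fallback.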
-
- # endregion TORCH_FUNCTION
-
- # region OVERRIDE
-
- # Get item
- def __getitem__(self, item) -> Union['TimeTensor', torch.Tensor]:
- """
- Get data in the tensor
- """
- # Multiple indices
- if type(item) is tuple:
- # If time dim is in
- if len(item) > self._time_dim:
- # Selection or slice?
- if type(item[self._time_dim]) in [slice, list]:
- return self.indexing_timetensor(item)
- else:
- return self._tensor[item]
- # end if
- else:
- return self.indexing_timetensor(item)
- # end if
- elif type(item) in [slice, list]:
- return self.indexing_timetensor(item)
- else:
- # Time selection?
- if self._time_dim == 0:
- return self._tensor[item]
- else:
- # Integer indexing removes dim 0, shifting the time dim to the left
- return TimeTensor(self._tensor[item], time_dim=self._time_dim - 1)
- # end if
- # end __getitem__
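-
- # Example (illustrative): for x of shape (10, 3) with time_dim=0, x[2]
- # selects a single time step and returns a plain torch.Tensor, while
- # x[2:5] keeps the time axis and returns a TimeTensor.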
-
- # Set item
- def __setitem__(self, key, value) -> None:
- """
- Set data in the tensor
- """
- self._tensor[key] = value
- # end __setitem__
-
- # Length
- def __len__(self) -> int:
- """
- Time length of the time series
- """
- return self.tlen
- # end __len__
-
- # Get representation
- def __repr__(self) -> str:
- r"""Get a string representation
-
- :return: ``TimeTensor`` representation.
- :rtype: ``str``
- """
- tensor_desc = self._tensor.__repr__()
- tensor_desc = tensor_desc[7:-1]
- return "timetensor({}, time_dim: {})".format(tensor_desc, self._time_dim)
- # end __repr__
-
- # Less than operation with time tensors.
- def __lt__(self, other) -> 'TimeTensor':
- r"""Less than operation with time tensors.
- """
- return TimeTensor(
- data=self._tensor < other,
- time_dim=self.time_dim
- )
- # end __lt__
-
- # Less or equal than operation with time tensors.
- def __le__(self, other) -> 'TimeTensor':
- r"""Less than operation with time tensors.
- """
- return TimeTensor(
- data=self._tensor <= other,
- time_dim=self.time_dim
- )
- # end __le__
-
- # Greater than operation with time tensors.
- def __gt__(self, other) -> 'TimeTensor':
- r"""Greater than operation with time tensors.
- """
- return TimeTensor(
- data=self._tensor > other,
- time_dim=self.time_dim
- )
- # end __gt__
-
- # Greater or equal than operation with time tensors.
- def __ge__(self, other) -> 'TimeTensor':
- r"""Greater or equal than operation with time tensors.
- """
- return TimeTensor(
- data=self._tensor >= other,
- time_dim=self.time_dim
- )
- # end __ge__
-
- # Are two time-tensors equivalent
- def __eq__(
- self,
- other: 'TimeTensor'
- ) -> bool:
- r"""Are two time-tensors equivalent?
-
- :param other: The other time-tensor
- :type other: ``TimeTensor``
-
- """
- return torch.equal(self._tensor, other.tensor) and self.time_dim == other.time_dim
- # end __eq__
-
- # Are two time-tensors not equal
- def __ne__(
- self,
- other: 'TimeTensor'
- ) -> bool:
- r"""Are two time-tensors not equal
- """
- return not self.__eq__(other)
- # end __ne__
-
- # endregion OVERRIDE
-
- # region STATIC
-
- # Returns a new TimeTensor with data as the tensor data.
- @classmethod
- def new_timetensor(
- cls,
- data: Union[torch.Tensor, 'TimeTensor'],
- time_dim: Optional[int] = 0
- ) -> 'TimeTensor':
- """
- Returns a new TimeTensor with data as the tensor data.
- @param data:
- @param time_lengths:
- @param time_dim:
- @param copy_data:
- @return:
- """
- return TimeTensor(
- data,
- time_dim=time_dim
- )
- # end new_timetensor
-
- # Returns new time tensor with a specific function
- @classmethod
- def new_timetensor_with_func(
- cls,
- *size: Tuple[int],
- func: Callable,
- length: int,
- batch_size: Optional[Tuple[int]] = None,
- out: Optional['TimeTensor'] = None,
- **kwargs
- ) -> 'TimeTensor':
- r"""Returns a new :class:`TimeTensor` with a specific function to generate the data.
-
- :param func: Function used to generate the data, called with the total size.
- :type func: ``Callable``
- :param size: Sizes of the channel dimensions.
- :type size: ``Tuple[int]``
- :param length: Length of the time dimension.
- :type length: ``int``
- :param batch_size: Sizes of the batch dimensions (default: none).
- :type batch_size: ``Tuple[int]``
- :param out: Optional output timetensor to fill in-place.
- :type out: :class:`TimeTensor`
- """
- # Batch size
- batch_size = tuple() if batch_size is None else batch_size
-
- # Time dim
- time_dim = len(batch_size)
-
- # Total size
- tt_size = list(batch_size) + [length] + list(size)
-
- # Out mode
- if out is None:
- # Create TimeTensor
- return TimeTensor(
- data=func(tuple(tt_size), **kwargs),
- time_dim=time_dim
- )
- else:
- # Call function
- func(tuple(tt_size), out=out.tensor, **kwargs)
- out.time_dim = time_dim
- return out
- # end if
- # end new_timetensor_with_func
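-
- # Example (illustrative sketch, assuming torch.randn as the generator):
- # the equivalent of echotorch.randn(2, length=20):
- #
- # >>> ttensor = TimeTensor.new_timetensor_with_func(2, func=torch.randn, length=20)
- # >>> ttensor.csize(), len(ttensor)
- # (torch.Size([2]), 20)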
-
- # endregion STATIC
-
-# end TimeTensor
-
-# endregion TIMETENSOR
-
-
-# region VARIANTS
-
-# Float time tensor
-class FloatTimeTensor(TimeTensor):
- r"""Float time tensor.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'TimeTensor'],
- time_dim: Optional[int] = 0
- ) -> None:
- r"""Float TimeTensor constructor
-
- :param data: The data in a torch tensor to transform to timetensor.
- :param time_dim: The position of the time dimension.
- """
- # Super call
- super(FloatTimeTensor, self).__init__(
- data,
- time_dim=time_dim
- )
-
- # Transform type
- self.float()
- # end __init__
-
-# end FloatTimeTensor
-
-
-# Double time tensor
-class DoubleTimeTensor(TimeTensor):
- r"""Double time tensor.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'TimeTensor'],
- time_dim: Optional[int] = 0
- ) -> None:
- r"""Double TimeTensor constructor
-
- :param data: The data in a torch tensor to transform to timetensor.
- :param time_dim: The position of the time dimension.
- """
- # Super call
- super(DoubleTimeTensor, self).__init__(
- data,
- time_dim=time_dim
- )
-
- # Cast data
- self.double()
- # end __init__
-
-# end DoubleTimeTensor
-
-
-# Half time tensor
-class HalfTimeTensor(TimeTensor):
- r"""Half time tensor.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'TimeTensor'],
- time_dim: Optional[int] = 0
- ) -> None:
- r"""Half TimeTensor constructor
-
- :param data: The data in a torch tensor to transform to timetensor.
- :param time_dim: The position of the time dimension.
- """
- # Super call
- super(HalfTimeTensor, self).__init__(
- data,
- time_dim=time_dim
- )
-
- # Cast data
- self.half()
- # end __init__
-
-# end HalfTimeTensor
-
-
-# bfloat16 (brain floating point) time tensor
-class BFloat16Tensor(TimeTensor):
- r"""16-bit floating point 2 time tensor.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'TimeTensor'],
- time_dim: Optional[int] = 0
- ) -> None:
- r"""16-bit TimeTensor constructor
-
- :param data: The data in a torch tensor to transform to timetensor.
- :param time_dim: The position of the time dimension.
- """
- # Super call
- super(BFloat16Tensor, self).__init__(
- data,
- time_dim=time_dim
- )
-
- # Cast
- self.bfloat16()
- # end __init__
-
-# end BFloat16Tensor
-
-
-# 8-bit integer (unsigned) time tensor
-class ByteTimeTensor(TimeTensor):
- r"""8-bit integer (unsigned) time tensor.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'TimeTensor'],
- time_dim: Optional[int] = 0
- ) -> None:
- r"""8-bit integer (unsigned) TimeTensor constructor
-
- :param data: The data in a torch tensor to transform to timetensor.
- :param time_dim: The position of the time dimension.
- """
- # Super call
- super(ByteTimeTensor, self).__init__(
- data,
- time_dim=time_dim
- )
-
- # Cast
- self.byte()
- # end __init__
-
-# end ByteTimeTensor
-
-
-# 8-bit integer (signed) time tensor
-class CharTimeTensor(TimeTensor):
- r"""8-bit integer (unsigned) time tensor.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'TimeTensor'],
- time_dim: Optional[int] = 0
- ) -> None:
- r"""CharTimeTensor constructor
-
- :param data: The data in a torch tensor to transform to timetensor.
- :param time_dim: The position of the time dimension.
- """
- # Super call
- super(CharTimeTensor, self).__init__(
- data,
- time_dim=time_dim
- )
-
- # Cast
- self.char()
- # end __init__
-
-# end CharTimeTensor
-
-
-# Boolean time tensor
-class BooleanTimeTensor(TimeTensor):
- r"""Boolean time tensor.
- """
-
- # Constructor
- def __init__(
- self,
- data: Union[torch.Tensor, 'TimeTensor'],
- time_dim: Optional[int] = 0
- ) -> None:
- r"""BooleanTimeTensor constructor
-
- :param data: The data in a torch tensor to transform to timetensor.
- :param time_dim: The position of the time dimension.
- """
- # Super call
- super(BooleanTimeTensor, self).__init__(
- data,
- time_dim=time_dim
- )
-
- # Cast
- self.bool()
- # end __init__
-
-# end BooleanTimeTensor
-
-
-# endregion VARIANTS
diff --git a/echotorch/training_and_evaluation.py b/echotorch/training_and_evaluation.py
deleted file mode 100644
index 4ada024..0000000
--- a/echotorch/training_and_evaluation.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/training_and_evaluation.py
-# Description : Utility functions to easily train and evaluate ESNs
-# Date : 23rd of January, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-
-
-# Train an ESN with a dataset
-def fit(echo_model, dataset):
- """
- Train an ESN with a dataset
- :param echo_model: EchoTorch model to train
- :param dataset: Dataset object to use for training
- """
- pass
-# end fit
-
-
-# Evaluate a trained ESN with a dataset
-def eval(echo_model, dataset):
- """
- Evaluate a trained ESN with a dataset
- :param echo_model: EchoTorch model to train
- :param dataset: Dataset object to use for testing
- """
- pass
-# end eval
-
-
-# Train and evaluate an ESN with a dataset using cross-validation
-def cross_val_score(echo_model, dataset, eval_function=None, cv=10, n_jobs=None, verbose=False):
- """
- Train and evaluate an ESN with a dataset using cross-validation
- :param echo_model: EchoTorch model to evaluate.
- :param dataset: Dataset to use for training and evaluation.
- :param eval_function: Evaluation function (pytorch loss function)
- :param cv: Cross-validation parameter (default: 10 folds), as an integer or a CrossValidation object.
- :param n_jobs: Number of jobs to run in parallel.
- :param verbose: If True, print progress information.
- """
- pass
-# end cross_val_score
diff --git a/echotorch/transforms/Aggregator.py b/echotorch/transforms/Aggregator.py
index efb8170..c3a442c 100644
--- a/echotorch/transforms/Aggregator.py
+++ b/echotorch/transforms/Aggregator.py
@@ -82,7 +82,7 @@ def entries(self):
"""
Registered entries
"""
- return self._data.keys()
+ return list(self._data.keys())
# end entries
# Is initialized
diff --git a/echotorch/transforms/aggregators/StatsAggregator.py b/echotorch/transforms/aggregators/StatsAggregator.py
index 58efdb5..f01e369 100644
--- a/echotorch/transforms/aggregators/StatsAggregator.py
+++ b/echotorch/transforms/aggregators/StatsAggregator.py
@@ -33,28 +33,6 @@ class StatsAggregator(Aggregator):
An aggregator which computes basic statistics about time series
"""
- # region PROPERTIES
-
- # Get data
- @property
- def data(self):
- """
- Get data
- """
- return self._data
- # end data
-
- # Get counters
- @property
- def counters(self):
- """
- Get counters
- """
- return self._counters
- # end counters
-
- # endregion PROPERTIES
-
# region PUBLIC
# Get statistics
@@ -62,9 +40,6 @@ def get_statistics(self, stat_type):
"""
Get statistics
"""
- print(stat_type)
- print(self._data[stat_type])
- print(self._counters[stat_type])
return self._data[stat_type] / self._counters[stat_type]
# end get_statistics
@@ -93,6 +68,8 @@ def _initialize(self):
self._register("mean", torch.zeros(self._input_dim))
self._register("std", torch.zeros(self._input_dim))
self._register("mean_length", 0)
+ self._register("max", torch.zeros(self._input_dim))
+ self._register("min", torch.zeros(self._input_dim))
self._initialized = True
# end _initialize
@@ -102,11 +79,12 @@ def _aggregate(self, x):
Aggregate information
:param x: Input tensor
"""
- if torch.numel(x) > 0:
- self._update_entry("mean", torch.mean(x[torch.isnan(x) == False], dim=0))
- self._update_entry("std", torch.std(x[torch.isnan(x) == False], dim=0))
- self._update_entry("mean_length", x.size(0))
- # end if
+ # Mean, std, mean length, max, min
+ self._update_entry("mean", torch.mean(x, dim=0))
+ self._update_entry("std", torch.std(x, dim=0))
+ self._update_entry("mean_length", x.size(0))
+ self._update_entry("max", torch.max(x, dim=0)[0])
+ self._update_entry("min", torch.min(x, dim=0)[0])
# end _aggregate
# endregion OVERRIDE
diff --git a/echotorch/transforms/functional/__init__.py b/echotorch/transforms/functional/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/transforms/functional/filters.py b/echotorch/transforms/functional/filters.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/transforms/functional/images.py b/echotorch/transforms/functional/images.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/transforms/functional/text.py b/echotorch/transforms/functional/text.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/transforms/timeseries/Chain.py b/echotorch/transforms/timeseries/Chain.py
deleted file mode 100644
index 41a5f3e..0000000
--- a/echotorch/transforms/timeseries/Chain.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/transforms/timeseries/Chain.py
-# Description : Run transformers in chain
-# Date : 25th of January, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti ,
-
-
-# Imports
-import torch
-
-# EchoTorch imports
-from echotorch.transforms import Transformer
-
-
-# Chain
-class Chain(Transformer):
- """
- Run transformers in chain
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(self, children, *args, **kwargs):
- """
- Constructor
- """
- # Super constructor
- super(Chain, self).__init__(
- *args,
- input_dim=children[0].input_dim,
- output_dim=children[-1].output_dim,
- **kwargs
- )
-
- # Properties
- self._children = children
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
- # Children
- @property
- def children(self):
- """
- Children
- """
- return self._children
- # end children
-
- # endregion PROPERTIES
-
- # region OVERRIDE
-
- # Transform data
- def _transform(self, x):
- """
- Transform information
- """
- # Run each agg/trans
- for child in self._children:
- x = child(x)
- # end for
-
- return x
- # end _transform
-
- # endregion OVERRIDE
-
-# end Chain
diff --git a/echotorch/transforms/timeseries/FilterInfiniteValue.py b/echotorch/transforms/timeseries/FilterInfiniteValue.py
deleted file mode 100644
index aeac228..0000000
--- a/echotorch/transforms/timeseries/FilterInfiniteValue.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/transforms/timeseries/FilterInfiniteValue.py
-# Description : Filter infinite values in time series
-# Date : 27th of January 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti ,
-
-
-# Imports
-import torch
-from ..Transformer import Transformer
-
-
-# FilterInfiniteValue
-class FilterInfiniteValue(Transformer):
- """
- Filter infinite values in time series
- """
-
- # Constructor
- def __init__(self, input_dim, dummy_value, dtype=torch.float64):
- """
- Constructor
- """
- # Super constructor
- super(FilterInfiniteValue, self).__init__(
- input_dim=input_dim,
- output_dim=input_dim,
- dtype=dtype
- )
-
- # Properties
- self._dummy_value = dummy_value
- # end __init__
-
- # region PROPERTIES
-
- # Value to replace infinity
- @property
- def dummy_value(self):
- """
- Value to replace infinity
- :return: Value to replace infinity
- """
- return self._dummy_value
- # end dummy_value
-
- # endregion PROPERTIES
-
- # region OVERRIDE
-
- # Transform
- def _transform(self, x):
- """
- Transform input
- """
- x[torch.isinf(x)] = self._dummy_value
- return x
- # end _transform
-
- # endregion OVERRIDE
-
-# end FilterInfiniteValue
diff --git a/echotorch/transforms/timeseries/Normalize.py b/echotorch/transforms/timeseries/Normalize.py
index 1527023..cf14aae 100644
--- a/echotorch/transforms/timeseries/Normalize.py
+++ b/echotorch/transforms/timeseries/Normalize.py
@@ -31,25 +31,24 @@ class Normalize(Transformer):
"""
# Constructor
- def __init__(self, input_dim, mu=None, std=None, dummy_zero_std=1.0, dtype=torch.float64):
+ def __init__(self, input_dim, mu, std, dtype=torch.float64):
"""
Constructor
"""
# Super constructor
super(Normalize, self).__init__(
input_dim=input_dim,
- output_dim=input_dim,
- dtype=dtype
+ output_dim=input_dim
)
# Properties
self._mu = mu
self._std = std
self._input_dim = input_dim
- self._dummy_zero_std = dummy_zero_std
+ self._dtype = dtype
# end __init__
- # region PROPERTIES
+ #region PROPERTIES
# Dimension of the input timeseries
@property
@@ -81,27 +80,9 @@ def dtype(self):
return self._dtype
# end output_dim
- # Mean
- @property
- def mean(self):
- """
- Mean
- """
- return self._mu
- # end mean
-
- # Standard deviation
- @property
- def std(self):
- """
- Standard deviation
- """
- return self._std
- # end std
-
- # endregion PROPERTIES
+ #endregion PROPERTIES
- # region PRIVATE
+ #region PRIVATE
# Transform
def _transform(self, x):
@@ -110,33 +91,17 @@ def _transform(self, x):
:param x:
:return:
"""
- # Mean
- if self._mu is None:
- x -= torch.mean(x, dim=0)
- else:
- x -= self._mu
- # end if
-
- # Standard deviation
- if self._std is None:
- x_std = torch.std(x, dim=0)
- x_std[x_std == 0] = self._dummy_zero_std
- x /= x_std
- else:
- x /= self._std
- # end if
-
- return x
+ return (x - self._mu) / self._std
# end _transform
- # endregion PRIVATE
+ #endregion PRIVATE
- # region OVERRIDE
+ #region OVERRIDE
- # endregion OVERRIDE
+ #endregion OVERRIDE
- # region STATIC
+ #region STATIC
- # endregion STATIC
+ #endregion STATIC
# end Normalize
diff --git a/echotorch/transforms/timeseries/Range.py b/echotorch/transforms/timeseries/Range.py
deleted file mode 100644
index cc3eb8f..0000000
--- a/echotorch/transforms/timeseries/Range.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/transforms/timeseries/Range.py
-# Description : Put a timeseries in a range.
-# Date : 12th of April, 2020
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti ,
-
-
-# Imports
-import torch
-from ..Transformer import Transformer
-
-
-# Range transformer
-class Range(Transformer):
- """
- Put a timeseries in a range
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(self, input_dim, min, max, dtype=torch.float64):
- """
- Constructor
- :param input_dim: Input dimension
- :param min: Lower limit of the range
- :param max: Higher limit of the range
- """
- # Super constructor
- super(Range, self).__init__(input_dim=input_dim, output_dim=input_dim, dtype=dtype)
-
- # Properties
- self._min = min
- self._max = max
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
- # endregion PROPERTIES
-
- # region OVERRIDE
-
- # Transform
- def _transform(self, x):
- """
- Transform input
- :param x:
- :return:
- """
- # Maximum
- if isinstance(self._max, torch.Tensor):
- # For each channel
- for i_i in range(self._input_dim):
- x[x[:, i_i] > self._max[i_i], i_i] = self._max[i_i]
- # end for
- else:
- x[x > self._max] = self._max
- # end if
-
- # Minimum
- if isinstance(self._min, torch.Tensor):
- # For each channel
- for i_i in range(self._input_dim):
- x[x[:, i_i] < self._min[i_i], i_i] = self._min[i_i]
- # end for
- else:
- x[x < self._min] = self._min
- # end if
-
- return x
- # end _transform
-
- # endregion OVERRIDE
-
-# end Range
diff --git a/echotorch/transforms/timeseries/Resampling.py b/echotorch/transforms/timeseries/Resampling.py
deleted file mode 100644
index 8e19c33..0000000
--- a/echotorch/transforms/timeseries/Resampling.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/transforms/timeseries/Resampling.py
-# Description : Resample a timeseries
-# Date : 5th of February, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti ,
-
-
-# Imports
-import math
-import torch
-import torch.nn.functional
-import torch.nn as nn
-import numpy as np
-from scipy import interpolate
-from ..Transformer import Transformer
-
-
-# Resampling transformer
-class Resampling(Transformer):
- """
- Resample a timeseries
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(self, input_dim, scaling_factor=1.0, mode='scipy', kind='zero', dtype=torch.float64):
- """
- Constructor
- :param input_dim: Input dimension
- :param scaling_factor: Increasing/reducing factor for timeseries (1.0 returns the inputs)
- :param mode: Use 'torch' or 'scipy' for different interpolation method.
- """
- # Super constructor
- super(Resampling, self).__init__(input_dim=input_dim, output_dim=input_dim, dtype=dtype)
-
- # Properties
- self._scaling_factor = scaling_factor
- self._mode = mode
- self._kind = kind
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
- # Resampling factors
- @property
- def scaling_factor(self):
- """
- Scaling factor
- """
- return self._scaling_factor
- # end scaling_factor
-
- # endregion PROPERTIES
-
- # region OVERRIDE
-
- # Transform
- def _transform(self, x):
- """
- Transform input
- :param x:
- :return:
- """
- if self._mode == 'torch':
- return torch.transpose(
- torch.squeeze(
- torch.nn.functional.interpolate(
- torch.unsqueeze(
- torch.transpose(x, 0, 1),
- dim=0
- ),
- scale_factor=(self._scaling_factor,),
- recompute_scale_factor=True
- )
- ),
- 0,
- 1
- )
- elif self._mode == 'scipy':
- # Sizes
- time_length, n_channels = x.shape
-
- # Reduced time length
- r_time_len = int(math.floor(time_length * self._scaling_factor))
-
- # Output tensor
- output = torch.zeros(r_time_len, n_channels)
-
- # For each channel
- for chan_i in range(n_channels):
- # Compute interpolation function
- chan_func = interpolate.interp1d(np.arange(0, time_length), x[:, chan_i].numpy(), kind=self._kind)
-
- # Downsampled data
- down_data = chan_func(np.linspace(0, time_length - 1, r_time_len))
-
- # Set output
- output[:, chan_i] = torch.from_numpy(down_data)
- # end for
-
- return output
- # end if
- # end _transform
-
- # endregion OVERRIDE
-
-# end Resampling
diff --git a/echotorch/transforms/timeseries/Scale.py b/echotorch/transforms/timeseries/Scale.py
deleted file mode 100644
index cc4fd8e..0000000
--- a/echotorch/transforms/timeseries/Scale.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/transforms/timeseries/Scale.py
-# Description : Multiply channels by a constant
-# Date : 26th of January, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti ,
-
-
-# Imports
-import torch
-from ..Transformer import Transformer
-
-
-# Scale transformer
-class Scale(Transformer):
- """
- Multiply channels by a constant
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(self, input_dim, scales, dtype=torch.float64):
- """
- Constructor
- :param input_dim: Input dimension
- :param scales: Scales as a scalar or a tensor
- """
- # Super constructor
- super(Scale, self).__init__(input_dim=input_dim, output_dim=input_dim, dtype=dtype)
-
- # Properties
- self._scales = scales
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
- # Scales
- @property
- def scales(self):
- """
- Scales
- """
- return self._scales
- # end scales
-
- # endregion PROPERTIES
-
- # region OVERRIDE
-
- # Transform
- def _transform(self, x):
- """
- Transform input
- :param x:
- :return:
- """
- if isinstance(self._scales, torch.Tensor):
- return torch.mm(x, torch.diag(self._scales))
- else:
- return x * self._scales
- # end if
- # end _transform
-
- # endregion OVERRIDE
-
-# end Scale
diff --git a/echotorch/transforms/timeseries/__init__.py b/echotorch/transforms/timeseries/__init__.py
index 72b3bd9..f421848 100644
--- a/echotorch/transforms/timeseries/__init__.py
+++ b/echotorch/transforms/timeseries/__init__.py
@@ -21,18 +21,10 @@
# Imports
from .AddNoise import AddNoise
-from .Chain import Chain
-from .FilterInfiniteValue import FilterInfiniteValue
from .FourierTransform import FourierTransform
from .Normalize import Normalize
-from .Range import Range
-from .Resampling import Resampling
-from .Scale import Scale
from .SelectChannels import SelectChannels
from .ToOneHot import ToOneHot
# All
-__all__ = [
- 'AddNoise', 'Chain', 'FilterInfiniteValue', 'FourierTransform', 'Normalize', 'Range', 'Resampling', 'Scale',
- 'SelectChannels', 'ToOneHot'
-]
+__all__ = ['AddNoise', 'FourierTransform', 'Normalize', 'SelectChannels', 'ToOneHot']
diff --git a/echotorch/cuda/base_tensors.py b/echotorch/utility_functions.py
similarity index 100%
rename from echotorch/cuda/base_tensors.py
rename to echotorch/utility_functions.py
diff --git a/echotorch/utils/__init__.py b/echotorch/utils/__init__.py
index df4db56..62f1351 100644
--- a/echotorch/utils/__init__.py
+++ b/echotorch/utils/__init__.py
@@ -1,27 +1,9 @@
# -*- coding: utf-8 -*-
#
-# File : utils/__init__.py
-# Description : Utils subpackage init file
-# Date : 27th of April, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
# Matrix generation
-# from .matrix_generation import MatlabLoader, MatrixFactory, MatrixGenerator, NormalMatrixGenerator, NumpyLoader
-# from .matrix_generation import UniformMatrixGenerator
+from .matrix_generation import MatlabLoader, MatrixFactory, MatrixGenerator, NormalMatrixGenerator, NumpyLoader
+from .matrix_generation import UniformMatrixGenerator
# Error measure
from .error_measures import nrmse, nmse, rmse, mse, perplexity, cumperplexity, generalized_squared_cosine
@@ -35,15 +17,15 @@
pattern_interpolation, find_pattern_interpolation, find_pattern_interpolation_threshold, quota, rank, \
entropy
-# ALL
+# Visualisation
+from .visualisation import ESNCellObserver, Observable
+
__all__ = [
- # Error measures
- 'nrmse', 'nmse', 'rmse', 'mse', 'perplexity', 'cumperplexity', 'generalized_squared_cosine',
- # Random functions
- 'manual_seed',
- # Utility functions
- 'align_pattern', 'compute_correlation_matrix', 'spectral_radius', 'deep_spectral_radius', 'normalize',
- 'average_prob', 'max_average_through_time', 'compute_singular_values', 'compute_similarity_matrix',
- 'pattern_interpolation', 'find_pattern_interpolation', 'find_pattern_interpolation_threshold', 'quota', 'rank',
+ 'align_pattern', 'compute_correlation_matrix', 'nrmse', 'nmse', 'rmse', 'mse', 'perplexity', 'cumperplexity',
+ 'spectral_radius', 'deep_spectral_radius',
+ 'normalize', 'average_prob', 'max_average_through_time', 'compute_singular_values', 'generalized_squared_cosine',
+ 'compute_similarity_matrix', 'pattern_interpolation', 'MatlabLoader', 'MatrixFactory', 'MatrixGenerator',
+ 'NormalMatrixGenerator', 'NumpyLoader', 'UniformMatrixGenerator', 'ESNCellObserver',
+ 'Observable', 'find_pattern_interpolation', 'find_pattern_interpolation_threshold', 'quota', 'rank', 'manual_seed',
'entropy'
]
diff --git a/echotorch/utils/error_measures.py b/echotorch/utils/error_measures.py
index 5c2bcc0..4eff634 100644
--- a/echotorch/utils/error_measures.py
+++ b/echotorch/utils/error_measures.py
@@ -22,7 +22,7 @@ def nrmse(outputs, targets):
# Check dim
if outputs.size() != targets.size():
- raise ValueError(u"Ouputs and targets tensors don have the same number of elements")
+ raise ValueError("Ouputs and targets tensors don have the same number of elements")
# end if
# Normalization with N-1
@@ -50,7 +50,7 @@ def rmse(outputs, targets):
# Check dim
if outputs.size() != targets.size():
- raise ValueError(u"Ouputs and targets tensors don have the same number of elements")
+ raise ValueError("Ouputs and targets tensors don have the same number of elements")
# end if
# Error
@@ -75,7 +75,7 @@ def mse(outputs, targets):
# Check dim
if outputs.size() != targets.size():
- raise ValueError(u"Ouputs and targets tensors don have the same number of elements")
+ raise ValueError("Ouputs and targets tensors don have the same number of elements")
# end if
# Error
@@ -100,7 +100,7 @@ def nmse(outputs, targets):
# Check dim
if outputs.size() != targets.size():
- raise ValueError(u"Ouputs and targets tensors don have the same number of elements")
+ raise ValueError("Ouputs and targets tensors don have the same number of elements")
# end if
# Normalization with N-1
diff --git a/echotorch/utils/functional/__init__.py b/echotorch/utils/functional/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/utils/functional/statistics.py b/echotorch/utils/functional/statistics.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/utils/helpers/ESN.py b/echotorch/utils/helpers/ESN.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/utils/helpers/SPESN.py b/echotorch/utils/helpers/SPESN.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/utils/helpers/__init__.py b/echotorch/utils/helpers/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/utils/matrix_generation/CycleWithJumpsMatrixGenerator.py b/echotorch/utils/matrix_generation/CycleWithJumpsMatrixGenerator.py
index daba5c3..ef82b3c 100644
--- a/echotorch/utils/matrix_generation/CycleWithJumpsMatrixGenerator.py
+++ b/echotorch/utils/matrix_generation/CycleWithJumpsMatrixGenerator.py
@@ -33,8 +33,6 @@ class CycleWithJumpsMatrixGenerator(MatrixGenerator):
Generate cycle matrix with jumps (Rodan and Tino, 2012)
"""
- # region CONSTRUCTORS
-
# Constructor
def __init__(self, **kwargs):
"""
@@ -56,9 +54,7 @@ def __init__(self, **kwargs):
self._set_parameters(args=kwargs)
# end __init__
- # endregion CONSTRUCTORS
-
- # region PRIVATE
+ #region PRIVATE
# Generate the matrix
def _generate_matrix(self, size, dtype=torch.float64):
@@ -99,7 +95,7 @@ def _generate_matrix(self, size, dtype=torch.float64):
# end if
# end _generate_matrix
- # endregion PRIVATE
+ #endregion PRIVATE
# end CycleWithJumpsMatrixGenerator
diff --git a/echotorch/utils/matrix_generation/MatloabLoader.py b/echotorch/utils/matrix_generation/MatloabLoader.py
index bf7d113..380b58c 100644
--- a/echotorch/utils/matrix_generation/MatloabLoader.py
+++ b/echotorch/utils/matrix_generation/MatloabLoader.py
@@ -35,8 +35,6 @@ class MatlabLoader(MatrixGenerator):
Load matrix from matlab file
"""
- # region CONSTRUCTORS
-
# Constructor
def __init__(self, **kwargs):
"""
@@ -54,9 +52,7 @@ def __init__(self, **kwargs):
self._set_parameters(args=kwargs)
# end __init__
- # endregion CONSTRUCTORS
-
- # region PRIVATE
+ #region PRIVATE
# Generate the matrix
def _generate_matrix(self, size, dtype=torch.float32):
@@ -74,7 +70,7 @@ def _generate_matrix(self, size, dtype=torch.float32):
m = io.loadmat(file_name)[entity_name]
# Reshape
- if 'shape' in self._parameters.keys() and self._parameters['shape'] is not None:
+ if 'shape' in list(self._parameters.keys()):
m = np.reshape(m, self.get_parameter('shape'))
# end if
@@ -88,7 +84,7 @@ def _generate_matrix(self, size, dtype=torch.float32):
return m
# end _generate_matrix
- # endregion PRIVATE
+ #endregion PRIVATE
# end MatlabLoader
diff --git a/echotorch/utils/matrix_generation/MatrixFactory.py b/echotorch/utils/matrix_generation/MatrixFactory.py
index 51b5876..aeb82f0 100644
--- a/echotorch/utils/matrix_generation/MatrixFactory.py
+++ b/echotorch/utils/matrix_generation/MatrixFactory.py
@@ -45,7 +45,7 @@ def __init__(self):
self._instance = self
# end __init__
- # region PUBLIC
+ #region PUBLIC
# Register creator
def register_generator(self, name, generator):
@@ -72,9 +72,9 @@ def get_generator(self, name, **kwargs):
return generator(**kwargs)
# end get_generator
- # endregion PUBLIC
+ #endregion PUBLIC
- # region STATIC
+ #region STATIC
# Get instance
def get_instance(self):
@@ -117,7 +117,7 @@ def to_sparse(m):
return torch.sparse.FloatTensor(indices, values)
# end to_sparse
- # endregion STATIC
+ #endregion STATIC
# end MatrixFactory
diff --git a/echotorch/utils/matrix_generation/MatrixGenerator.py b/echotorch/utils/matrix_generation/MatrixGenerator.py
index 23b1c13..3399f8e 100644
--- a/echotorch/utils/matrix_generation/MatrixGenerator.py
+++ b/echotorch/utils/matrix_generation/MatrixGenerator.py
@@ -44,7 +44,7 @@ def __init__(self, **kwargs):
self._parameters['scale'] = 1.0
# Set parameter values given
- for key, value in kwargs.items():
+ for key, value in list(kwargs.items()):
self._parameters[key] = value
# end for
# end __init__
@@ -142,7 +142,7 @@ def _set_parameters(self, args):
Set parameters
:param args: Parameters as dict
"""
- for key, value in args.items():
+ for key, value in list(args.items()):
self.set_parameter(key, value)
# end for
# end _set_parameters
diff --git a/echotorch/utils/matrix_generation/NormalMatrixGenerator.py b/echotorch/utils/matrix_generation/NormalMatrixGenerator.py
index 1e9877b..e701f5f 100644
--- a/echotorch/utils/matrix_generation/NormalMatrixGenerator.py
+++ b/echotorch/utils/matrix_generation/NormalMatrixGenerator.py
@@ -34,8 +34,6 @@ class NormalMatrixGenerator(MatrixGenerator):
Generate a matrix with normally distributed weights.
"""
- # region CONSTRUCTORS
-
# Constructor
def __init__(self, **kwargs):
"""
@@ -57,9 +55,7 @@ def __init__(self, **kwargs):
self._set_parameters(args=kwargs)
# end __init__
- # endregion CONSTRUCTORS
-
- # region PRIVATE
+ #region PRIVATE
# Generate the matrix
def _generate_matrix(self, size, dtype=torch.float64):
@@ -105,7 +101,7 @@ def _generate_matrix(self, size, dtype=torch.float64):
return w
# end _generate_matrix
- # endregion PRIVATE
+ #endregion PRIVATE
# end NormalMatrixGenerator
diff --git a/echotorch/utils/matrix_generation/NumpyLoader.py b/echotorch/utils/matrix_generation/NumpyLoader.py
index 024c804..cb5f5a7 100644
--- a/echotorch/utils/matrix_generation/NumpyLoader.py
+++ b/echotorch/utils/matrix_generation/NumpyLoader.py
@@ -69,7 +69,7 @@ def _generate_matrix(self, size, dtype=torch.float64):
m = np.load(file_name)
# Reshape
- if 'shape' in self._parameters.keys():
+ if 'shape' in list(self._parameters.keys()):
m = np.reshape(m, self.get_parameter('shape'))
# end if
diff --git a/echotorch/utils/matrix_generation/UniformMatrixGenerator.py b/echotorch/utils/matrix_generation/UniformMatrixGenerator.py
index d85a7c9..b55d897 100644
--- a/echotorch/utils/matrix_generation/UniformMatrixGenerator.py
+++ b/echotorch/utils/matrix_generation/UniformMatrixGenerator.py
@@ -56,7 +56,7 @@ def __init__(self, **kwargs):
self._set_parameters(args=kwargs)
# end __init__
- # region PRIVATE
+ #region PRIVATE
# Generate the matrix
def _generate_matrix(self, size, dtype=torch.float64):
@@ -115,7 +115,7 @@ def _generate_matrix(self, size, dtype=torch.float64):
return w
# end _generate_matrix
- # endregion PRIVATE
+ #endregion PRIVATE
# end UniformMatrixGenerator
diff --git a/echotorch/utils/matrix_generation/functional/__init__.py b/echotorch/utils/matrix_generation/functional/__init__.py
deleted file mode 100644
index 63a2977..0000000
--- a/echotorch/utils/matrix_generation/functional/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/matrices.py
-# Description : EchoTorch matrix creation utility functions.
-# Date : 30th of March, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel ,
-# University of Geneva
-
-
diff --git a/echotorch/utils/matrix_generation/functional/cycles.py b/echotorch/utils/matrix_generation/functional/cycles.py
deleted file mode 100644
index fb4ef64..0000000
--- a/echotorch/utils/matrix_generation/functional/cycles.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/matrices.py
-# Description : EchoTorch matrix creation utility functions.
-# Date : 30th of March, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel ,
-# University of Geneva
-
-
-# Imports
-from typing import Tuple, Optional
-import torch
-
-# EchoTorch imports
-import echotorch.utils.matrix_generation as etmg
-from echotorch.utils.matrix_generation import MatrixGenerator
-
-
-# Cycle matrix with jumps generator
-def cycle_with_jumps_generator(
- *size: Tuple[int],
- cycle_weight: Optional[float] = 1.0,
- jump_weight: Optional[float] = 1.0,
- jump_size: Optional[float] = 2.0,
- dtype: Optional[torch.dtype] = None
-) -> torch.Tensor:
- r"""Generate cycle matrix with jumps (Rodan and Tino, 2012)
-
- :param size: Size of the matrix
- :type size: Tuple of two ints
- :param cycle_weight:
- :type cycle_weight:
- :param jump_weight:
- :type jump_weight:
- :param jump_size:
- :type jump_size:
- :param dtype:
- :type dtype: ``torch.dtype``
- :return: Generated cycle matrix
- :rtype: ``torch.Tensor``
-
- """
- # The matrix must be a square matrix nxn
- if len(size) == 2 and size[0] == size[1]:
- # Matrix full of zeros
- w = torch.zeros(size, dtype=dtype)
-
- # How many neurons
- n_neurons = size[0]
-
- # Create the cycle
- w[0, -1] = cycle_weight
- for i in range(n_neurons):
- w[i, i - 1] = cycle_weight
- # end for
-
- # Create jumps
- for i in range(0, n_neurons - jump_size + 1, jump_size):
- w[i, (i + jump_size) % n_neurons] = jump_weight
- w[(i + jump_size) % n_neurons, i] = jump_weight
- # end for
-
- return w
- else:
- raise ValueError("The generated matrix must be a square matrix : {}".format(size))
- # end if
-# end cycle_with_jumps_generator
-
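
With the functional module removed, the cycle-with-jumps topology is no longer available as a standalone helper. A minimal self-contained sketch of the same construction (Rodan and Tino, 2012), with `jump_size` as an int, since `range()` rejects the float the deleted signature advertised:

    import torch

    def cycle_with_jumps(n, cycle_weight=1.0, jump_weight=1.0, jump_size=2):
        # A ring of n reservoir units plus bidirectional "jump"
        # connections every jump_size units.
        w = torch.zeros(n, n)
        for i in range(n):
            w[i, i - 1] = cycle_weight      # i - 1 wraps to n - 1 when i == 0
        for i in range(0, n - jump_size + 1, jump_size):
            w[i, (i + jump_size) % n] = jump_weight
            w[(i + jump_size) % n, i] = jump_weight
        return w

    w = cycle_with_jumps(10, jump_size=3)
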
diff --git a/echotorch/utils/matrix_generation/functional/distrib.py b/echotorch/utils/matrix_generation/functional/distrib.py
deleted file mode 100644
index 63a2977..0000000
--- a/echotorch/utils/matrix_generation/functional/distrib.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/matrices.py
-# Description : EchoTorch matrix creation utility functions.
-# Date : 30th of March, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel ,
-# University of Geneva
-
-
diff --git a/echotorch/utils/matrix_generation/functional/matlab.py b/echotorch/utils/matrix_generation/functional/matlab.py
deleted file mode 100644
index 63a2977..0000000
--- a/echotorch/utils/matrix_generation/functional/matlab.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/matrices.py
-# Description : EchoTorch matrix creation utility functions.
-# Date : 30th of March, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel ,
-# University of Geneva
-
-
diff --git a/echotorch/utils/optim/RidgeRegression.py b/echotorch/utils/optim/RidgeRegression.py
deleted file mode 100644
index bd0c7b1..0000000
--- a/echotorch/utils/optim/RidgeRegression.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/matrices.py
-# Description : EchoTorch matrix creation utility functions.
-# Date : 30th of March, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel ,
-# University of Geneva
-
-
-# Imports
-from typing import Optional
-from torch.optim import Optimizer
-
-
-# RidgeRegression
-class RidgeRegression(Optimizer):
- r"""Ridge Regression (RR) optimizer.
-
- .. warning::
- Parameters need to be specified as collections that have a deterministic
- ordering that is consistent between runs. Examples of objects that don't
- satisfy those properties are sets and iterators over values of dictionaries.
-
- Args:
- params (iterable): an iterable of :class:`torch.Tensor` s or
- :class:`dict` s. Specifies what Tensors should be optimized.
- """
-
- # region CONSTRUCTORS
-
- # Constructor
- def __init__(self, params, ridge_param=0.0):
- """
- Constructor
- """
- # Check ridge param
- if ridge_param < 0:
- raise ValueError("Invalid ridge parameter: {}".format(ridge_param))
- # end if
-
- # Properties
- self._ridge_param = ridge_param
-
- # Default parameter
- defaults = dict(ridge_param=ridge_param)
-
- # Super call
- super(RidgeRegression, self).__init__(params, defaults)
- # end __init__
-
- # endregion CONSTRUCTORS
-
- # region PROPERTIES
-
- # Ridge parameter (getter)
- @property
- def ridge_param(self):
- """Ridge parameter.
- """
- return self._ridge_param
- # end ridge_param
-
- # Ridge param (setter)
- @ridge_param.setter
- def ridge_param(self, value):
- """Ridge param."""
- self._ridge_param = value
- # end ridge_param
-
- # endregion PROPERTIES
-
- # region OVERRIDE
-
- # Step
- def step(self, closure):
- """Step."""
- pass
- # end step
-
- # Zero grad
- def zero_grad(self, set_to_none: bool = False):
- r"""Do nothing"""
- pass
- # end zero_grad
-
- # endregion OVERRIDE
-
-# end RidgeRegression
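
The deleted `RidgeRegression` optimizer was a stub (`step()` and `zero_grad()` did nothing); in practice an ESN readout is trained by solving ridge regression in closed form rather than by gradient steps. A minimal sketch of that closed form, assuming a recent PyTorch with `torch.linalg`:

    import torch

    def ridge_readout(states, targets, ridge_param=1e-2):
        # Solve (X^T X + lambda I) W = X^T Y for W, where X collects
        # reservoir states (time x features) and Y the targets
        # (time x outputs).
        xtx = states.t() @ states
        xty = states.t() @ targets
        eye = torch.eye(states.size(1), dtype=states.dtype)
        return torch.linalg.solve(xtx + ridge_param * eye, xty)

    X = torch.randn(200, 50)
    Y = torch.randn(200, 1)
    W = ridge_readout(X, Y)   # 50 x 1 readout weights
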
diff --git a/echotorch/utils/optim/__init__.py b/echotorch/utils/optim/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/echotorch/utils/optimization/GeneticOptimizer.py b/echotorch/utils/optimization/GeneticOptimizer.py
index d75539a..225ec6a 100644
--- a/echotorch/utils/optimization/GeneticOptimizer.py
+++ b/echotorch/utils/optimization/GeneticOptimizer.py
@@ -86,7 +86,7 @@ def _generate_random_population(self, param_ranges):
individual = dict()
# For each parameters
- for param_name, param_range in param_ranges.items():
+ for param_name, param_range in list(param_ranges.items()):
# Get a random value for this param
individual[param_name] = param_range[random.randrange(len(param_range))]
# end for
@@ -107,7 +107,7 @@ def _crossover(self, individual1, individual2):
:return: A new individual made from the crossover
"""
# How many parameters there are
- params = individual1.keys()
+ params = list(individual1.keys())
n_params = len(params)
# Generate a random position in the DNA
@@ -135,7 +135,7 @@ def _mutation(self, individual, mutation_prob, parameter_ranges):
:return: The mutated individual
"""
# For each parameter
- for param_name, param_range in parameter_ranges.items():
+ for param_name, param_range in list(parameter_ranges.items()):
# Check probability
if random.random() < mutation_prob:
individual[param_name] = param_range[random.randrange(len(param_range))]
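
For context on the hunks above: the genetic optimizer samples each hyper-parameter from its range, recombines individuals by one-point crossover, and mutates parameters with a fixed probability. A minimal sketch of the crossover step, assuming both individuals share the same keys and have at least two parameters:

    import random

    def crossover(ind1, ind2):
        # One-point crossover over hyper-parameter dicts: take the first
        # k parameters from ind1 and the rest from ind2 (dict order is
        # insertion order in Python 3).
        params = list(ind1.keys())
        k = random.randrange(1, len(params))
        child = {p: ind1[p] for p in params[:k]}
        child.update({p: ind2[p] for p in params[k:]})
        return child

    a = {'spectral_radius': 0.9, 'leaky_rate': 0.5, 'ridge_param': 1e-3}
    b = {'spectral_radius': 1.1, 'leaky_rate': 0.2, 'ridge_param': 1e-6}
    child = crossover(a, b)
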
diff --git a/echotorch/utils/optimization/GridSearchOptimizer.py b/echotorch/utils/optimization/GridSearchOptimizer.py
index 4b36976..743e0c9 100644
--- a/echotorch/utils/optimization/GridSearchOptimizer.py
+++ b/echotorch/utils/optimization/GridSearchOptimizer.py
@@ -64,7 +64,7 @@ def _convert_parameter_range(self, param_ranges):
comb_count = 1.0
# For each item
- for key, value in param_ranges.items():
+ for key, value in list(param_ranges.items()):
if type(value) is list:
output_dictionary[key] = value
elif type(value) is int:
@@ -103,7 +103,7 @@ def _optimize_func(self, test_function, param_ranges, datasets, **kwargs):
# Population of parameter values
parameter_population = (
- dict(zip(param_ranges_list.keys(), values)) for values in product(*param_ranges_list.values())
+ dict(list(zip(list(param_ranges_list.keys()), values))) for values in product(*list(param_ranges_list.values()))
)
# Save fitness values
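
The rewritten generator expression above is equivalent to the original but harder to read; in Python 3, `zip()` and `dict()` consume dict views directly. A minimal sketch of the grid construction with hypothetical ranges:

    from itertools import product

    param_ranges = {'spectral_radius': [0.8, 0.9, 1.0], 'leaky_rate': [0.1, 0.5]}

    # Cartesian product of all parameter ranges, one dict per combination;
    # the extra list() wrappers in the hunk above are redundant.
    population = [
        dict(zip(param_ranges.keys(), values))
        for values in product(*param_ranges.values())
    ]
    assert len(population) == 3 * 2
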
diff --git a/echotorch/utils/optimization/Optimizer.py b/echotorch/utils/optimization/Optimizer.py
index 0431540..aba9a4d 100644
--- a/echotorch/utils/optimization/Optimizer.py
+++ b/echotorch/utils/optimization/Optimizer.py
@@ -47,7 +47,7 @@ def __init__(self, num_workers=1, **kwargs):
self._hooks = dict()
# Set parameter values given
- for key, value in kwargs.items():
+ for key, value in list(kwargs.items()):
self._parameters[key] = value
# end for
# end __init__
@@ -159,7 +159,7 @@ def _call_hook(self, hook_name, *args, **kwargs):
# Check hook exists
if hook_name in self.hooks_list:
# Check that functions are registered
- if hook_name in self._hooks.keys():
+ if hook_name in list(self._hooks.keys()):
# Get function
hook_funcs = self._hooks[hook_name]
@@ -252,7 +252,7 @@ def _set_parameters(self, args):
Set parameters
:param args: Parameters as dict
"""
- for key, value in args.items():
+ for key, value in list(args.items()):
self.set_parameter(key, value)
# end for
# end _set_parameters
diff --git a/echotorch/utils/optimization/RandomOptimizer.py b/echotorch/utils/optimization/RandomOptimizer.py
index f56ceaf..e86c105 100644
--- a/echotorch/utils/optimization/RandomOptimizer.py
+++ b/echotorch/utils/optimization/RandomOptimizer.py
@@ -75,7 +75,7 @@ def _optimize_func(self, test_function, param_ranges, datasets, **kwargs):
individual = dict()
# For each parameters
- for param_name, param_range in param_ranges.items():
+ for param_name, param_range in list(param_ranges.items()):
# Get a random value for this param
individual[param_name] = param_range[random.randrange(len(param_range))]
# end for
diff --git a/echotorch/utils/random.py b/echotorch/utils/random.py
index 7373e51..55b660f 100644
--- a/echotorch/utils/random.py
+++ b/echotorch/utils/random.py
@@ -1,26 +1,8 @@
# -*- coding: utf-8 -*-
#
-# File : echotorch/utils/random.py
-# Description : Utility function for random number generators
-# Date : 30th of April, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
# Imports
import random
import numpy as np
import torch
diff --git a/echotorch/utils/utility_functions.py b/echotorch/utils/utility_functions.py
index 43fb0c0..aeb7d9e 100644
--- a/echotorch/utils/utility_functions.py
+++ b/echotorch/utils/utility_functions.py
@@ -1,23 +1,5 @@
# -*- coding: utf-8 -*-
#
-# File : echotorch/utils/utility_functions.py
-# Description : Utility functions
-# Date : 23th of February, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
# Imports
import torch
@@ -475,106 +457,3 @@ def max_average_through_time(tensor, dim=0):
average = torch.mean(tensor, dim=dim)
return torch.max(average, dim=dim)[1]
# end max_average_through_time
-
-
-# Compute covariance for a lag
-def cov(x, y):
- """
- Compute covariance for a lag
- :param x: Timeseries tensor
- :param y: Timeseries tensor
- :return: The covariance coefficients
- """
- # Average x and y
- x_mu = torch.mean(x, dim=0)
- y_mu = torch.mean(y, dim=0)
-
- # Average covariance over length
- return torch.mean(torch.mul(x - x_mu, y - y_mu))
-# end cov
-
-
-# AutoCorrelation coefficients function for a time series
-def autocorrelation_function(x: torch.Tensor, n_lags: int):
- """
- AutoCorrelation coefficients function for a time series
- @param x: The 1-D timeseries
- @param n_lags: Number of lags
- @return: A 1-D tensor with n_lags+1 components
- """
- # Store coefs
- autocov_coefs = torch.zeros(n_lags+1)
-
- # Time length for comparison
- com_time_length = x.size(0) - n_lags
-
- # The time length for comparison must
- # be superior (or equal) to the number of lags required
- if com_time_length < n_lags:
- raise ValueError(
- "Time time length for comparison must "
- "be superior (or equal) to the number of lags required (series of length "
- "{}, {} lags, comparison length of {})".format(x.size(0), n_lags, com_time_length)
- )
- # end if
-
- # Covariance t to t
- autocov_coefs[0] = cov(x[:com_time_length], x[:com_time_length])
-
- # For each lag
- for lag_i in range(1, n_lags+1):
- autocov_coefs[lag_i] = cov(
- x[:com_time_length],
- x[lag_i:lag_i + com_time_length]
- )
- # end for
-
- # Co
- c0 = autocov_coefs[0].item()
-
- # Normalize with first coef
- autocov_coefs /= c0
-
- return autocov_coefs
-# end autocorrelation_function
-
-
-# AutoCorrelation Coefficients for a time series
-def autocorrelation_coefs(x: torch.Tensor, n_coefs: int):
- """
- AutoCorrelation Coefficients for a time series
- @param x: A 2D tensor (no batch) or 3D tensor (with batch)
- @param n_coefs: Number of coefficients for each dimension
- @return: A 2D tensor (n. channels x n. coefs) if no batch, 3D tensor (n. batch x n.channels x n. coefs) if batched
- """
- # Has batch?
- use_batch = x.ndim == 3
-
- # Add batch dim if necessary
- if not use_batch:
- x = torch.unsqueeze(x, dim=0)
- # end if
-
- # Sizes
- batch_size, time_length, n_channels = x.size()
-
- # Result collector
- result_collector = torch.zeros(batch_size, n_channels, n_coefs+1)
-
- # For each batch
- for batch_i in range(batch_size):
- # For each channel
- for channel_i in range(n_channels):
- result_collector[batch_i, channel_i] = autocorrelation_function(x[batch_i, :, channel_i], n_lags=n_coefs)
- # end for
- # end for
-
- # Return result
- if not use_batch:
- return torch.squeeze(result_collector, dim=0)
- # end if
- return result_collector
-# end autocorrelation_coefs
-
-
-
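
The deleted helpers computed lagged autocovariances over a common window and normalised them by the zero-lag term, i.e. r_k = cov(x_t, x_{t+k}) / cov(x_t, x_t). A compact sketch of the same computation:

    import torch

    def autocorrelation(x, n_lags):
        # Autocorrelation coefficients r_0..r_{n_lags} of a 1-D series;
        # each lag is compared over the same window of length
        # len(x) - n_lags and normalised by r_0.
        t = x.size(0) - n_lags
        def cov(a, b):
            return torch.mean((a - a.mean()) * (b - b.mean()))
        coefs = torch.stack([cov(x[:t], x[k:k + t]) for k in range(n_lags + 1)])
        return coefs / coefs[0]

    x = torch.sin(torch.linspace(0, 12.56, 200))
    r = autocorrelation(x, n_lags=20)   # r[0] == 1.0
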
diff --git a/echotorch/viz/ESNCellObserver.py b/echotorch/utils/visualisation/ESNCellObserver.py
similarity index 99%
rename from echotorch/viz/ESNCellObserver.py
rename to echotorch/utils/visualisation/ESNCellObserver.py
index af560b5..f9f27c1 100644
--- a/echotorch/viz/ESNCellObserver.py
+++ b/echotorch/utils/visualisation/ESNCellObserver.py
@@ -81,7 +81,7 @@ def draw_matrix_graph(self, matrix_name, draw=True, with_labels=True, font_weigh
G = nx.Graph()
# Add each nodes
- G.add_nodes_from(range(m.size(0)))
+ G.add_nodes_from(list(range(m.size(0))))
# For each entry in m
for i in range(m_dim):
diff --git a/echotorch/viz/NodeObserver.py b/echotorch/utils/visualisation/NodeObserver.py
similarity index 97%
rename from echotorch/viz/NodeObserver.py
rename to echotorch/utils/visualisation/NodeObserver.py
index 97d8af8..e700bec 100644
--- a/echotorch/viz/NodeObserver.py
+++ b/echotorch/utils/visualisation/NodeObserver.py
@@ -123,7 +123,7 @@ def draw_matrix_graph(self, matrix_name, draw=True, with_labels=True, font_weigh
G = nx.Graph()
# Add each nodes
- G.add_nodes_from(range(m.size(0)))
+ G.add_nodes_from(list(range(m.size(0))))
# For each entry in m
for i in range(m_dim):
@@ -194,7 +194,7 @@ def get_data(self, point, states, idxs):
list_of_observations = list()
# For each observed states
- for state in self._observation_data[point_obj].keys():
+ for state in list(self._observation_data[point_obj].keys()):
# List of data
list_of_data = self._observation_data[point_obj][state]
@@ -232,7 +232,7 @@ def _observation_point_handler(self, observation_point, data):
# If active
if self._active:
# Create list for state if necessary
- if self._current_state not in self._observation_data[observation_point].keys():
+ if self._current_state not in list(self._observation_data[observation_point].keys()):
self._observation_data[observation_point][self._current_state] = list()
# end if
diff --git a/echotorch/viz/Observable.py b/echotorch/utils/visualisation/Observable.py
similarity index 100%
rename from echotorch/viz/Observable.py
rename to echotorch/utils/visualisation/Observable.py
diff --git a/echotorch/viz/ObservationPoint.py b/echotorch/utils/visualisation/ObservationPoint.py
similarity index 100%
rename from echotorch/viz/ObservationPoint.py
rename to echotorch/utils/visualisation/ObservationPoint.py
diff --git a/echotorch/viz/StateVisualiser.py b/echotorch/utils/visualisation/StateVisualiser.py
similarity index 100%
rename from echotorch/viz/StateVisualiser.py
rename to echotorch/utils/visualisation/StateVisualiser.py
diff --git a/echotorch/viz/Visualiser.py b/echotorch/utils/visualisation/Visualiser.py
similarity index 100%
rename from echotorch/viz/Visualiser.py
rename to echotorch/utils/visualisation/Visualiser.py
diff --git a/echotorch/viz/__init__.py b/echotorch/utils/visualisation/__init__.py
similarity index 80%
rename from echotorch/viz/__init__.py
rename to echotorch/utils/visualisation/__init__.py
index b7f0008..c041d03 100644
--- a/echotorch/viz/__init__.py
+++ b/echotorch/utils/visualisation/__init__.py
@@ -25,15 +25,9 @@
from .Observable import Observable
from .ObservationPoint import ObservationPoint
from .StateVisualiser import StateVisualiser
-from .timeplots import timescatter, timeplot
-from .visualisation import show_similarity_matrix, plot_2D_ellipse, pairs
+from .visualisation import show_similarity_matrix, plot_2D_ellipse
from .Visualiser import Visualiser
# ALL
-__all__ = [
- 'ESNCellObserver', 'NodeObserver', 'Observable', 'ObservationPoint', 'StateVisualiser', 'Visualiser',
- # Timeplots
- 'timescatter', 'timeplot',
- # Visualisation
- 'show_similarity_matrix', 'plot_2D_ellipse', 'pairs'
-]
+__all__ = ['ESNCellObserver', 'NodeObserver', 'Observable', 'ObservationPoint', 'StateVisualiser',
+ 'show_similarity_matrix', 'Visualiser', 'plot_2D_ellipse']
diff --git a/echotorch/viz/conceptors.py b/echotorch/utils/visualisation/conceptors.py
similarity index 100%
rename from echotorch/viz/conceptors.py
rename to echotorch/utils/visualisation/conceptors.py
diff --git a/echotorch/viz/visualisation.py b/echotorch/utils/visualisation/visualisation.py
similarity index 64%
rename from echotorch/viz/visualisation.py
rename to echotorch/utils/visualisation/visualisation.py
index 21d6f7c..6bfad7b 100644
--- a/echotorch/viz/visualisation.py
+++ b/echotorch/utils/visualisation/visualisation.py
@@ -20,147 +20,11 @@
# Copyright Nils Schaetti
# Imports
-from typing import List, Any, Tuple, Optional, Dict
import torch
import numpy as np
import matplotlib.pyplot as plt
import numpy.linalg as lin
import math
-import echotorch
-
-
-# Show pairs of variables against each others
-def pairs(
- input: echotorch.TimeTensor,
- labels: Optional[List[str]] = None,
- figsize: Optional[Tuple[int, int]] = None,
- tight_layout: Optional[Dict] = None,
- bins: Optional[int] = 10,
- plot_correlations: Optional[bool] = True,
- sign_level: Optional[float] = 0.05,
- **kwargs
-) -> None:
- r"""Show matrix of scatter plots for pairs of :math:`p` channels contained in *input*. Scatter plots will be placed in a :math:`p \times p` matrix.
-
- :param input: A 1-D time series with :math:`p` channels.
- :type input: ``TimeTensor``
- :param labels: List of :math:`p` channels to be used as title in the scatter plot matrix (default: None).
- :type labels: List of str, optional
- :param figsize: Width and height of the figure in inches (default: None).
- :type figsize: (``float``, ``float``), optional
- :param tight_layout: Padding options for matplotlib (pad, h_pad, w_pad, rect) (default: None).
- :type tight_layout: ``dict``, optional
- :param bins: How many bins for histograms (default: 10).
- :type bins: ``int``, optional
- :param plot_correlations: Show Pearson correlation coefficients (default: True).
- :type plot_correlations: ``bool``, optional
- :param sign_level: Significance level for correlation (default: 0.05)
- :type sign_level: ``float``, optional
- :param kwargs: Additional positional argument for the scatter function.
-
- Example:
-
- >>> x = echotorch.randn(5, time_length=100)
- >>> echotorch.viz.pairs(x, figsize=(12, 8), s=3, sign_level=0.5)
- """
- # Must be a 1-D channel
- if input.cdim != 1:
- raise ValueError(
- "Expected a 1-D timetensors (got {})".format(input.cdim)
- )
- # end if
-
- # Number of channels
- nc = input.csize()[0]
-
- # Labels
- if labels is None:
- labels = [str(i) for i in range(nc)]
- # end if
-
- # Compute correlation matrix R and p-values
- if plot_correlations:
- R, pvs = echotorch.cor(input, input, pvalue=True)
- # end if
-
- # Figure
- fig, axs = plt.subplots(nc, nc, figsize=figsize)
-
- # For each pair
- for i in range(nc):
- for j in range(nc):
- # Default, no ticks
- axs[i, j].get_xaxis().set_visible(False)
- axs[i, j].get_yaxis().set_visible(False)
-
- # Not diagonal plot
- if i != j:
- # Show scatter plot
- axs[i, j].scatter(input[:, i], input[:, j], **kwargs)
-
- # X-labels
- if (i == 0 and j > 0) or (i == nc - 1 and j == 0):
- # Enable ticks
- axs[i, j].get_xaxis().set_visible(True)
-
- # Top or bottom?
- if i == 0:
- axs[i, j].get_xaxis().set_ticks_position('top')
- # end if
- # end if
-
- # Y-labels
- if (j == 0 and i > 0) or (j == nc - 1 and i == 0):
- # Enable ticks
- axs[i, j].get_yaxis().set_visible(True)
-
- # Right of left
- if i == 0:
- axs[i, j].get_yaxis().set_ticks_position('right')
- # end if
- # end if
-
- # Plot text
- if plot_correlations:
- # Coef background color
- back_color = 'white' if pvs[i, j] >= sign_level else 'green'
-
- # Show correlation coefficient
- axs[i, j].text(
- 0.04,
- 0.07,
- "{}".format(round(R[i, j].item(), 2)),
- fontsize=10,
- verticalalignment='bottom',
- horizontalalignment='left',
- bbox=dict(boxstyle='square', facecolor=back_color, alpha=0.75),
- transform=axs[i, j].transAxes
- )
- # end if
- else:
- # Show titles
- axs[i, j].set_title("{}".format(labels[i]))
-
- # Activate avis
- axs[i, j].get_xaxis().set_visible(True)
- axs[i, j].get_yaxis().set_visible(True)
-
- # Plot histogram
- axs[i, j].hist(input.tensor[:, i].numpy(), bins=bins)
- # end if
- # end for
- # end for
-
- # Tight layout
- if tight_layout is not None:
- fig.tight_layout(*tight_layout)
- else:
- fig.tight_layout()
- # end if
-
- # Show
- plt.show()
-# end pairs
# Show singular values increasing aperture
@@ -192,7 +56,7 @@ def show_sv_for_increasing_aperture(conceptor, factor, title):
# end for
# Show
- ax.set_xlabel(u"Singular values")
+ ax.set_xlabel("Singular values")
ax.set_title(title)
plt.show()
plt.close()
@@ -438,7 +302,6 @@ def show_1d_timeseries(ts, title, xmin, xmax, ymin, ymax, start=0, timesteps=-1)
plt.close()
# end show_1d_timeseries
-
def plot_2D_ellipse(A, colorstring, linewidth, resolution):
"""
Plots a 2D ellipse centered on 0 whose shape matrix is given by the
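
The `pairs()` scatter-matrix helper deleted above depended on the `TimeTensor` API and `echotorch.cor`. A reduced matplotlib-only sketch for a plain `(time x channels)` tensor with at least two channels, without the correlation overlay of the original:

    import matplotlib.pyplot as plt
    import torch

    def pairs(x, bins=10, **kwargs):
        # Scatter plots for every channel pair, histograms on the diagonal.
        nc = x.size(1)
        fig, axs = plt.subplots(nc, nc)
        for i in range(nc):
            for j in range(nc):
                if i == j:
                    axs[i, j].hist(x[:, i].numpy(), bins=bins)
                else:
                    axs[i, j].scatter(x[:, i], x[:, j], **kwargs)
        fig.tight_layout()
        plt.show()

    pairs(torch.randn(100, 3), s=3)
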
diff --git a/echotorch/viz/timeplots.py b/echotorch/viz/timeplots.py
deleted file mode 100644
index a7f0902..0000000
--- a/echotorch/viz/timeplots.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/utils/visualisation/visualistion.py
-# Description : Visualisation utility functions
-# Date : 6th of December, 2019
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import List, Dict, Optional, Tuple
-import matplotlib.pyplot as plt
-import numpy as np
-
-# Import echotorch
-from echotorch import TimeTensor
-
-
-# Show a 2D-timeseries as a set of points
-def timescatter(
- data: TimeTensor,
- title: Optional[str] = None,
- xlab: Optional[str] = None,
- ylab: Optional[str] = None,
- xticks: Optional[List[float]] = None,
- yticks: Optional[List[float]] = None,
- xlim: Optional[Tuple[float]] = None,
- ylim: Optional[Tuple[float]] = None,
- **kwargs
-) -> None:
- r"""Show a 2D-timeseries as a set of points on a 2D scatter plot.
-
- :param data: The ``TimeTensor`` to plot, there should be no batch dimensions and 2 channel dimensions.
- :type data: ``TimeTensor`` of size (time length x 2)
- :param title: Plot title
- :type title: ``str``
- :param xlab: X-axis label
- :type xlab: ``str``
- :param ylab: Y-axis label
- :type ylab: ``str``
- :param xticks: X-axis ticks
- :type xticks: List of ``float``
- :param yticks: Y-axis ticks
- :type yticks: List of ``float``
- :param xlim: X-axis start and end
- :type xlim: Tuple of ``float``
- :param ylim: Y-axis start and end
- :type ylim: Tuple of ``float``
-
- Example
- >>> x = echotorch.data.henon(1, 100, (0, 0), 1.4, 0.3, 0)
- >>> echotorch.timescatter(x[0], title="Henon Attractor", xlab="x", ylab="y")
-
- """
- # Plot properties
- if title is not None: plt.title(title)
- if xlab is not None: plt.xlabel(xlab)
- if ylab is not None: plt.ylabel(ylab)
- if xlim is not None: plt.xlim(xlim)
- if ylim is not None: plt.ylim(ylim)
- if xticks is not None: plt.xticks(xticks)
- if yticks is not None: plt.yticks(yticks)
-
- # Plot
- plt.scatter(data[:, 0], data[:, 1], **kwargs)
-# end timescatter
-
-
-# Plot a timetensor
-def timeplot(
- data: TimeTensor,
- title: Optional[str] = None,
- tstart: Optional[float] = 0.0,
- tstep: Optional[float] = 1.0,
- tlab: Optional[str] = "Time",
- xlab: Optional[str] = None,
- tticks: Optional[List[float]] = None,
- xticks: Optional[List[float]] = None,
- tlim: Optional[Tuple[float]] = None,
- xlim: Optional[Tuple[float]] = None,
- axis: Optional[plt.axis] = None,
- **kwargs
-) -> None:
- r"""Show a 0-D or 1-D timeseries, one line per channel, on a plot with time as the X-axis.
-
- :param data: The ``TimeTensor`` to plot, there must be no batch dimensions.
- :type data: ``TimeTensor``
- :param title: Plot title
- :type title: ``str``, optional
- :param tstart: Starting time position on the Time-axis
- :type tstart: ``float``, optional
- :param tstep: Time step on the Time-axis
- :type tstep: ``float``, optional
- :param tlab: Time-axis label
- :type tlab: ``str``, optional
- :param xlab: X-axis label
- :type xlab: ``str``, optional
- :param tticks: Time-axis ticks
- :type tticks: List of ``float``, optional
- :param xticks: X-axis ticks
- :type xticks: List of ``float``, optional
- :param tlim: Time-axis start and end
- :type tlim: Tuple of ``float``, optional
- :param xlim: X-axis start and end
- :type xlim: Tuple of ``float``, optional
- :param axis:
- :type axis:
-
- Example
-
- >>> x = echotorch.data.random_walk(1, length=10000, shape=())
- >>> echotorch.timeplot(x[0], title="Random Walk", xlab="X_t")
- """
- # Who to call
- plt_call = axis if axis is not None else plt
-
- # Plot properties
- if axis is None:
- if title is not None: plt_call.title(title)
- if tlab is not None: plt_call.xlabel(tlab)
- if xlab is not None: plt_call.ylabel(xlab)
- if tlim is not None: plt_call.xlim(tlim)
- if xlim is not None: plt_call.ylim(xlim)
- if tticks is not None: plt_call.xticks(tticks)
- if xticks is not None: plt_call.yticks(xticks)
- else:
- if title is not None: plt_call.set_title(title)
- if tlab is not None: plt_call.set_xlabel(tlab)
- if xlab is not None: plt_call.set_ylabel(xlab)
- if tlim is not None: plt_call.set_xlim(tlim)
- if xlim is not None: plt_call.set_ylim(xlim)
- if tticks is not None: plt_call.set_xticks(tticks)
- if xticks is not None: plt_call.set_yticks(xticks)
- # end if
-
- # 0-D or 1-D
- multi_dim = data.cdim > 0
-
- # Number of channels
- n_chan = data.csize()[0] if multi_dim else 0
-
- # X-axis
- if multi_dim:
- x_data = np.expand_dims(np.arange(tstart, tstep * data.tlen, tstep), axis=1)
- x_data = np.repeat(x_data, n_chan, axis=1)
- else:
- x_data = np.arange(tstart, tstep * data.tlen, tstep)
- # end if
-
- # Plot
- plt_call.plot(x_data, data.numpy(), **kwargs)
-# end timeplot
-
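
With `timeplots.py` deleted, `timeplot()` can be approximated with plain matplotlib. A minimal sketch for a `(time x channels)` tensor; the tick and axis handling of the original is omitted:

    import matplotlib.pyplot as plt
    import numpy as np
    import torch

    def timeplot(data, tstart=0.0, tstep=1.0, title=None, **kwargs):
        # One line per channel, time on the X-axis.
        t = tstart + tstep * np.arange(data.size(0))
        plt.plot(t, data.numpy(), **kwargs)
        if title is not None:
            plt.title(title)

    timeplot(torch.randn(100, 2).cumsum(dim=0), tstep=0.01, title="Random walk")
    plt.show()
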
diff --git a/examples/conceptors/boolean_operations.py b/examples/conceptors/boolean_operations.py
index 2f4e9df..2f1087f 100644
--- a/examples/conceptors/boolean_operations.py
+++ b/examples/conceptors/boolean_operations.py
@@ -22,7 +22,7 @@
# Imports
import echotorch.nn.conceptors as ecnc
import echotorch.utils.matrix_generation as mg
-import echotorch.visualisation as ecvs
+import echotorch.utils.visualisation as ecvs
import argparse
import torch
import matplotlib.pyplot as plt
@@ -44,7 +44,7 @@
# endregion PARAMS
# Argument parsing
-parser = argparse.ArgumentParser(prog=u"Boolean operations", description="Boolean operation demo")
+parser = argparse.ArgumentParser(prog="Boolean operations", description="Boolean operation demo")
parser.add_argument("--x", type=str, default="", required=False)
parser.add_argument("--x-name", type=str, default="", required=False)
parser.add_argument("--y", type=str, default="", required=False)
@@ -160,11 +160,11 @@
AandB = ecnc.Conceptor.operator_AND(A, B)
AorB = ecnc.Conceptor.operator_OR(A, B)
notA = ecnc.Conceptor.operator_NOT(A)
-print(A.conceptor_matrix())
-print(B.conceptor_matrix())
-print(AandB.conceptor_matrix())
-print(AorB.conceptor_matrix())
-print(notA.conceptor_matrix())
+print((A.conceptor_matrix()))
+print((B.conceptor_matrix()))
+print((AandB.conceptor_matrix()))
+print((AorB.conceptor_matrix()))
+print((notA.conceptor_matrix()))
# endregion BOOLEAN_OPERATIONS
# region PLOTS
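
For the Boolean operations printed above, Jaeger's (2014) conceptor algebra gives closed forms. A minimal sketch over raw conceptor matrices, assuming they are invertible (EchoTorch's `Conceptor` class handles the singular cases):

    import torch

    def c_not(a):
        # NOT A = I - A
        return torch.eye(a.size(0), dtype=a.dtype) - a

    def c_and(a, b):
        # A AND B = (A^-1 + B^-1 - I)^-1, for invertible A and B
        eye = torch.eye(a.size(0), dtype=a.dtype)
        return torch.linalg.inv(torch.linalg.inv(a) + torch.linalg.inv(b) - eye)

    def c_or(a, b):
        # OR via de Morgan: A OR B = NOT(NOT A AND NOT B)
        return c_not(c_and(c_not(a), c_not(b)))
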
diff --git a/examples/conceptors/conceptor_patterns_evidence.py b/examples/conceptors/conceptor_patterns_evidence.py
index dd9afb4..58bbb51 100644
--- a/examples/conceptors/conceptor_patterns_evidence.py
+++ b/examples/conceptors/conceptor_patterns_evidence.py
@@ -27,7 +27,7 @@
import argparse
import echotorch.utils
import echotorch.datasets as etds
-import echotorch.visualisation as ecvs
+import echotorch.utils.visualisation as ecvs
from echotorch.datasets import DatasetComposer
from torch.utils.data.dataloader import DataLoader
import matplotlib.pyplot as plt
@@ -63,7 +63,7 @@
gamma = 10.0
# Argument parsing
-parser = argparse.ArgumentParser(prog="subspace_demo", description=u"Fig. 1 BC subspace first demo")
+parser = argparse.ArgumentParser(prog="subspace_demo", description="Fig. 1 BC subspace first demo")
parser.add_argument("--w", type=str, default="", required=False)
parser.add_argument("--w-name", type=str, default="", required=False)
parser.add_argument("--win", type=str, default="", required=False)
@@ -221,7 +221,7 @@
# Compute NRMSE
training_NRMSE = echotorch.utils.nrmse(predY, Y_collector)
-print("Training NRMSE : {}".format(training_NRMSE))
+print(("Training NRMSE : {}".format(training_NRMSE)))
# Conceptors OFF
conceptor_net.conceptor_active(False)
@@ -358,7 +358,7 @@
plt.show()
# Show NRMSE
-print("NRMSEs aligned : {}".format(torch.mean(NRMSEs_aligned)))
+print(("NRMSEs aligned : {}".format(torch.mean(NRMSEs_aligned))))
# For each aperture (1.0, 10.0, 100.0, 1000.0, 10000.0)
ca = 0.01
diff --git a/examples/conceptors/conceptors_4_patterns_generation.py b/examples/conceptors/conceptors_4_patterns_generation.py
index 854d269..24bff7b 100644
--- a/examples/conceptors/conceptors_4_patterns_generation.py
+++ b/examples/conceptors/conceptors_4_patterns_generation.py
@@ -143,7 +143,7 @@
inputs, outputs, labels = data
# Show timeseries
- echotorch.utils.show_1d_timeseries(ts=inputs[0], title=u"p{}".format(i+1), xmin=0, xmax=20, ymin=-1, ymax=1, timesteps=21)
+ echotorch.utils.show_1d_timeseries(ts=inputs[0], title="p{}".format(i+1), xmin=0, xmax=20, ymin=-1, ymax=1, timesteps=21)
# Compute hidden states
hidden_states = esn(u=inputs.view(1, -1, 1).double(), y=inputs.view(1, -1, 1).double(), c=pattern_conceptors[i])
@@ -155,7 +155,7 @@
echotorch.utils.neurons_activities_1d(
stats=hidden_states[0],
neurons=torch.LongTensor([0, 50, 99]),
- title=u"Random neurons activities of p{}".format(i+1),
+ title="Random neurons activities of p{}".format(i+1),
colors=['r', 'g', 'b'],
xmin=0,
xmax=20,
@@ -165,10 +165,10 @@
)
# Show log10 PC energy
- echotorch.utils.plot_singular_values(stats=hidden_states[0], title=u"Log10 PC energy (p{})".format(i+1), log=True, xmin=0, xmax=100, ymin=-20, ymax=10)
+ echotorch.utils.plot_singular_values(stats=hidden_states[0], title="Log10 PC energy (p{})".format(i+1), log=True, xmin=0, xmax=100, ymin=-20, ymax=10)
# Show leading PC energy
- S, U = echotorch.utils.plot_singular_values(stats=hidden_states[0], title=u"Leading PC energy (p{})".format(i+1), xmin=0, xmax=10, ymin=0, ymax=40)
+ S, U = echotorch.utils.plot_singular_values(stats=hidden_states[0], title="Leading PC energy (p{})".format(i+1), xmin=0, xmax=10, ymin=0, ymax=40)
# Save SVD
svd_matrices.append((S, U))
@@ -176,7 +176,7 @@
# Compute similarity matrix
sim_matrix = echotorch.utils.compute_similarity_matrix(svd_matrices)
-echotorch.utils.show_similarity_matrix(sim_matrix, u"R-based similarities")
+echotorch.utils.show_similarity_matrix(sim_matrix, "R-based similarities")
# Close the conceptor net
esn.finalize()
@@ -187,7 +187,7 @@
# end for
# Show similarities between conceptors
-echotorch.utils.show_conceptors_similarity_matrix(pattern_conceptors, u"C-based similarities (a = {})".format(aperture))
+echotorch.utils.show_conceptors_similarity_matrix(pattern_conceptors, "C-based similarities (a = {})".format(aperture))
# For each patterns
for i, p in enumerate(patterns):
@@ -196,7 +196,7 @@
# Legends
legends = list()
- legends.append(u"Pattern {}".format(i+1))
+ legends.append("Pattern {}".format(i+1))
# Aperture
a = aperture
@@ -216,7 +216,7 @@
plt.plot(y_hat[0, washout_period+phase_shift:washout_period+phase_shift + 40].numpy())
# Legend
- legends.append(u"a = {} ({})".format(a, nrmse))
+ legends.append("a = {} ({})".format(a, nrmse))
# Multiply aperture
pattern_conceptors[i].multiply_aperture(factor)
@@ -224,7 +224,7 @@
# end for
plt.legend(legends, loc='upper right')
- plt.title(u"Regeneration of pattern {}".format(i+1))
+ plt.title("Regeneration of pattern {}".format(i+1))
plt.show()
plt.close()
# end for
diff --git a/examples/conceptors/memory_management.py b/examples/conceptors/memory_management.py
index 823d57f..f037eb9 100644
--- a/examples/conceptors/memory_management.py
+++ b/examples/conceptors/memory_management.py
@@ -449,8 +449,8 @@
# end for
# Show the average NRMSE
-print(u"Average NRMSE : {}".format(torch.mean(NRMSEs_aligned)))
-print(u"Average NRMSE except last : {}".format(torch.mean(NRMSEs_aligned[:-1])))
+print(("Average NRMSE : {}".format(torch.mean(NRMSEs_aligned))))
+print(("Average NRMSE except last : {}".format(torch.mean(NRMSEs_aligned[:-1]))))
print(NRMSEs_aligned)
# endregion TEST
@@ -524,12 +524,12 @@
# Title
if p == 0:
- plt.title(u'p and y')
+ plt.title('p and y')
# end if
# Title
if p == 0:
- plt.title(u'p and y')
+ plt.title('p and y')
# end if
# X labels
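
The NRMSE reported throughout these demos is the root mean squared error normalised by the spread of the target. A minimal sketch under the common standard-deviation convention (`echotorch.utils.nrmse` may differ in detail):

    import torch

    def nrmse(pred, target):
        # RMSE divided by the standard deviation of the target, so that
        # a value of 1.0 means "no better than predicting the mean".
        return torch.sqrt(torch.mean((pred - target) ** 2)) / torch.std(target)
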
diff --git a/examples/conceptors/memory_management_debug.py b/examples/conceptors/memory_management_debug.py
index 79f4ab8..5e7661d 100644
--- a/examples/conceptors/memory_management_debug.py
+++ b/examples/conceptors/memory_management_debug.py
@@ -635,8 +635,8 @@
# end for
# Show the average NRMSE
-print(u"Average NRMSE : {}".format(torch.mean(NRMSEs_aligned)))
-print(u"Average NRMSE except last : {}".format(torch.mean(NRMSEs_aligned[:-1])))
+print(("Average NRMSE : {}".format(torch.mean(NRMSEs_aligned))))
+print(("Average NRMSE except last : {}".format(torch.mean(NRMSEs_aligned[:-1]))))
# endregion TEST
@@ -710,12 +710,12 @@
# Title
if p == 0:
- plt.title(u'p and y')
+ plt.title('p and y')
# end if
# Title
if p == 0:
- plt.title(u'p and y')
+ plt.title('p and y')
# end if
# X labels
diff --git a/examples/conceptors/memory_management_forgetting_debug.py b/examples/conceptors/memory_management_forgetting_debug.py
index e4adb8b..2ae4f3a 100644
--- a/examples/conceptors/memory_management_forgetting_debug.py
+++ b/examples/conceptors/memory_management_forgetting_debug.py
@@ -661,8 +661,8 @@
# end for
# Show the average NRMSE
-print(u"Average NRMSE : {}".format(torch.mean(NRMSEs_aligned)))
-print(u"Average NRMSE except last : {}".format(torch.mean(NRMSEs_aligned[:-1])))
+print(("Average NRMSE : {}".format(torch.mean(NRMSEs_aligned))))
+print(("Average NRMSE except last : {}".format(torch.mean(NRMSEs_aligned[:-1]))))
# endregion TEST
@@ -736,12 +736,12 @@
# Title
if p == 0:
- plt.title(u'p and y')
+ plt.title('p and y')
# end if
# Title
if p == 0:
- plt.title(u'p and y')
+ plt.title('p and y')
# end if
# X labels
diff --git a/examples/conceptors/morphing_periodic_patterns.py b/examples/conceptors/morphing_periodic_patterns.py
index 3523f5b..c7a5a6c 100644
--- a/examples/conceptors/morphing_periodic_patterns.py
+++ b/examples/conceptors/morphing_periodic_patterns.py
@@ -27,7 +27,7 @@
import argparse
import echotorch.utils
import echotorch.datasets as etds
-import echotorch.visualisation as ecvs
+import echotorch.utils.visualisation as ecvs
from echotorch.datasets import DatasetComposer
from torch.utils.data.dataloader import DataLoader
import matplotlib.pyplot as plt
@@ -79,7 +79,7 @@
alpha = 1000
# Argument parsing
-parser = argparse.ArgumentParser(prog="subspace_demo", description=u"Fig. 1 BC subspace first demo")
+parser = argparse.ArgumentParser(prog="subspace_demo", description="Fig. 1 BC subspace first demo")
parser.add_argument("--w", type=str, default="", required=False)
parser.add_argument("--w-name", type=str, default="", required=False)
parser.add_argument("--win", type=str, default="", required=False)
@@ -243,7 +243,7 @@
# Compute NRMSE
training_NRMSE = echotorch.utils.nrmse(predY, Y_collector)
-print("Training NRMSE : {}".format(training_NRMSE))
+print(("Training NRMSE : {}".format(training_NRMSE)))
# Train conceptors (Compute C from R)
conceptors.finalize()
@@ -337,7 +337,7 @@
# Also remove the first 20 values and the last one, which are
# not accurate enough.
interpolation_steps = int(1.0 / interpolation_increment)
-x_crossing_discounts = x_crossing_discounts[range(interpolation_steps - 1, interpolation_length, interpolation_steps)]
+x_crossing_discounts = x_crossing_discounts[list(range(interpolation_steps - 1, interpolation_length, interpolation_steps))]
x_crossing_discounts *= interpolation_increment
x_crossing_discounts[:20] = np.ones(20) * x_crossing_discounts[19]
x_crossing_discounts[-1] = x_crossing_discounts[-2]
diff --git a/examples/conceptors/morphing_periodic_sine.py b/examples/conceptors/morphing_periodic_sine.py
index 23678b8..8893bf7 100644
--- a/examples/conceptors/morphing_periodic_sine.py
+++ b/examples/conceptors/morphing_periodic_sine.py
@@ -27,7 +27,7 @@
import argparse
import echotorch.utils
import echotorch.datasets as etds
-import echotorch.visualisation as ecvs
+import echotorch.utils.visualisation as ecvs
from echotorch.datasets import DatasetComposer
from torch.utils.data.dataloader import DataLoader
import matplotlib.pyplot as plt
@@ -79,7 +79,7 @@
alpha = 1000
# Argument parsing
-parser = argparse.ArgumentParser(prog="subspace_demo", description=u"Fig. 1 BC subspace first demo")
+parser = argparse.ArgumentParser(prog="subspace_demo", description="Fig. 1 BC subspace first demo")
parser.add_argument("--w", type=str, default="", required=False)
parser.add_argument("--w-name", type=str, default="", required=False)
parser.add_argument("--win", type=str, default="", required=False)
@@ -243,7 +243,7 @@
# Compute NRMSE
training_NRMSE = echotorch.utils.nrmse(predY, Y_collector)
-print("Training NRMSE : {}".format(training_NRMSE))
+print(("Training NRMSE : {}".format(training_NRMSE)))
# Train conceptors (Compute C from R)
conceptors.finalize()
@@ -343,7 +343,7 @@
# Also remove the first 20 values and the last one, which are
# not accurate enough.
interpolation_steps = int(1.0 / interpolation_increment)
-x_crossing_discounts = x_crossing_discounts[range(interpolation_steps - 1, interpolation_length, interpolation_steps)]
+x_crossing_discounts = x_crossing_discounts[list(range(interpolation_steps - 1, interpolation_length, interpolation_steps))]
x_crossing_discounts *= interpolation_increment
x_crossing_discounts[:20] = np.ones(20) * x_crossing_discounts[19]
x_crossing_discounts[-1] = x_crossing_discounts[-2]
diff --git a/examples/conceptors/morphing_sines.py b/examples/conceptors/morphing_sines.py
index cc35956..1b451c4 100644
--- a/examples/conceptors/morphing_sines.py
+++ b/examples/conceptors/morphing_sines.py
@@ -27,7 +27,7 @@
import argparse
import echotorch.utils
import echotorch.datasets as etds
-import echotorch.visualisation as ecvs
+import echotorch.utils.visualisation as ecvs
from echotorch.datasets import DatasetComposer
from torch.utils.data.dataloader import DataLoader
import matplotlib.pyplot as plt
@@ -79,7 +79,7 @@
alpha = 10
# Argument parsing
-parser = argparse.ArgumentParser(prog="subspace_demo", description=u"Fig. 1 BC subspace first demo")
+parser = argparse.ArgumentParser(prog="subspace_demo", description="Fig. 1 BC subspace first demo")
parser.add_argument("--w", type=str, default="", required=False)
parser.add_argument("--w-name", type=str, default="", required=False)
parser.add_argument("--win", type=str, default="", required=False)
@@ -243,7 +243,7 @@
# Compute NRMSE
training_NRMSE = echotorch.utils.nrmse(predY, Y_collector)
-print("Training NRMSE : {}".format(training_NRMSE))
+print(("Training NRMSE : {}".format(training_NRMSE)))
# Train conceptors (Compute C from R)
conceptors.finalize()
@@ -336,7 +336,7 @@
# Also remove the first 20 values and the last one, which are
# not accurate enough.
interpolation_steps = int(1.0 / interpolation_increment)
-x_crossing_discounts = x_crossing_discounts[range(interpolation_steps - 1, interpolation_length, interpolation_steps)]
+x_crossing_discounts = x_crossing_discounts[list(range(interpolation_steps - 1, interpolation_length, interpolation_steps))]
x_crossing_discounts *= interpolation_increment
x_crossing_discounts[:20] = np.ones(20) * x_crossing_discounts[19]
x_crossing_discounts[-1] = x_crossing_discounts[-2]
diff --git a/examples/conceptors/morphing_square.py b/examples/conceptors/morphing_square.py
index 5e442b8..62179b1 100644
--- a/examples/conceptors/morphing_square.py
+++ b/examples/conceptors/morphing_square.py
@@ -29,7 +29,7 @@
import argparse
import echotorch.utils
import echotorch.datasets as etds
-import echotorch.visualisation as ecvs
+import echotorch.utils.visualisation as ecvs
from echotorch.datasets import DatasetComposer
from echotorch.nn.Node import Node
from torch.utils.data.dataloader import DataLoader
@@ -79,7 +79,7 @@
gamma = 10.0
# Argument parsing
-parser = argparse.ArgumentParser(prog="subspace_demo", description=u"Fig. 1 BC subspace first demo")
+parser = argparse.ArgumentParser(prog="subspace_demo", description="Fig. 1 BC subspace first demo")
parser.add_argument("--w", type=str, default="", required=False)
parser.add_argument("--w-name", type=str, default="", required=False)
parser.add_argument("--win", type=str, default="", required=False)
@@ -261,7 +261,7 @@
# Compute NRMSE
training_NRMSE = echotorch.utils.nrmse(predY, Y_collector)
-print("Training NRMSE : {}".format(training_NRMSE))
+print(("Training NRMSE : {}".format(training_NRMSE)))
# Conceptors OFF
conceptor_net.conceptor_active(False)
@@ -348,17 +348,17 @@
morphing_vectors=mixture_vector
)
if i == 0 and j == 0:
- print("{} {}".format(i, j))
- print(generated_sample[0])
+ print(("{} {}".format(i, j)))
+ print((generated_sample[0]))
elif i == 8 and j == 0:
- print("{} {}".format(i, j))
- print(generated_sample[0])
+ print(("{} {}".format(i, j)))
+ print((generated_sample[0]))
elif i == 0 and j == 8:
- print("{} {}".format(i, j))
- print(generated_sample[0])
+ print(("{} {}".format(i, j)))
+ print((generated_sample[0]))
elif i == 8 and j == 8:
- print("{} {}".format(i, j))
- print(generated_sample[0])
+ print(("{} {}".format(i, j)))
+ print((generated_sample[0]))
# end if
# Save outputs
plots[i, j] = generated_sample[0, :, 0]
diff --git a/examples/conceptors/morphing_square_debug.py b/examples/conceptors/morphing_square_debug.py
index 2328181..81bbcd2 100644
--- a/examples/conceptors/morphing_square_debug.py
+++ b/examples/conceptors/morphing_square_debug.py
@@ -27,7 +27,7 @@
import argparse
import echotorch.utils
import echotorch.datasets as etds
-import echotorch.visualisation as ecvs
+import echotorch.utils.visualisation as ecvs
from echotorch.datasets import DatasetComposer
from echotorch.nn.Node import Node
from torch.utils.data.dataloader import DataLoader
@@ -87,7 +87,7 @@
gamma = 10.0
# Argument parsing
-parser = argparse.ArgumentParser(prog="subspace_demo", description=u"Fig. 1 BC subspace first demo")
+parser = argparse.ArgumentParser(prog="subspace_demo", description="Fig. 1 BC subspace first demo")
parser.add_argument("--w", type=str, default="", required=False)
parser.add_argument("--w-name", type=str, default="", required=False)
parser.add_argument("--win", type=str, default="", required=False)
@@ -287,7 +287,7 @@
# Compute NRMSE
training_NRMSE = echotorch.utils.nrmse(predY, Y_collector)
-print("Training NRMSE : {}".format(training_NRMSE))
+print(("Training NRMSE : {}".format(training_NRMSE)))
# Conceptors OFF
conceptor_net.conceptor_active(False)
diff --git a/examples/conceptors/subspace_demo.py b/examples/conceptors/subspace_demo.py
index aade0cb..ede359f 100644
--- a/examples/conceptors/subspace_demo.py
+++ b/examples/conceptors/subspace_demo.py
@@ -26,7 +26,7 @@
import echotorch.utils.matrix_generation as mg
import argparse
import echotorch.utils
-import echotorch.visualisation as ecvs
+import echotorch.utils.visualisation as ecvs
from echotorch.datasets import DatasetComposer
from torch.utils.data.dataloader import DataLoader
import matplotlib.pyplot as plt
@@ -73,7 +73,7 @@
gamma = 10.0
# Argument parsing
-parser = argparse.ArgumentParser(prog="subspace_demo", description=u"Fig. 1 BC subspace first demo")
+parser = argparse.ArgumentParser(prog="subspace_demo", description="Fig. 1 BC subspace first demo")
parser.add_argument("--w", type=str, default="", required=False)
parser.add_argument("--w-name", type=str, default="", required=False)
parser.add_argument("--win", type=str, default="", required=False)
@@ -223,7 +223,7 @@
# Compute NRMSE
if loading_method == ecnc.SPESNCell.W_LOADING:
training_NRMSE = echotorch.utils.nrmse(predY, Y_collector)
- print("Training NRMSE : {}".format(training_NRMSE))
+ print(("Training NRMSE : {}".format(training_NRMSE)))
# end if
# Conceptors OFF
@@ -361,8 +361,8 @@
plt.show()
# Show NRMSE
-print("NRMSEs aligned : {}".format(torch.mean(NRMSEs_aligned)))
-print(conceptors.similarity_matrix(based_on='R'))
+print(("NRMSEs aligned : {}".format(torch.mean(NRMSEs_aligned))))
+print((conceptors.similarity_matrix(based_on='R')))
# Plot R similarity matrix
ecvs.show_similarity_matrix(
@@ -371,8 +371,8 @@
)
# Print the similarity matrix
-print("C-based similarity matrix, aperture = {}".format(alpha))
-print(conceptors.similarity_matrix())
+print(("C-based similarity matrix, aperture = {}".format(alpha)))
+print((conceptors.similarity_matrix()))
# Plot conceptors similarity matrix at aperture = 10.0
ecvs.show_similarity_matrix(
diff --git a/examples/conceptors/subspace_demo_debug.py b/examples/conceptors/subspace_demo_debug.py
index 809a78f..c60ba67 100644
--- a/examples/conceptors/subspace_demo_debug.py
+++ b/examples/conceptors/subspace_demo_debug.py
@@ -27,7 +27,7 @@
import argparse
import echotorch.utils
import echotorch.datasets as etds
-import echotorch.visualisation as ecvs
+import echotorch.utils.visualisation as ecvs
from echotorch.datasets import DatasetComposer
from echotorch.nn.Node import Node
from torch.utils.data.dataloader import DataLoader
@@ -83,7 +83,7 @@
gamma = 10.0
# Argument parsing
-parser = argparse.ArgumentParser(prog="subspace_demo", description=u"Fig. 1 BC subspace first demo")
+parser = argparse.ArgumentParser(prog="subspace_demo", description="Fig. 1 BC subspace first demo")
parser.add_argument("--w", type=str, default="", required=False)
parser.add_argument("--w-name", type=str, default="", required=False)
parser.add_argument("--win", type=str, default="", required=False)
@@ -287,7 +287,7 @@
# Compute NRMSE
if loading_method == ecnc.SPESNCell.W_LOADING:
training_NRMSE = echotorch.utils.nrmse(predY, Y_collector)
- print("Training NRMSE : {}".format(training_NRMSE))
+ print(("Training NRMSE : {}".format(training_NRMSE)))
# end if
# Conceptors OFF
@@ -425,7 +425,7 @@
plt.show()
# Show NRMSE
-print("NRMSEs aligned : {}".format(torch.mean(NRMSEs_aligned)))
+print(("NRMSEs aligned : {}".format(torch.mean(NRMSEs_aligned))))
# Plot R similarity matrix
ecvs.show_similarity_matrix(
diff --git a/examples/datasets/logistic_map.py b/examples/datasets/logistic_map.py
index b31818a..ffc2e89 100644
--- a/examples/datasets/logistic_map.py
+++ b/examples/datasets/logistic_map.py
@@ -14,5 +14,5 @@
# For each sample
for data in log_map_dataset:
- print(data[0])
+ print((data[0]))
# end for
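
For reference, the recurrence behind the dataset iterated above, as a minimal sketch:

    import torch

    def logistic_map(x0, r, length):
        # x_{t+1} = r * x_t * (1 - x_t); r around 3.57-4.0 gives
        # chaotic trajectories.
        xs = torch.empty(length)
        xs[0] = x0
        for t in range(length - 1):
            xs[t + 1] = r * xs[t] * (1 - xs[t])
        return xs

    series = logistic_map(0.3, 3.9, 100)
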
diff --git a/examples/datasets/random_processes.py b/examples/datasets/random_processes.py
deleted file mode 100644
index 5f905ba..0000000
--- a/examples/datasets/random_processes.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : examples/datasets/random_processes.py
-# Description : Examples of time series generation based on random processes
-# Date : 12th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-import matplotlib.pyplot as plt
-import torch
-import echotorch.data
-import echotorch.viz
-
-# Random seed
-torch.manual_seed(1)
-
-# Random walk
-# random_walk = echotorch.data.unirw(1, length=10000)
-# print("Univariate Random walk: {}".format(random_walk))
-random_walk = echotorch.data.random_walk(1, length=10000, shape=(2,))
-print("Multivariate Random Walk: {}".format(random_walk[0]))
-
-# Plot random walk
-plt.figure()
-echotorch.viz.timeplot(
- random_walk[0],
- tstart=0.0,
- tstep=0.01,
- title="Random walk",
- xlab="x(t)"
-)
-plt.show()
-
-# Multivariate moving average (VMA)
-moving_average = echotorch.data.ma(1, length=200, order=30, size=1)
-
-# Plot MA(q)
-plt.figure()
-echotorch.viz.timeplot(
- moving_average[0][:, 0],
- tstart=0.0,
- tstep=0.01,
- title="Multivariate Moving Average MA(q)",
- xlab="x(t)"
-)
-plt.show()
-
-# Autoregressive process
-autoregressive_process = echotorch.data.ar(1, length=200, order=5, size=1)
-
-# Plot AR(p)
-plt.figure()
-echotorch.viz.timeplot(
- autoregressive_process[0][:, 0],
- tstart=0.0,
- tstep=0.01,
- title="Autoregressive Process AR(p)",
- xlab="X(t)"
-)
-plt.show()
-
-# ARMA
-arma_process = echotorch.data.arma(1, length=200, regressive_order=5, moving_average_order=7, size=1)
-
-# Plot ARMA(p, q)
-plt.figure()
-echotorch.viz.timeplot(
- arma_process[0],
- tstart=0,
- tstep=0.01,
- title="AutoRegressive Moving Average process ARMA(p, q)",
- xlab="x(t)"
-)
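
The deleted example relied on `echotorch.data.ar`/`ma`/`arma` and `echotorch.viz.timeplot`. A minimal standalone AR(p) sketch that reproduces the flavour of those series (a hypothetical helper, not the removed API):

    import torch

    def ar(length, coefs, noise_std=1.0):
        # AR(p): x_t = sum_i coefs[i] * x_{t-1-i} + eps_t,
        # with eps_t ~ N(0, noise_std^2).
        coefs = torch.tensor(coefs)
        p = coefs.numel()
        x = torch.zeros(length + p)
        for t in range(p, length + p):
            window = x[t - p:t].flip(0)   # x_{t-1}, x_{t-2}, ..., x_{t-p}
            x[t] = torch.dot(coefs, window) + noise_std * torch.randn(())
        return x[p:]                      # drop the zero-initialised warm-up

    series = ar(200, coefs=[0.6, -0.2])
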
diff --git a/examples/datasets/strange_attractors.py b/examples/datasets/strange_attractors.py
index 7d15195..e69de29 100644
--- a/examples/datasets/strange_attractors.py
+++ b/examples/datasets/strange_attractors.py
@@ -1,42 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : examples/datasets/strange_attractors.py
-# Description : Examples of time series generation based on chaotic and strange attractors
-# Date : 12th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-import matplotlib.pyplot as plt
-import echotorch.data
-import echotorch.viz
-
-
-# Henon strange attractor
-henon_series = echotorch.data.henon(
- size=1,
- length=100,
- xy=(0, 0),
- a=1.4,
- b=0.3,
- washout=0
-)
-
-# Show points
-plt.figure()
-echotorch.viz.timescatter(henon_series[0], title="Henon Attractor", xlab="Feature 1", ylab="Feature 2")
-plt.show()
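
The deleted script drew the Henon attractor via `echotorch.data.henon`. For reference, the underlying recurrence as a minimal sketch:

    import torch

    def henon(length, a=1.4, b=0.3, xy=(0.0, 0.0)):
        # x_{n+1} = 1 - a * x_n^2 + y_n,  y_{n+1} = b * x_n
        out = torch.empty(length, 2)
        x, y = xy
        for n in range(length):
            x, y = 1.0 - a * x * x + y, b * x
            out[n, 0], out[n, 1] = x, y
        return out

    points = henon(100)
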
diff --git a/examples/datasets/triplet_batching_random_and_sine_patterns.py b/examples/datasets/triplet_batching_random_and_sine_patterns.py
index 4366f12..2f01274 100644
--- a/examples/datasets/triplet_batching_random_and_sine_patterns.py
+++ b/examples/datasets/triplet_batching_random_and_sine_patterns.py
@@ -109,11 +109,11 @@
negative_class = negative_sample[2].item()
# Print classes
- print("Anchor: {}, Positive: {}, Negative: {}".format(
+ print(("Anchor: {}, Positive: {}, Negative: {}".format(
anchor_class,
positive_class,
negative_class
- ))
+ )))
# Plot
plt.figure()
diff --git a/examples/features/independent_component_analysis.py b/examples/features/independent_component_analysis.py
index e7457ea..b3c039e 100644
--- a/examples/features/independent_component_analysis.py
+++ b/examples/features/independent_component_analysis.py
@@ -161,7 +161,7 @@ def h(u):
# Print average of zs to checked
# centeredness
-print("Average of zs : {}".format(torch.mean(zs, dim=0)))
+print(("Average of zs : {}".format(torch.mean(zs, dim=0))))
# Compute the covariance matrix of zs
cov_zsT = torch.mm(zs.t(), zs) / total_points
diff --git a/examples/features/slow_feature_analysis.py b/examples/features/slow_feature_analysis.py
index 29c4ea4..76a5b44 100644
--- a/examples/features/slow_feature_analysis.py
+++ b/examples/features/slow_feature_analysis.py
@@ -144,7 +144,7 @@ def h(u):
# Training
y = sfa_node(z)
-print(y.size())
+print((y.size()))
# Show components from home made SFA
plt.title("Components from SFA cell")
plt.plot(ts[:plot_points], y[0, :plot_points, 0].numpy(), color='r')
diff --git a/examples/generation/narma10_esn_feedbacks.py b/examples/generation/narma10_esn_feedbacks.py
index 9fca247..96deebb 100644
--- a/examples/generation/narma10_esn_feedbacks.py
+++ b/examples/generation/narma10_esn_feedbacks.py
@@ -90,14 +90,14 @@
# Test MSE
dataiter = iter(testloader)
-test_u, test_y = dataiter.next()
+test_u, test_y = next(dataiter)
test_u, test_y = Variable(test_u), Variable(test_y)
gen_u = Variable(torch.zeros(batch_size, test_sample_length, input_dim))
if use_cuda: test_u, test_y, gen_u = test_u.cuda(), test_y.cuda(), gen_u.cuda()
y_predicted = esn(test_u)
-print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data)))
-print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data)))
-print(u"")
+print(("Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data))))
+print(("Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data))))
+print("")
y_generated = esn(gen_u)
print(y_generated)
diff --git a/examples/models/NilsNet_example.py b/examples/models/NilsNet_example.py
index 7944647..c9799b0 100644
--- a/examples/models/NilsNet_example.py
+++ b/examples/models/NilsNet_example.py
@@ -75,8 +75,8 @@ def imshow(inp, title=None):
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
print(inputs.size())
print(classes.size())
inputs = Variable(inputs)
classes = Variable(classes)
@@ -89,4 +89,4 @@ def imshow(inp, title=None):
outputs = nilsnet(inputs)
print(outputs)
print(outputs.size())
\ No newline at end of file
diff --git a/examples/nodes/pca_tests.py b/examples/nodes/pca_tests.py
index 1fd4ed6..e1281a4 100644
--- a/examples/nodes/pca_tests.py
+++ b/examples/nodes/pca_tests.py
@@ -41,8 +41,8 @@
test_samples_np = test_samples[0].numpy()
# Show
-print(u"Training samples : {}".format(training_samples_np))
-print(u"Test samples : {}".format(test_samples_np))
+print(("Training samples : {}".format(training_samples_np)))
+print(("Test samples : {}".format(test_samples_np)))
# PCA node
mdp_pca_node = mdp.Flow([mdp.nodes.PCANode(input_dim=input_dim, output_dim=output_dim)])
@@ -50,7 +50,7 @@
pca_reduced = mdp_pca_node(test_samples_np)
# Show
-print(u"PCA reduced : {}".format(pca_reduced))
+print(("PCA reduced : {}".format(pca_reduced)))
# EchoTorch PCA node
et_pca_node = etnn.PCACell(input_dim=input_dim, output_dim=output_dim)
@@ -59,5 +59,5 @@
et_reduced = et_pca_node(Variable(test_samples))
# Show
-print(u"Reduced with EchoTorch/PCA :")
+print("Reduced with EchoTorch/PCA :")
print(et_reduced)
diff --git a/examples/optimization/genetic_search.py b/examples/optimization/genetic_search.py
index 1e7432b..2556346 100644
--- a/examples/optimization/genetic_search.py
+++ b/examples/optimization/genetic_search.py
@@ -66,5 +66,5 @@
)
# Show the result
-print("Best hyper-parameters found : {}".format(best_param))
-print("Best NRMSE : {}".format(best_NRMSE))
+print(("Best hyper-parameters found : {}".format(best_param)))
+print(("Best NRMSE : {}".format(best_NRMSE)))
diff --git a/examples/optimization/grid_search.py b/examples/optimization/grid_search.py
index 8bf8c27..29c3f8b 100644
--- a/examples/optimization/grid_search.py
+++ b/examples/optimization/grid_search.py
@@ -24,7 +24,7 @@
from echotorch.datasets.NARMADataset import NARMADataset
import echotorch.utils.optimization as optim
import numpy as np
-from narma_evaluation import evaluation_function
+from .narma_evaluation import evaluation_function
# Length of training samples
train_sample_length = 5000
@@ -66,5 +66,5 @@
)
# Show the result
-print("Best hyper-parameters found : {}".format(best_param))
-print("Best NRMSE : {}".format(best_NRMSE))
+print(("Best hyper-parameters found : {}".format(best_param)))
+print(("Best NRMSE : {}".format(best_NRMSE)))
diff --git a/examples/optimization/narma_evaluation.py b/examples/optimization/narma_evaluation.py
index cd4c843..bb609a3 100644
--- a/examples/optimization/narma_evaluation.py
+++ b/examples/optimization/narma_evaluation.py
@@ -125,7 +125,7 @@ def evaluation_function(parameters, datasets, n_samples=5):
# Get the first sample in test set,
# and transform it to Variable.
dataiter = iter(testloader)
- test_u, test_y = dataiter.next()
+ test_u, test_y = next(dataiter)
test_u, test_y = Variable(test_u), Variable(test_y)
if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
diff --git a/examples/optimization/random_search.py b/examples/optimization/random_search.py
index d6387bb..53fc0e8 100644
--- a/examples/optimization/random_search.py
+++ b/examples/optimization/random_search.py
@@ -24,7 +24,7 @@
from echotorch.datasets.NARMADataset import NARMADataset
import echotorch.utils.optimization as optim
import numpy as np
-from narma_evaluation import evaluation_function
+from .narma_evaluation import evaluation_function
# Length of training samples
train_sample_length = 5000
@@ -66,5 +66,5 @@
)
# Show the result
-print("Best hyper-parameters found : {}".format(best_param))
-print("Best NRMSE : {}".format(best_NRMSE))
+print(("Best hyper-parameters found : {}".format(best_param)))
+print(("Best NRMSE : {}".format(best_NRMSE)))
diff --git a/examples/timeserie_classification/test.py b/examples/timeserie_classification/test.py
deleted file mode 100644
index 56ef9e8..0000000
--- a/examples/timeserie_classification/test.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : examples/timeserie_classification/test.py
-# Description : Example of timeseries classification
-# Date : 27th of April, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
diff --git a/examples/timeserie_prediction/mackey_glass_esn.py b/examples/timeserie_prediction/mackey_glass_esn.py
index 8296bc9..ec60cb5 100644
--- a/examples/timeserie_prediction/mackey_glass_esn.py
+++ b/examples/timeserie_prediction/mackey_glass_esn.py
@@ -81,20 +81,20 @@
# Train MSE
dataiter = iter(trainloader)
-train_u, train_y = dataiter.next()
+train_u, train_y = next(dataiter)
train_u, train_y = Variable(train_u), Variable(train_y)
if use_cuda: train_u, train_y = train_u.cuda(), train_y.cuda()
y_predicted = esn(train_u)
-print(u"Train MSE: {}".format(echotorch.utils.mse(y_predicted.data, train_y.data)))
-print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, train_y.data)))
-print(u"")
+print(("Train MSE: {}".format(echotorch.utils.mse(y_predicted.data, train_y.data))))
+print(("Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, train_y.data))))
+print("")
# Test MSE
dataiter = iter(testloader)
-test_u, test_y = dataiter.next()
+test_u, test_y = next(dataiter)
test_u, test_y = Variable(test_u), Variable(test_y)
if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
y_predicted = esn(test_u)
-print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data)))
-print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data)))
-print(u"")
+print(("Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data))))
+print(("Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data))))
+print("")
diff --git a/examples/timeserie_prediction/narma1010_esn_helper.py b/examples/timeserie_prediction/narma1010_esn_helper.py
deleted file mode 100644
index 72c01f8..0000000
--- a/examples/timeserie_prediction/narma1010_esn_helper.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : examples/timeserie_prediction/narma1010_esn_helper.py
-# Description : NARMA-10 prediction with ESN
-# Date : 27th of April, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-
diff --git a/examples/timeserie_prediction/narma10_esn.py b/examples/timeserie_prediction/narma10_esn.py
index 19a8aab..0243543 100644
--- a/examples/timeserie_prediction/narma10_esn.py
+++ b/examples/timeserie_prediction/narma10_esn.py
@@ -133,7 +133,7 @@
# Get the first sample in training set,
# and transform it to Variable.
dataiter = iter(trainloader)
-train_u, train_y = dataiter.next()
+train_u, train_y = next(dataiter)
train_u, train_y = Variable(train_u), Variable(train_y)
if use_cuda: train_u, train_y = train_u.cuda(), train_y.cuda()
@@ -141,14 +141,14 @@
y_predicted = esn(train_u)
# Print training MSE and NRMSE
-print(u"Train MSE: {}".format(echotorch.utils.mse(y_predicted.data, train_y.data)))
-print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, train_y.data)))
-print(u"")
+print(("Train MSE: {}".format(echotorch.utils.mse(y_predicted.data, train_y.data))))
+print(("Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, train_y.data))))
+print("")
# Get the first sample in test set,
# and transform it to Variable.
dataiter = iter(testloader)
-test_u, test_y = dataiter.next()
+test_u, test_y = next(dataiter)
test_u, test_y = Variable(test_u), Variable(test_y)
if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
@@ -156,9 +156,9 @@
y_predicted = esn(test_u)
# Print test MSE and NRMSE
-print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data)))
-print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data)))
-print(u"")
+print(("Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data))))
+print(("Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data))))
+print("")
# Show target and predicted
plt.plot(test_y[0, :plot_length, 0].data, 'r')
diff --git a/examples/timeserie_prediction/narma10_esn_for_reservoir_sizes.py b/examples/timeserie_prediction/narma10_esn_for_reservoir_sizes.py
index b95adbe..b5f3c03 100644
--- a/examples/timeserie_prediction/narma10_esn_for_reservoir_sizes.py
+++ b/examples/timeserie_prediction/narma10_esn_for_reservoir_sizes.py
@@ -111,7 +111,7 @@
# Get the first sample in test set,
# and transform it to Variable.
dataiter = iter(testloader)
- test_u, test_y = dataiter.next()
+ test_u, test_y = next(dataiter)
test_u, test_y = Variable(test_u), Variable(test_y)
if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
diff --git a/examples/timeserie_prediction/narma10_esn_sgd.py b/examples/timeserie_prediction/narma10_esn_sgd.py
index 93bc453..90cb4ed 100644
--- a/examples/timeserie_prediction/narma10_esn_sgd.py
+++ b/examples/timeserie_prediction/narma10_esn_sgd.py
@@ -100,19 +100,19 @@
optimizer.step()
# Print error measures
- print(u"Train MSE: {}".format(float(loss.data)))
- print(u"Train NRMSE: {}".format(echotorch.utils.nrmse(out.data, targets.data)))
+ print(("Train MSE: {}".format(float(loss.data))))
+ print(("Train NRMSE: {}".format(echotorch.utils.nrmse(out.data, targets.data))))
# end for
# Test reservoir
dataiter = iter(testloader)
- test_u, test_y = dataiter.next()
+ test_u, test_y = next(dataiter)
test_u, test_y = Variable(test_u), Variable(test_y)
if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
y_predicted = esn(test_u)
# Print error measures
- print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data)))
- print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data)))
- print(u"")
+ print(("Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data))))
+ print(("Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data))))
+ print("")
# end for
diff --git a/examples/timeserie_prediction/narma10_gated_esn.py b/examples/timeserie_prediction/narma10_gated_esn.py
index d82fca6..3389b29 100644
--- a/examples/timeserie_prediction/narma10_gated_esn.py
+++ b/examples/timeserie_prediction/narma10_gated_esn.py
@@ -112,19 +112,19 @@
optimizer.step()
# Print error measures
- print(u"Train MSE: {}".format(float(loss.data)))
- print(u"Train NRMSE: {}".format(echotorch.utils.nrmse(out.data, targets.data)))
+ print(("Train MSE: {}".format(float(loss.data))))
+ print(("Train NRMSE: {}".format(echotorch.utils.nrmse(out.data, targets.data))))
# end for
# Test reservoir
dataiter = iter(testloader)
- test_u, test_y = dataiter.next()
+ test_u, test_y = next(dataiter)
test_u, test_y = Variable(test_u), Variable(test_y)
if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
y_predicted = gated_esn(test_u)
# Print error measures
- print(u"Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data)))
- print(u"Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data)))
- print(u"")
+ print(("Test MSE: {}".format(echotorch.utils.mse(y_predicted.data, test_y.data))))
+ print(("Test NRMSE: {}".format(echotorch.utils.nrmse(y_predicted.data, test_y.data))))
+ print("")
# end for
diff --git a/examples/timeserie_prediction/narma10_stacked_esn.py b/examples/timeserie_prediction/narma10_stacked_esn.py
index 842c067..4e4c1d6 100644
--- a/examples/timeserie_prediction/narma10_stacked_esn.py
+++ b/examples/timeserie_prediction/narma10_stacked_esn.py
@@ -73,6 +73,6 @@
# Accumulate xTx and xTy
hidden_states = esn(inputs, targets)
for i in range(10):
	print(hidden_states[0, i])
# end if
# end for
\ No newline at end of file
diff --git a/examples/timetensor/acf.py b/examples/timetensor/acf.py
deleted file mode 100644
index 5d9ccee..0000000
--- a/examples/timetensor/acf.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : examples/timetensor/acf.py
-# Description : Statistical operations on TimeTensors
-# Date : 17th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel
-# University of Geneva
-
-# Imports
-import numpy as np
-import matplotlib.pyplot as plt
-import echotorch.data
-import echotorch.acf
-import echotorch.viz
-
-
-# Three timeseries from a Moving Average process of order 5, MA(5)
-x = echotorch.data.ma(1, length=1000, order=5, size=1)[0]
-y = echotorch.data.ma(1, length=1000, order=5, size=1)[0]
-z = echotorch.data.ma(1, length=1000, order=5, size=5)[0]
-
-# Compute auto-covariance coefficients
-autocov_coeffs = echotorch.acf.acf(x, k=50)
-
-# Show autocov coeffs
-echotorch.acf.correlogram(x, k=50, plot_params={'title': "Auto-covariance coefficients"})
-
-# Compute auto-correlation coefficients
-echotorch.acf.correlogram(x, k=50, coeffs_type="correlation", plot_params={'title': "Auto-correlation coefficients"})
-
-# Compute cross auto-correlation coefficients
-echotorch.acf.cross_correlogram(x, y, k=50, coeffs_type="correlation", plot_params={'title': "Cross Autocorrelation coefficients"})
-
-# Show cross-correlogram
-echotorch.acf.ccfpairs(
- z,
- k=20,
- coeffs_type="correlation",
- figsize=(12, 10),
- labels=['A', 'B', 'C', 'D', 'E']
-)
diff --git a/examples/timetensor/creation_ops.py b/examples/timetensor/creation_ops.py
deleted file mode 100644
index 10ac9ab..0000000
--- a/examples/timetensor/creation_ops.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : examples/timetensor/creation_ops.py
-# Description : Creation operators for TimeTensors
-# Date : 3rd of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-
-# Imports
-import torch
-import numpy as np
-import echotorch
-
-
-# Create a timetensor with timetensor()
-print("timetensor")
-x0 = echotorch.timetensor([1, 2, 3, 4])
-print("x0: {}".format(x0))
-print("x0.time_dim: {}".format(x0.time_dim))
-print("x0.size(): {}".format(x0.size()))
-print("x0.csize(): {}".format(x0.csize()))
-print("x0.bsize(): {}".format(x0.bsize()))
-print("x0.tlen: {}".format(x0.tlen))
-print("")
-
-# Create time-tensor with timetensor()
-print("timetensor")
-x1 = echotorch.timetensor(torch.zeros(4, 100, 6), time_dim=1)
-# print("x1: {}".format(x1))
-print("x1.time_dim: {}".format(x1.time_dim))
-print("x1.size(): {}".format(x1.size()))
-print("x1.csize(): {}".format(x1.csize()))
-print("x1.bsize(): {}".format(x1.bsize()))
-print("x1.tlen: {}".format(x1.tlen))
-print("")
-
-# Create time-tensor with as_timetensor()
-print("as_timetensor")
-# x2 = echotorch.as_timetensor(np.zeros((4, 100, 6)), time_dim=1)
-x2 = echotorch.as_timetensor([[0], [1], [2]], time_dim=0)
-print("x2: {}".format(x2))
-print("x2.time_dim: {}".format(x2.time_dim))
-print("x2.size(): {}".format(x2.size()))
-print("x2.csize(): {}".format(x2.csize()))
-print("x2.bsize(): {}".format(x2.bsize()))
-print("x2.tlen: {}".format(x2.tlen))
-print("")
-
-# Create time-tensor with from_numpy()
-print("from_numpy")
-x3 = echotorch.from_numpy(np.zeros((4, 100, 6)), time_dim=1)
-# print("x3: {}".format(x3))
-print("x3.time_dim: {}".format(x3.time_dim))
-print("x3.size(): {}".format(x3.size()))
-print("x3.csize(): {}".format(x3.csize()))
-print("x3.bsize(): {}".format(x3.bsize()))
-print("x3.tlen: {}".format(x3.tlen))
-print("")
-
-# Create a time-tensor with full()
-print("full")
-x4 = echotorch.full(6, fill_value=5, length=100)
-# print("x4: {}".format(x4))
-print("x4.time_dim: {}".format(x4.time_dim))
-print("x4.size(): {}".format(x4.size()))
-print("x4.csize(): {}".format(x4.csize()))
-print("x4.bsize(): {}".format(x4.bsize()))
-print("x4.tlen: {}".format(x4.tlen))
-print("")
-
-# Create a time-tensor with full() and batch dimension
-print("full")
-x5 = echotorch.full(6, fill_value=5, length=100, batch_size=(2, 2))
-# print("x5: {}".format(x5))
-print("x5.time_dim: {}".format(x5.time_dim))
-print("x5.size(): {}".format(x5.size()))
-print("x5.csize(): {}".format(x5.csize()))
-print("x5.bsize(): {}".format(x5.bsize()))
-print("x5.tlen: {}".format(x5.tlen))
-print("")
-
-# Create a time-tensor with randn()
-print("randn")
-x6 = echotorch.randn(2, length=100)
-# print("x6: {}".format(x6))
-print("x6.time_dim: {}".format(x6.time_dim))
-print("x6.size(): {}".format(x6.size()))
-print("x6.csize(): {}".format(x6.csize()))
-print("x6.bsize(): {}".format(x6.bsize()))
-print("x6.tlen: {}".format(x6.tlen))
-print("")
-
-# Create a sparse COO timetensor
-print("sparse_coo_timetensor")
-x7 = echotorch.sparse_coo_timetensor(
- indices=torch.tensor([[0, 1, 1], [2, 0, 2]]),
- values=torch.tensor([3, 4, 5], dtype=torch.float32),
- size=[2, 4]
-)
-print("x7: {}".format(x7))
-print("x7.time_dim: {}".format(x7.time_dim))
-print("x7.size(): {}".format(x7.size()))
-print("x7.csize(): {}".format(x7.csize()))
-print("x7.bsize(): {}".format(x7.bsize()))
-print("x7.tlen: {}".format(x7.tlen))
-print("")
-
-# As strided
-# print("as_strided")
-# x8 = echotorch.as_strided(x6, )
-
-# Create timetensor full of zeros
-print("zeros")
-x9 = echotorch.zeros(2, length=100)
-print("x9.time_dim: {}".format(x9.time_dim))
-print("x9.size(): {}".format(x9.size()))
-print("x9.csize(): {}".format(x9.csize()))
-print("x9.bsize(): {}".format(x9.bsize()))
-print("x9.tlen: {}".format(x9.tlen))
-print("")
-
-# Create timetensor with arange
-x10 = echotorch.arange(1, 2.5, 0.5)
-print("x10: {}".format(x10))
-print("x10.time_dim: {}".format(x10.time_dim))
-print("x10.size(): {}".format(x10.size()))
-print("x10.csize(): {}".format(x10.csize()))
-print("x10.bsize(): {}".format(x10.bsize()))
-print("x10.tlen: {}".format(x10.tlen))
-print("")
-
-# Create timetensor with linspace
-x11 = echotorch.linspace(-10, 10, steps=1)
-print("x11: {}".format(x11))
-print("x11.time_dim: {}".format(x11.time_dim))
-print("x11.size(): {}".format(x11.size()))
-print("x11.csize(): {}".format(x11.csize()))
-print("x11.bsize(): {}".format(x11.bsize()))
-print("x11.tlen: {}".format(x11.tlen))
-print("")
-
-# Create timetensor with logspace
-x12 = echotorch.logspace(start=2, end=2, steps=1, base=2)
-print("x12: {}".format(x12))
-print("x12.time_dim: {}".format(x12.time_dim))
-print("x12.size(): {}".format(x12.size()))
-print("x12.csize(): {}".format(x12.csize()))
-print("x12.bsize(): {}".format(x12.bsize()))
-print("x12.tlen: {}".format(x12.tlen))
-print("")
-
-# Create timetensor with empty
-x13 = echotorch.empty(2, length=100)
-print("x13.time_dim: {}".format(x13.time_dim))
-print("x13.size(): {}".format(x13.size()))
-print("x13.csize(): {}".format(x13.csize()))
-print("x13.bsize(): {}".format(x13.bsize()))
-print("x13.tlen: {}".format(x13.tlen))
-print("")
-
-# Create timetensor with empty_like
-x13 = echotorch.empty_like(x13)
-print("x13.time_dim: {}".format(x13.time_dim))
-print("x13.size(): {}".format(x13.size()))
-print("x13.csize(): {}".format(x13.csize()))
-print("x13.bsize(): {}".format(x13.bsize()))
-print("x13.tlen: {}".format(x13.tlen))
-print("")
-
-
-# Create timetensor with empty_strided
-x14 = echotorch.empty_strided((2, 3), (1, 2), length=100, time_stride=2)
-print("x14.time_dim: {}".format(x14.time_dim))
-print("x14.size(): {}".format(x14.size()))
-print("x14.csize(): {}".format(x14.csize()))
-print("x14.bsize(): {}".format(x14.bsize()))
-print("x14.tlen: {}".format(x14.tlen))
-print("")
-
-# Create a time-tensor with rand()
-print("rand")
-x15 = echotorch.rand(2, length=10)
-print("x15: {}".format(x15))
-print("x15.time_dim: {}".format(x15.time_dim))
-print("x15.size(): {}".format(x15.size()))
-print("x15.csize(): {}".format(x15.csize()))
-print("x15.bsize(): {}".format(x15.bsize()))
-print("x15.tlen: {}".format(x15.tlen))
-print("")
-
diff --git a/examples/timetensor/data_tensors.py b/examples/timetensor/data_tensors.py
deleted file mode 100644
index 1693e13..0000000
--- a/examples/timetensor/data_tensors.py
+++ /dev/null
@@ -1,33 +0,0 @@
-
-
-# Imports
-import torch
-import echotorch
-
-# data_indexer = echotorch.DataIndexer(["f1", "f2"])
-# print(data_indexer)
-# print("keys: {}".format(data_indexer.keys))
-# print("Indices: {}".format(data_indexer.indices))
-# print("")
-#
-# print("to_index('f1'): {}".format(data_indexer.to_index('f1')))
-# print("to_index(['f1', 'f2']): {}".format(data_indexer.to_index(['f1', 'f2'])))
-# print("to_index('test1': 'f1', 'test2': 'f2'): {}".format(data_indexer.to_index({'test1': 'f1', 'test2': 'f2'})))
-# print("to_index(tuple('f1', f2')): {}".format(data_indexer.to_index(slice('f1', 'f2'))))
-# print("")
-#
-# print("to_keys(0): {}".format(data_indexer.to_keys(0)))
-# print("to_keys([0, 1]): {}".format(data_indexer.to_keys([0, 1])))
-# print("to_keys('test1': 0, 'test2': 1): {}".format(data_indexer.to_keys({'test1': 0, 'test2': 1})))
-# print("")
-
-data_tensor = echotorch.DataTensor(torch.randn(20, 2, 3), [None, ['f1', 'f2'], None])
-print(data_tensor)
-print(data_tensor[:, ['f1'], :])
-print(data_tensor[0])
-print(data_tensor[[0, 1, 2]])
-print(data_tensor[:, 'f1', :])
-print(data_tensor[:, 0, :])
-print(data_tensor[:, ['f1', 'f2']])
-print(data_tensor[:, [0, 'f2']])
-print(data_tensor[:, ['f1', 'f1']])
diff --git a/examples/timetensor/intoduction.py b/examples/timetensor/intoduction.py
deleted file mode 100644
index 6280d2d..0000000
--- a/examples/timetensor/intoduction.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : examples/timetensor/introduction.py
-# Description : Introduction to time tensors
-# Date : 31st of July, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel
-# University of Geneva
-
-
-# Imports
-import torch
-import numpy as np
-import echotorch
-
-
-# Create a timetensor
-x = echotorch.timetensor([1, 2, 3], time_dim=0)
-# print(x)
-
-# Zeros
-x = echotorch.zeros(4, 4, time_length=20)
-# print(x)
-# print(x.size())
-# print(x.tsize())
-# print(x.tlen)
-
-# Indexing
-# subx = x[0, :]
-# print(subx)
-# print(type(subx))
-# print(subx.size())
-# if type(subx) is echotorch.TimeTensor: print(subx.tsize())
-
-# # Ones
-# x = echotorch.ones((4, 4), 20)
-# print(x)
-#
-# # Empty
-# x = echotorch.empty((4, 4), 20)
-# print(x)
-#
-# # Full
-# x = echotorch.full((4, 4), 20, -1)
-# print(x)
-
-# Rand
-x = echotorch.rand(4, 4, time_length=20).cpu()
-y = echotorch.rand(4, 4, time_length=30).cpu()
-print("Base timetenors: ")
-print("x size: {}".format(x.size()))
-print("x time_dim: {}".format(x.time_dim))
-print("x time_length: {}".format(len(x)))
-print("x csize: {}".format(x.csize()))
-print("")
-print("y size: {}".format(y.size()))
-print("y time_dim: {}".format(y.time_dim))
-print("y time_length: {}".format(len(y)))
-print("y csize: {}".format(y.csize()))
-print("")
-
-# Equal
-print("==")
-print("x == y: {}".format(x == y))
-print("x == x: {}".format(x == x))
-print("")
-
-# Cat
-xy = torch.cat((x, y), dim=0)
-print("torch.cat dim=0:")
-print("out time_dim: {}".format(xy.time_dim))
-print("out time_length: {}".format(len(xy)))
-print("out csize: {}".format(xy.csize()))
-print("")
-
-# Unsqueeze
-xx = torch.unsqueeze(x, dim=0)
-print("torch.unsqueeze")
-print("out size: {}".format(xx.size()))
-print("out time_length: {}".format(len(xx)))
-print("out time dim: {}".format(xx.time_dim))
-print("")
-
-# tmean
-z = echotorch.randn(5, time_length=100)
-z2 = echotorch.randn(5, time_length=100)
-print("echotorch.tmean")
-print("z size: {}".format(z.size()))
-print("z tmean: {}".format(echotorch.tmean(z)))
-print("z z1 cov: {}".format(echotorch.cov(z, z2)))
-print("z z cov: {}".format(echotorch.cov(z, z)))
-z_n = z.numpy()
-z2_n = z2.numpy()
-print("z_n: {}".format(z_n.shape))
-z_n_cov = np.cov(z_n, rowvar=False)
-print("z_n_cov: {}".format(z_n_cov.shape))
-print("z z cov with numpy: {}".format(z_n_cov))
-z_z2_n_cov = np.cov(z_n, z2_n, rowvar=False)
-print("z z2 cov with numpy: {}".format(z_z2_n_cov[:5, 5:]))
-
diff --git a/examples/timetensor/statistics.py b/examples/timetensor/statistics.py
deleted file mode 100644
index c6b99b8..0000000
--- a/examples/timetensor/statistics.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : examples/timetensor/statistics.py
-# Description : Statistical operations on TimeTensors
-# Date : 17th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti, University of Neuchâtel
-# University of Geneva
-
-# Imports
-import numpy as np
-import matplotlib.pyplot as plt
-import echotorch.data
-import echotorch.acf
-import echotorch.viz
-
-
-# Create two timetensors
-# x = echotorch.rand(5, length=100)
-# y = echotorch.rand(5, length=100)
-x = echotorch.data.ma(1, length=1000, order=5, size=1)[0]
-y = echotorch.data.ma(1, length=1000, order=5, size=1)[0]
-z = echotorch.data.ma(1, length=1000, order=5, size=5)[0]
-
-# Print tensors
-print("Timetensor x: {}".format(x))
-print("Timetensor y: {}".format(y))
-print("")
-
-# Average mean over time
-xtm = echotorch.tmean(x)
-ytm = echotorch.tmean(y)
-
-# Show t-mean
-print("Average over time of x: {}".format(xtm))
-print("Average over time of y: {}".format(ytm))
-print("")
-
-# Standard deviation over time
-xsig = echotorch.tstd(x)
-ysig = echotorch.tstd(y)
-
-# Show standard deviation over time
-print("Std over time of x: {}".format(xsig))
-print("Std over time of y: {}".format(ysig))
-print("")
-
-# Variance over time
-xvar = echotorch.tvar(x)
-yvar = echotorch.tvar(y)
-
-# Show variance over time
-print("Var over time of x: {}".format(xvar))
-print("Var over time of y: {}".format(yvar))
-print("")
-
-# Compute covariance matrix
-cov_xy = echotorch.cov(x, y)
-
-# Show covariance matrix
-print("Cov(X, Y): {}".format(cov_xy))
-print("")
-
-# Compute correlation matrix
-cor_xy = echotorch.cor(x, y, pvalue=True)
-
-# Show correlation matrix
-print("Cor(X, Y): {}".format(cor_xy))
-print("Cor(X, X): {}".format(echotorch.cor(x, x)))
-print("")
-
-# Compute auto-covariance coefficients
-autocov_coeffs = echotorch.acf.acf(x, k=50)
-
-# Show autocov coeffs
-echotorch.acf.correlogram(x, k=50, plot_params={'title': "Auto-covariance coefficients"})
-
-# Compute auto-correlation coefficients
-echotorch.acf.correlogram(x, k=50, coeffs_type="correlation", plot_params={'title': "Auto-correlation coefficients"})
-
-# Compute cross auto-correlation coefficients
-echotorch.acf.cross_correlogram(x, y, k=50, coeffs_type="correlation", plot_params={'title': "Cross Autocorrelation coefficients"})
-
-# Show cross-correlogram
-echotorch.acf.ccfpairs(
- z,
- k=20,
- coeffs_type="correlation",
- figsize=(12, 10),
- labels=['A', 'B', 'C', 'D', 'E']
-)
diff --git a/examples/timetensor/tests.py b/examples/timetensor/tests.py
deleted file mode 100644
index ac1754c..0000000
--- a/examples/timetensor/tests.py
+++ /dev/null
@@ -1,74 +0,0 @@
-
-import torch
-
-
-class MyTensor(object):
-
- def __init__(self):
- self.attr1 = 'my_attr1'
- self._tensor = torch.ones(100)
- # end __init__
-
- @property
- def tensor(self):
- return self._tensor
- # end tensor
-
- def __getattr__(self, item):
- print("__getattr__: {}".format(item))
- if hasattr(self._tensor, item):
- return getattr(self._tensor, item)
- else:
- raise AttributeError(
- "AttributeError: Neither '{}' object nor its wrapped "
- "tensor has no attribute '{}'".format(self.__class__.__name__, item)
- )
- # end if
- # end __getattr__
-
- # def __getattribute__(self, item):
- # print("__getattribute__: {}".format(item))
- # # end __getattribute__
-
- # Set attributes
- # def __setattr__(self, key, value):
- # print("__setattr__: {} {}".format(key, value))
- # # end __setattr__
-
-# end MyTensor
-
-
-test = MyTensor()
-# print("test: {}".format(test.test))
-# print("")
-
-print("Set requires_grad")
-print(test.requires_grad)
-print("1")
-test.requires_grad = True
-print("2")
-print(test.requires_grad)
-print("")
-
-print("Set attr1")
-#print(test.attr1)
-print("1")
-test.attr1 = "attr2"
-# print("2")
-print(test.attr1)
-
-print("Call is_complex")
-print(test.is_complex)
-
-print("attr1: {}".format(test.attr1))
-print("")
-
-print("size: {}".format(test.size()))
-print("")
-
-print("ndim: {}".format(test.ndim))
-print("")
-
-# print("other: {}".format(test.other))
-# print("")
-
diff --git a/examples/timetensor/torch_comparison.py b/examples/timetensor/torch_comparison.py
deleted file mode 100644
index 7bd087c..0000000
--- a/examples/timetensor/torch_comparison.py
+++ /dev/null
@@ -1,17 +0,0 @@
-
-import torch
-import echotorch
-
-x = torch.arange(1., 6.)
-print(x)
-print(torch.topk(x, 3))
-
-y = echotorch.arange(5, 0, -1)
-y = torch.unsqueeze(y, dim=1)
-print(y)
-y = torch.tile(y, (1, 2))
-print(y)
-print(torch.topk(y, 1, 0))
-print(torch.topk(y, 1, 1))
-
-print(torch.msort(y))
diff --git a/examples/timetensor/torch_ops.py b/examples/timetensor/torch_ops.py
deleted file mode 100644
index c6dfadc..0000000
--- a/examples/timetensor/torch_ops.py
+++ /dev/null
@@ -1,724 +0,0 @@
-
-
-import torch
-import echotorch
-
-
-def print_var(head, t_in):
- if isinstance(t_in, echotorch.TimeTensor):
- print("{}: {}, {}, time_dim: {}, tlen: {}, csize: {}, bsize: {}".format(head, t_in.size(), t_in.__class__.__name__, t_in.time_dim, t_in.tlen, t_in.csize(), t_in.bsize()))
- elif isinstance(t_in, torch.Tensor):
- print("{}: {}, {}".format(head, t_in.size(), t_in.__class__.__name__))
- elif isinstance(t_in, list) or isinstance(t_in, tuple):
- for el_i, el in enumerate(t_in):
- print_var("{}:{}".format(head, el_i), el)
- # end for
- # end if
-# end print_var
-
-
-# Create tensor and time tensors
-ptch = torch.randn(100, 2)
-echt = echotorch.randn(2, length=100)
-
-# Sizes
-# print("ptch.size(): {}".format(ptch.size()))
-# print("echt.size(): {}".format(echt.size()))
-
-# # Is tensor
-# print("is_tensor(ptch): {}".format(torch.is_tensor(ptch)))
-# print("is_tensor(echt): {}".format(torch.is_tensor(echt)))
-
-# # Numel
-# print("numel(ptch): {}".format(torch.numel(ptch)))
-# print("numel(echt): {}".format(torch.numel(echt)))
-
-# As tensor (doesn't work)
-# print("as_tensor(ptch): {}".format(torch.as_tensor(ptch)))
-# print("as_tensor(echt): {}".format(torch.as_tensor(echt)))
-
-# Cat
-print("=======================")
-print("cat")
-x = torch.randn(3, 2)
-y = echotorch.randn(2, length=3)
-z = echotorch.as_timetensor(torch.randn(10, 2), time_dim=1)
-print_var("x", x)
-print_var("y", y)
-print_var("z", z)
-# print("in x: {}, {}".format(x.size(), type(x)))
-# print("in y: {}, {}, time_dim: {}, tlen: {}".format(y.size(), type(y), y.time_dim, y.tlen))
-out = torch.cat((x, x, x), 0)
-print_var("out", out)
-out = torch.cat((x, x, x), 1)
-print_var("out", out)
-out = torch.cat((x, y, x), 0)
-print_var("out", out)
-out = torch.cat((x, y, x), 1)
-print_var("out", out)
-# Must raise a RuntimeError
-# out = torch.cat((y, z), 0)
-# print_var("out", out)
-print("")
-
-# chunk
-print("=======================")
-print("chunk")
-x = torch.randn(10, 2)
-y = echotorch.randn(2, length=10)
-z = echotorch.as_timetensor(torch.randn(10, 2), time_dim=1)
-print_var("x", x)
-print_var("y", y)
-print_var("z", z)
-out = torch.chunk(x, 3, 0)
-print_var("out", out)
-out = torch.chunk(y, 3, 0)
-print_var("out", out)
-print("")
-
-# dsplit
-print("=======================")
-print("dsplit")
-x = torch.arange(16.0).reshape(2, 2, 4)
-y = echotorch.timetensor(x, time_dim=2)
-z = echotorch.timetensor(x, time_dim=1)
-print_var("x", x)
-out = torch.dsplit(x, 2)
-print_var("out", out)
-out = torch.dsplit(x, [1, 3])
-print_var("out", out)
-out = torch.dsplit(y, [1, 3])
-print_var("out", out)
-out = torch.dsplit(z, [1, 3])
-print_var("out", out)
-print("")
-
-# column_stack
-print("=======================")
-print("column_stack")
-x = torch.arange(20)
-y = torch.arange(40).reshape(20, 2)
-z = echotorch.arange(20)
-print_var("x", x)
-print_var("y", y)
-print_var("z", z)
-out = torch.column_stack((x, y, y))
-print_var("out", out)
-out = torch.column_stack((z, y))
-print_var("out", out)
-print(out)
-print("")
-
-# dstack
-print("=======================")
-print("dstack")
-x = torch.tensor([1, 2, 3])
-y = torch.tensor([4, 5, 6])
-z = echotorch.timetensor([7, 8, 9])
-print_var("x", x)
-print_var("y", y)
-print_var("z", z)
-out = torch.dstack((x, z))
-print_var("out", out)
-out = torch.dstack((x, y))
-print_var("out", out)
-x = torch.tensor([[1], [2], [3]])
-y = torch.tensor([[4], [5], [6]])
-print_var("x", x)
-print_var("y", y)
-out = torch.dstack((x, y))
-print_var("out", out)
-x = torch.tensor([[1, 2, 3]])
-y = torch.tensor([[4, 5, 6]])
-print_var("x", x)
-print_var("y", y)
-out = torch.dstack((x, y))
-print_var("out", out)
-x = torch.tensor([[[1, 2, 3]]])
-y = torch.tensor([[[4, 5, 6]]])
-print_var("x", x)
-print_var("y", y)
-out = torch.dstack((x, y))
-print_var("out", out)
-x = torch.tensor([[1], [2], [3]])
-z = echotorch.timetensor([[7], [8], [9]])
-print_var("x", x)
-print_var("z", z)
-out = torch.dstack((x, z))
-print_var("out", out)
-print("")
-
-# gather
-print("=======================")
-print("gather")
-x = torch.tensor([[1, 2], [3, 4]])
-print_var("x", x)
-out = torch.gather(x, 1, torch.tensor([[0, 0], [1, 0]]))
-print_var("out", out)
-z = echotorch.timetensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [12, 13]])
-print_var("z", z)
-out = torch.gather(z, 1, torch.tensor([[0, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [1, 0]]))
-print_var("out", out)
-
-print("")
-
-
-# at least 3d
-print("=======================")
-print("at_leat_3d")
-x = torch.randn(2)
-print_var("x", x)
-out = torch.atleast_3d(x)
-print_var("out", out)
-x = torch.randn(2, 2)
-print_var("x", x)
-out = torch.atleast_3d(x)
-print_var("out", out)
-x = torch.randn(2, 2, 2)
-print_var("x", x)
-out = torch.atleast_3d(x)
-print_var("out", out)
-x = torch.tensor([])
-print_var("x", x)
-out = torch.atleast_3d(x)
-print_var("out", out)
-z = echotorch.randn(length=2)
-print_var("z", z)
-out = torch.atleast_3d(z)
-print_var("out", out)
-z = echotorch.randn(1, length=2)
-print_var("z", z)
-out = torch.atleast_3d(z)
-print_var("out", out)
-print("")
-
-# hsplit
-print("=======================")
-print("hsplit")
-x = torch.arange(16.0).reshape(4, 4)
-z = echotorch.timetensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
-print_var("x", x)
-print_var("z", z)
-out = torch.hsplit(x, 2)
-print_var("out", out)
-out = torch.hsplit(z, 2)
-print_var("out", out)
-x = torch.arange(16.0)
-print_var("x", x)
-out = torch.hsplit(x, 8)
-print_var("out", out)
-z = echotorch.arange(16.0)
-print_var("z", z)
-out = torch.hsplit(z, 8)
-print_var("out", out)
-print("")
-
-# hstack
-print("======================")
-print("hstack")
-x = torch.tensor([1, 2, 3])
-y = torch.tensor([4, 5, 6])
-z = echotorch.timetensor([7, 8, 9])
-print_var("x", x)
-print_var("y", y)
-out = torch.hstack((x, y))
-print_var("out", out)
-x = torch.tensor([[1], [2], [3]])
-y = torch.tensor([[4], [5], [6]])
-print_var("x", x)
-print_var("y", y)
-out = torch.hstack((x, y))
-print_var("out", out)
-x = torch.tensor([1, 2, 3])
-print_var("x", x)
-print_var("z", z)
-out = torch.hstack((x, z))
-print_var("out", out)
-x = torch.tensor([[1],[2],[3]])
-z = echotorch.timetensor([[4],[5],[6]])
-print_var("x", x)
-print_var("z", z)
-out = torch.hstack((x, z))
-print_var("out", out)
-print("")
-
-# index_select
-print("======================")
-print("index_select")
-x = torch.randn(3, 4)
-indices = torch.tensor([0, 2])
-print_var("x", x)
-print(x)
-print_var("indices", indices)
-print(indices)
-out = torch.index_select(x, 0, indices)
-print_var("out", out)
-print(out)
-indices = torch.tensor([0])
-print_var("x", x)
-print(x)
-print_var("indices", indices)
-print(indices)
-out = torch.index_select(x, 0, indices)
-print_var("out", out)
-print(out)
-z = echotorch.randn(4, length=10)
-indices = torch.tensor([0, 2, 4, 6, 8])
-print_var("z", z)
-print(z)
-print_var("indices", indices)
-print(indices)
-out = torch.index_select(z, 0, indices)
-print_var("out", out)
-print(out)
-z = echotorch.randn(4, length=10)
-indices = torch.tensor([0, 2, 1, 0, 3])
-print_var("z", z)
-print(z)
-print_var("indices", indices)
-print(indices)
-out = torch.index_select(z, 1, indices)
-print_var("out", out)
-print(out)
-print("")
-
-# masked_select
-# time_dim destroyed !!
-print("======================")
-print("masked_select")
-x = torch.randn(3, 4)
-mask = x.ge(-10)
-print_var("x", x)
-print(x)
-print_var("mask", mask)
-print(mask)
-out = torch.masked_select(x, mask)
-print_var("out", out)
-print(out)
-print("")
-
-# movedim
-print("======================")
-print("movedim")
-x = torch.randn(3, 2, 1)
-print_var("x", x)
-out = torch.movedim(x, 1, 0)
-print_var("out", out)
-z = echotorch.randn(2, 1, length=10)
-print_var("z", z)
-out = torch.movedim(z, 1, 0)
-print_var("out", out)
-print("")
-
-# moveaxis
-print("======================")
-print("moveaxis")
-x = torch.randn(3, 2, 1)
-print_var("x", x)
-out = torch.movedim(x, 1, 0)
-print_var("out", out)
-z = echotorch.randn(2, 1, length=10)
-print_var("z", z)
-out = torch.movedim(z, 1, 0)
-print_var("out", out)
-print("")
-
-# narrow
-print("======================")
-print("narrow")
-x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-z = echotorch.timetensor([[1, 2], [3, 4], [5, 6], [7, 8]])
-print_var("x", x)
-print_var("z", z)
-print(x)
-print(z)
-out = torch.narrow(x, 0, 0, 2)
-print_var("out", out)
-print(out)
-out = torch.narrow(x, 1, 2, 0)
-print_var("out", out)
-print(out)
-out = torch.narrow(z, 0, 0, 2)
-print_var("out", out)
-print(out)
-out = torch.narrow(z, 1, 0, 0)
-print_var("out", out)
-print(out)
-print("")
-
-# nonzero
-# time dim destroyed !
-print("=======================")
-print("nonzero")
-x = torch.tensor([1, 1, 1, 0, 1])
-print_var("x", x)
-out = torch.nonzero(x)
-print_var("out", out)
-print("")
-
-# reshape
-# time dim destroyed !
-print("=======================")
-print("reshape")
-x = torch.arange(4.)
-print_var("x", x)
-out = torch.reshape(x, (2, 2))
-print_var("out", out)
-print("")
-
-# row_stack
-# alias of torch.vstack()
-
-# scatter
-print("=======================")
-print("scatter")
-x = torch.arange(1, 11).reshape((2, 5))
-y = torch.zeros(3, 5, dtype=x.dtype)
-z = echotorch.timetensor(y, time_dim=0)
-print_var("x", x)
-print_var("y", y)
-print_var("z", z)
-index = torch.tensor([[0, 1, 2, 0]])
-print_var("index", index)
-out = torch.scatter(y, 0, index, x)
-print_var("out", out)
-print(out)
-out = torch.scatter(z, 0, index, x)
-print_var("out", out)
-print(out)
-print("")
-
-# scatter_add
-print("=======================")
-print("scatter_add")
-x = torch.ones((2, 5))
-y = torch.zeros(3, 5, dtype=x.dtype)
-z = echotorch.zeros(5, length=3)
-print_var("x", x)
-print_var("y", y)
-print_var("z", z)
-index = torch.tensor([[0, 1, 2, 0, 0]])
-print_var("index", index)
-out = torch.scatter_add(y, 0, index, x)
-print_var("out", out)
-print(out)
-out = torch.scatter_add(z, 0, index, x)
-print_var("out", out)
-print(out)
-print("")
-
-# split
-print("=======================")
-print("split")
-x = torch.arange(10).reshape(5, 2)
-z = echotorch.timetensor(x)
-print_var("x", x)
-print_var("z", z)
-out = torch.split(x, 2)
-print_var("out", out)
-out = torch.split(x, [1, 4])
-print_var("out", out)
-out = torch.split(z, 2)
-print_var("out", out)
-out = torch.split(z, [1, 4])
-print_var("out", out)
-out = torch.split(z, 1, 1)
-print_var("out", out)
-print("")
-
-# squeeze
-print("=======================")
-print("squeeze")
-x = torch.zeros(2, 1, 2, 1, 2)
-print_var("x", x)
-out = torch.squeeze(x)
-print_var("out", out)
-z = echotorch.zeros(1, 2, 1, 2, length=2)
-print_var("z", z)
-out = torch.squeeze(z)
-print_var("out", out)
-z = echotorch.zeros(1, 2, 1, 2, length=2)
-print_var("z", z)
-out = torch.squeeze(z, dim=0)
-print_var("out", out)
-z = echotorch.zeros(1, 2, 1, 2, length=1)
-print_var("z", z)
-out = torch.squeeze(z)
-print_var("out", out)
-z = echotorch.zeros(1, 2, 1, 2, length=1)
-print_var("z", z)
-out = torch.squeeze(z)
-print_var("out", out)
-z = echotorch.zeros(1, 2, 1, 2, length=1)
-print_var("z", z)
-out = torch.squeeze(z, 3)
-print_var("out", out)
-print("")
-
-# stack
-print("=======================")
-print("stack")
-x = torch.zeros(100, 2)
-y = torch.zeros(100, 2)
-print_var("x", x)
-print_var("y", y)
-out = torch.stack((x, y))
-print_var("out", out)
-z = echotorch.zeros(2, length=100)
-print_var("z", z)
-out = torch.stack((x, z))
-print_var("out", out)
-out = torch.stack((x, z), 1)
-print_var("out", out)
-out = torch.stack((x, z), 2)
-print_var("out", out)
-z2 = echotorch.zeros(2, length=100)
-z2.time_dim = 1
-print_var("z2", z2)
-out = torch.stack((z, z2))
-print_var("out", out)
-out = torch.stack((z, z2), 1)
-print_var("out", out)
-out = torch.stack((z, z2), 2)
-print_var("out", out)
-print("")
-
-# swapaxes
-# alias to transpose()
-
-# swapdims
-# alias to transpose()
-
-# t
-print("=======================")
-print("t")
-x = torch.randn(())
-print_var("x", x)
-out = torch.t(x)
-print_var("out", out)
-x = torch.randn(3)
-print_var("x", x)
-out = torch.t(x)
-print_var("out", out)
-x = torch.randn(2, 3)
-print_var("x", x)
-out = torch.t(x)
-print_var("out", out)
-z = echotorch.randn(length=0)
-print_var("z", z)
-out = torch.t(z)
-print_var("out", out)
-z = echotorch.randn(length=3)
-print_var("z", z)
-out = torch.t(z)
-print_var("out", out)
-z = echotorch.randn(3, length=2)
-print_var("z", z)
-out = torch.t(z)
-print_var("out", out)
-print("")
-
-# take
-# time dim destroyed!
-print("=======================")
-print("take")
-x = torch.tensor([[4, 3, 5], [6, 7, 8]])
-y = torch.tensor([[0, 2, 5], [1, 3, 4]])
-print_var("x", x)
-print(x)
-print_var("y", y)
-print(y)
-out = torch.take(x, y)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([[4, 3, 5], [6, 7, 8]], time_dim=0)
-print_var("z", z)
-out = torch.take(z, y)
-print_var("out", out)
-print(out)
-print("")
-
-# take_along_dim
-# time dim destroyed!
-print("=======================")
-print("take_along_dim")
-x = torch.tensor([[10, 30, 20], [60, 40, 50]])
-print_var("x", x)
-max_idx = torch.argmax(x)
-print_var("max_idx", max_idx)
-print(max_idx)
-out = torch.take_along_dim(x, max_idx)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([[10, 30, 20], [60, 40, 50]], time_dim=0)
-print_var("z", z)
-max_idx_z = torch.argmax(z)
-print_var("max_idx_z", max_idx_z)
-print(max_idx_z)
-out = torch.take_along_dim(z, max_idx_z)
-print_var("out", out)
-print("")
-
-# tensor_split
-print("=======================")
-print("tensor_split")
-x = torch.arange(8)
-print_var("x", x)
-print(x)
-out = torch.tensor_split(x, 3)
-print_var("out", out)
-print(out)
-z = echotorch.arange(8)
-print_var("z", z)
-print(z)
-out = torch.tensor_split(z, 10)
-print_var("out", out)
-print(out)
-print("")
-
-# tile
-print("=======================")
-print("tile")
-x = torch.tensor([[1, 2], [3, 4]])
-print_var("x", x)
-print(x)
-out = torch.tile(x, (2, 2))
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([[1, 2], [3, 4]], time_dim=0)
-print_var("z", z)
-print(z)
-out = torch.tile(z, (2, 2))
-print_var("out", out)
-print(out)
-print("")
-
-# transpose
-print("=======================")
-print("transpose")
-x = torch.randn(2, 3)
-print_var("x", x)
-print(x)
-out = torch.transpose(x, 0, 1)
-print_var("out", out)
-print(out)
-x = echotorch.randn(3, length=2)
-print_var("x", x)
-print(x)
-out = torch.transpose(x, 0, 1)
-print_var("out", out)
-print(out)
-
-# unbind
-print("=======================")
-print("unbind")
-x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
-print_var("x", x)
-print(x)
-out = torch.unbind(x)
-print_var("out", out)
-print(out)
-x = echotorch.timetensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], time_dim=0)
-print_var("x", x)
-print(x)
-out = torch.unbind(x)
-print_var("out", out)
-print(out)
-x = echotorch.timetensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], time_dim=0)
-print_var("x", x)
-print(x)
-out = torch.unbind(x, dim=1)
-print_var("out", out)
-print(out)
-print("")
-
-# unsqueeze
-print("=========================")
-print("unsqueeze")
-x = torch.tensor([1, 2, 3, 4])
-print_var("x", x)
-print(x)
-out = torch.unsqueeze(x, 0)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([1, 2, 3, 4])
-print_var("z", z)
-print(z)
-out = torch.unsqueeze(z, 0)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([1, 2, 3, 4])
-print_var("z", z)
-print(z)
-out = torch.unsqueeze(z, 1)
-print_var("out", out)
-print(out)
-print("")
-
-# vsplit
-print("=========================")
-print("vsplit")
-x = torch.arange(16.0).reshape(4, 4)
-print_var("x", x)
-print(x)
-out = torch.vsplit(x, 4)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor(torch.arange(16.0).reshape(4, 4), time_dim=0)
-print_var("z", z)
-print(z)
-out = torch.vsplit(z, 4)
-print_var("out", out)
-print(out)
-print("")
-
-# vstack
-print("=========================")
-print("vstack")
-x = torch.tensor([1, 2, 3])
-y = torch.tensor([4, 5, 6])
-print_var("x", x)
-print_var("y", y)
-out = torch.vstack((x, y))
-print_var("out", out)
-print(out)
-z1 = echotorch.timetensor([1, 2, 3])
-z2 = echotorch.timetensor([4, 5, 6])
-print_var("z1", z1)
-print_var("z2", z2)
-out = torch.vstack((z1, z2))
-print_var("out", out)
-print(out)
-z1 = echotorch.timetensor([[1], [2], [3]])
-z2 = echotorch.timetensor([[4], [5], [6]])
-print_var("z1", z1)
-print_var("z2", z2)
-out = torch.vstack((z1, z2))
-print_var("out", out)
-print(out)
-print("")
-
-# where
-print("=========================")
-print("where")
-x = torch.randn(3, 2)
-y = torch.ones(3, 2)
-print_var("x", x)
-print(x)
-print_var("y", y)
-print(y)
-out = torch.where(x > 0, x, y)
-print(x > 0)
-print_var("out", out)
-print(out)
-z1 = echotorch.randn(2, length=10)
-z2 = echotorch.ones(2, length=10)
-print_var("z1", z1)
-print(z1)
-print_var("z2", z2)
-print(z2)
-out = torch.where(z1 > 0, z1, z2)
-print(z1 > 0)
-print_var("out", out)
-print(out)
-
diff --git a/examples/timetensor/torch_ops_advanced.py b/examples/timetensor/torch_ops_advanced.py
deleted file mode 100644
index 111db34..0000000
--- a/examples/timetensor/torch_ops_advanced.py
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-# Imports
-import torch
-import echotorch
-
-
diff --git a/examples/timetensor/torch_ops_blas_lapack.py b/examples/timetensor/torch_ops_blas_lapack.py
deleted file mode 100644
index e69de29..0000000
diff --git a/examples/timetensor/torch_ops_other.py b/examples/timetensor/torch_ops_other.py
deleted file mode 100644
index 8c735b2..0000000
--- a/examples/timetensor/torch_ops_other.py
+++ /dev/null
@@ -1,238 +0,0 @@
-
-
-# Imports
-import torch
-import echotorch
-
-
-def print_var(head, t_in):
- if isinstance(t_in, echotorch.TimeTensor):
- print("{}: {}, {}, time_dim: {}, tlen: {}, csize: {}, bsize: {}".format(head, t_in.size(), t_in.__class__.__name__, t_in.time_dim, t_in.tlen, t_in.csize(), t_in.bsize()))
- elif isinstance(t_in, torch.Tensor):
- print("{}: {}, {}".format(head, t_in.size(), t_in.__class__.__name__))
- elif isinstance(t_in, list) or isinstance(t_in, tuple):
- for el_i, el in enumerate(t_in):
- print_var("{}:{}".format(head, el_i), el)
- # end for
- # end if
-# end print_var
-
-
-# atleast_1d
-# return timetensor, same data, same time dim
-print("-------------------------")
-print("atleast_1d")
-x = torch.tensor(1.)
-print_var("x", x)
-print(x)
-out = torch.atleast_1d(x)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([1, 2, 3])
-print_var("z", z)
-print(z)
-out = torch.atleast_1d(z)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([[1], [2], [3]])
-print_var("z", z)
-print(z)
-out = torch.atleast_1d(z)
-print_var("out", out)
-print(out)
-print("")
-
-# atleast_2d
-# when 0-D timeserie, add a batch dim and time_dim + 1
-print("-------------------------")
-print("atleast_2d")
-x = torch.tensor(1.)
-print_var("x", x)
-print(x)
-out = torch.atleast_2d(x)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([1, 2, 3])
-print_var("z", z)
-print(z)
-out = torch.atleast_2d(z)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([[1], [2], [3]])
-print_var("z", z)
-print(z)
-out = torch.atleast_2d(z)
-print_var("out", out)
-print(out)
-print("")
-
-# atleast_3d
-# when 0-D timeserie, add a batch dim and time_dim + 1 and a channel dim
-# when 1-D timeseries, add a channel dim
-print("-------------------------")
-print("atleast_3d")
-x = torch.tensor(1.)
-print_var("x", x)
-print(x)
-out = torch.atleast_3d(x)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([1, 2, 3])
-print_var("z", z)
-print(z)
-out = torch.atleast_3d(z)
-print_var("out", out)
-print(out)
-z = echotorch.timetensor([[1], [2], [3]])
-print_var("z", z)
-print(z)
-out = torch.atleast_3d(z)
-print_var("out", out)
-print(out)
-print("")
-z = echotorch.timetensor([[[1]], [[2]], [[3]]])
-print_var("z", z)
-print(z)
-out = torch.atleast_3d(z)
-print_var("out", out)
-print(out)
-print("")
-
-# bincount
-# destroy time dim
-print("-------------------------")
-print("bincount")
-x = torch.randint(0, 8, (5,), dtype=torch.int64)
-y = torch.linspace(0, 1, steps=5)
-print(x), print(y)
-torch.bincount(x)
-x.bincount(y)
-z = echotorch.timetensor([1, 2, 3])
-print(torch.bincount(z))
-print("")
-
-# block_diag
-# keep time dim of first timetensor
-print("-------------------------")
-print("block_diag")
-A = torch.tensor([[0, 1], [1, 0]])
-B = echotorch.timetensor([[3, 4, 5], [6, 7, 8]])
-C = torch.tensor(7)
-D = torch.tensor([1, 2, 3])
-E = torch.tensor([[4], [5], [6]])
-print(torch.block_diag(A, B, C, D, E))
-print("")
-
-# broadcast_tensors
-print("-------------------------")
-print("broadcast_tensors")
-x = torch.arange(3).view(1, 3)
-print(x)
-# y = torch.arange(3).view(3, 1)
-y = torch.tensor([[4], [5]])
-print(y)
-a, b = torch.broadcast_tensors(x, y)
-print(a.size())
-print(a)
-print(b.size())
-print(b)
-print("##")
-x = echotorch.timetensor([[0, 1, 2]])
-a, b = torch.broadcast_tensors(x, y)
-print(x)
-print(a.size())
-print(a)
-print(b.size())
-print(b)
-print("##")
-z = echotorch.timetensor([[4], [5]], time_dim=1)
-a, b = torch.broadcast_tensors(x, z)
-print(x)
-print(z)
-print(a.size())
-print(a)
-print(b.size())
-print(b)
-print("")
-
-# broadcast_tensors
-# no input tensor
-print("-------------------------")
-print("broadcast_to")
-x = torch.tensor([1, 2, 3])
-out = torch.broadcast_to(x, (5, 3))
-print(x)
-print(out)
-print(out.size())
-z = echotorch.timetensor([1, 2, 3])
-out = torch.broadcast_to(z, (5, 3))
-print(z)
-print(out)
-print(out.size())
-print("")
-
-# bucketize
-print("-------------------------")
-print("bucketize")
-boundaries = torch.tensor([1, 3, 5, 7, 8])
-v = torch.tensor([[3, 6, 9], [3, 6, 9]])
-print(torch.bucketize(v, boundaries, right=False))
-z = echotorch.timetensor([[3, 6, 9], [3, 6, 9]])
-print(torch.bucketize(z, boundaries, right=False))
-print("")
-
-# cartesian_prod
-print("-------------------------")
-print("cartesian_prod")
-a = [1, 2, 3]
-b = [4, 5]
-c = [6, 7]
-tensor_a = torch.tensor(a)
-tensor_b = torch.tensor(b)
-tensor_c = torch.tensor(c)
-print(torch.cartesian_prod(tensor_a, tensor_b, tensor_c))
-ttensor_a = echotorch.timetensor(a)
-ttensor_b = echotorch.timetensor(b)
-ttensor_c = echotorch.timetensor(c)
-print(torch.cartesian_prod(ttensor_a, ttensor_b, ttensor_c))
-print("")
-
-# cdist
-print("-------------------------")
-print("cdist")
-x = echotorch.timetensor([[0.9041, 0.0196], [-0.3108, -2.4423], [-0.4821, 1.059]])
-y = echotorch.timetensor([[-2.1763, -0.4713], [-0.6986, 1.3702]])
-print(torch.cdist(x, y, p=2))
-print("")
-
-# clone
-print("-------------------------")
-print("clone")
-x = echotorch.timetensor([1, 3, 5, 7, 8])
-y = torch.clone(x)
-print(id(x))
-print(id(y))
-print(y)
-print("")
-
-# combinations
-print("-------------------------")
-print("combinations")
-a = echotorch.timetensor([1, 2, 3])
-print(torch.combinations(a))
-
-# cross
-print("-------------------------")
-print("cross")
-a = torch.randn(4, 3)
-b = torch.randn(4, 3)
-print(torch.cross(a, b, dim=1).size())
-a = torch.randn(3, 4)
-b = torch.randn(3, 4)
-print(torch.cross(a, b, dim=0).size())
-a = torch.randn(2, 3, 4)
-b = torch.randn(2, 3, 4)
-print(torch.cross(a, b, dim=1).size())
-a = echotorch.randn(3, length=4)
-b = echotorch.randn(3, length=4)
-print(torch.cross(a, b, dim=1))
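
A minimal plain-PyTorch sketch of the shape rules the deleted scratch file above was probing (the TimeTensor-specific time_dim bookkeeping its comments describe is EchoTorch behaviour and is left out here):

    import torch

    # atleast_3d pads inputs up to three dimensions:
    assert torch.atleast_3d(torch.tensor(1.)).shape == (1, 1, 1)      # 0-D -> (1, 1, 1)
    assert torch.atleast_3d(torch.zeros(3)).shape == (1, 3, 1)        # 1-D (N,) -> (1, N, 1)
    assert torch.atleast_3d(torch.zeros(3, 1)).shape == (3, 1, 1)     # 2-D -> trailing dim appended
    assert torch.atleast_3d(torch.zeros(3, 1, 1)).shape == (3, 1, 1)  # 3-D and above unchanged

    # broadcast_to expands a (3,) tensor to (5, 3) without copying data:
    out = torch.broadcast_to(torch.tensor([1, 2, 3]), (5, 3))
    assert out.shape == (5, 3)
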
diff --git a/examples/timetensor/torch_ops_spectral.py b/examples/timetensor/torch_ops_spectral.py
deleted file mode 100644
index 58ccfdf..0000000
--- a/examples/timetensor/torch_ops_spectral.py
+++ /dev/null
@@ -1,27 +0,0 @@
-
-import torch
-import echotorch
-
-
-x = torch.randn(10, 1000)
-out = torch.stft(x, n_fft=20, hop_length=5, win_length=10)
-# print(out)
-print(out.size())
-
-y = echotorch.randn(length=1000)
-out = torch.stft(y, n_fft=20, hop_length=5, win_length=10)
-# print(out)
-print(out.size())
-
-y = echotorch.randn(length=1000, batch_size=(10,))
-out = torch.stft(y, n_fft=20, hop_length=5, win_length=10)
-# print(out)
-print(out.size())
-
-z = torch.istft(out, n_fft=20, hop_length=5, win_length=10)
-print("z: {}".format(z))
-print(z.size())
-
-x = torch.randn(10, 1000)
-out = torch.bartlett_window(window_length=10)
-print("bartlett_window: {}".format(out))
diff --git a/examples/timetensor/visualisation.py b/examples/timetensor/visualisation.py
deleted file mode 100644
index 5df2187..0000000
--- a/examples/timetensor/visualisation.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : examples/timetensors/visualisation.py
-# Description : Example of visualisation functions for timetensors.
-# Date : 17th of August 2021.
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-import matplotlib.pyplot as plt
-import echotorch.viz
-
-
-# Random timeseries
-x = echotorch.randn(5, time_length=100)
-
-# Pairs visualisation
-echotorch.viz.pairs(x, figsize=(12, 8), s=3, sign_level=0.5)
-
-# Difference operators
-dx = echotorch.diff(x)
-
-# Show difference
-plt.figure()
-echotorch.viz.timeplot(dx, title="diff(x)")
-plt.show()
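
The visualisation example above depended on echotorch.viz; a library-free sketch of the same first-difference plot, assuming only numpy and matplotlib:

    import matplotlib.pyplot as plt
    import numpy as np

    x = np.random.randn(100, 5)      # stand-in for the random timeseries
    dx = np.diff(x, axis=0)          # first difference along the time axis

    plt.figure()
    plt.plot(dx)
    plt.title("diff(x)")
    plt.show()
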
diff --git a/examples/unsupervised_learning/sfa_logmap.py b/examples/unsupervised_learning/sfa_logmap.py
index 4e8782e..75eccea 100644
--- a/examples/unsupervised_learning/sfa_logmap.py
+++ b/examples/unsupervised_learning/sfa_logmap.py
@@ -44,6 +44,6 @@ def logistic_map(x, r):
resc_dforce = (dforce - np.mean(dforce, 0)) / np.std(dforce, 0)
-print(u"{}".format(mdp.utils.cov2(resc_dforce[:-9], slow)))
-print(u"Eta value (time serie) : {}".format(flow[0].get_eta(t=10000)))
-print(u"Eta value (slow feature) : {}".format(flow[-1].get_eta(t=9996)))
+print(("{}".format(mdp.utils.cov2(resc_dforce[:-9], slow))))
+print(("Eta value (time serie) : {}".format(flow[0].get_eta(t=10000))))
+print(("Eta value (slow feature) : {}".format(flow[-1].get_eta(t=9996))))
diff --git a/papers/gallicchio2017/deep_reservoir_computing_critical_experiment_exp1.py b/papers/gallicchio2017/deep_reservoir_computing_critical_experiment_exp1.py
index f8d4d9a..febeee4 100644
--- a/papers/gallicchio2017/deep_reservoir_computing_critical_experiment_exp1.py
+++ b/papers/gallicchio2017/deep_reservoir_computing_critical_experiment_exp1.py
@@ -159,7 +159,7 @@
plt.show()
# Plot tau for each variant
-print("Kendall's Tau : {}".format([desn_if_KT, desn_ia_KT, desn_ge_KT]))
+print(("Kendall's Tau : {}".format([desn_if_KT, desn_ia_KT, desn_ge_KT])))
plt.figure(figsize=(8, 6))
plt.title("Kendall's Tau")
plt.bar(np.arange(3), [desn_if_KT, desn_ia_KT, desn_ge_KT])
@@ -171,7 +171,7 @@
plt.title("Spearman's footrule distances")
plt.bar(np.arange(3), [desn_if_SF, desn_ia_SF, desn_ge_SF])
plt.xticks(np.arange(3), ('IF', 'IA', 'GE'))
-print("Spearman's footrule distances : {}".format([desn_if_SF, desn_ia_SF, desn_ge_SF]))
+print(("Spearman's footrule distances : {}".format([desn_if_SF, desn_ia_SF, desn_ge_SF])))
plt.show()
# Plot tau for each variant
@@ -179,5 +179,5 @@
plt.title("Timescale separation")
plt.bar(np.arange(3), [desn_if_TS, desn_ia_TS, desn_ge_TS])
plt.xticks(np.arange(3), ('IF', 'IA', 'GE'))
-print("Timescale separation : {}".format([desn_if_TS, desn_ia_TS, desn_ge_TS]))
+print(("Timescale separation : {}".format([desn_if_TS, desn_ia_TS, desn_ge_TS])))
plt.show()
diff --git a/papers/gallicchio2017/deep_reservoir_computing_critical_experimental_analysis.py b/papers/gallicchio2017/deep_reservoir_computing_critical_experimental_analysis.py
index 3add118..299aa82 100644
--- a/papers/gallicchio2017/deep_reservoir_computing_critical_experimental_analysis.py
+++ b/papers/gallicchio2017/deep_reservoir_computing_critical_experimental_analysis.py
@@ -160,20 +160,20 @@
# Perturbation effect
P = perturbation_effect(states_distances[:, perturbation_position:])
- print("Layer perturbation durations : {}".format(P))
+ print(("Layer perturbation durations : {}".format(P)))
# Compute ranking
layer_ranking = ranking_of_layers(P)
- print("Layer ranking : {}".format(layer_ranking))
+ print(("Layer ranking : {}".format(layer_ranking)))
# Compute Kendall's tau
- print("Kendall's tau : {}".format(kendalls_tau(ranking=layer_ranking)))
+ print(("Kendall's tau : {}".format(kendalls_tau(ranking=layer_ranking))))
# Compute Spearman's rule
- print("Spearman's rule : {}".format(spearmans_rule(ranking=layer_ranking)))
+ print(("Spearman's rule : {}".format(spearmans_rule(ranking=layer_ranking))))
# Compute timescales separation
- print("Timescales separation : {}".format(timescales_separation(P)))
+ print(("Timescales separation : {}".format(timescales_separation(P))))
# end for
diff --git a/papers/gallicchio2017/deep_reservoir_computing_critical_experimental_analysis_esn.py b/papers/gallicchio2017/deep_reservoir_computing_critical_experimental_analysis_esn.py
index beb9e08..3009a01 100644
--- a/papers/gallicchio2017/deep_reservoir_computing_critical_experimental_analysis_esn.py
+++ b/papers/gallicchio2017/deep_reservoir_computing_critical_experimental_analysis_esn.py
@@ -152,7 +152,7 @@
# Perturbation effect
P = perturbation_effect(states_distances[:, perturbation_position:])
- print("Layer perturbation durations : {}".format(P.item()))
+ print(("Layer perturbation durations : {}".format(P.item())))
# end for
diff --git a/papers/gallicchio2017/deep_reservoir_hyperparameter_search.py b/papers/gallicchio2017/deep_reservoir_hyperparameter_search.py
index 0834c6b..2cddea9 100644
--- a/papers/gallicchio2017/deep_reservoir_hyperparameter_search.py
+++ b/papers/gallicchio2017/deep_reservoir_hyperparameter_search.py
@@ -138,7 +138,7 @@ def print_population(fitness_evaluation):
:return:
"""
for params, fitness_value, _ in fitness_evaluation:
- print("Params : {}, with fitness value : {}".format(params, fitness_value))
+ print(("Params : {}, with fitness value : {}".format(params, fitness_value)))
# end for
print("")
# end print_population
@@ -218,5 +218,5 @@ def print_population(fitness_evaluation):
)
# Show the result
-print("Best hyper-parameters found : {}".format(best_param))
-print("Best {} : {}".format(fitness_measure, best_measure))
+print(("Best hyper-parameters found : {}".format(best_param)))
+print(("Best {} : {}".format(fitness_measure, best_measure)))
diff --git a/papers/paassen2020/reservoir_memory_machines.py b/papers/paassen2020/reservoir_memory_machines.py
index b907f9b..96895e2 100644
--- a/papers/paassen2020/reservoir_memory_machines.py
+++ b/papers/paassen2020/reservoir_memory_machines.py
@@ -110,11 +110,11 @@
for data_i, data in enumerate(repeat_task_loader):
# Inputs and output
data_inputs, data_outputs = data
- print(data_inputs.size())
- print(data_outputs.size())
+ print(data_inputs.size())
+ print(data_outputs.size())
# Print
- print(np.array2string(data_inputs.numpy(), separator=', '))
- print(np.array2string(data_outputs.numpy(), separator=', '))
+ print(np.array2string(data_inputs.numpy(), separator=', '))
+ print(np.array2string(data_outputs.numpy(), separator=', '))
# Plot inputs and output
"""plt.plot(data_inputs[0].numpy(), 'b')
diff --git a/papers/schaetti2016/ESNs-Based-RC-for-MNIST-Handwritten-Digits-Recognition.py b/papers/schaetti2016/ESNs-Based-RC-for-MNIST-Handwritten-Digits-Recognition.py
index afe9b3f..30caa39 100644
--- a/papers/schaetti2016/ESNs-Based-RC-for-MNIST-Handwritten-Digits-Recognition.py
+++ b/papers/schaetti2016/ESNs-Based-RC-for-MNIST-Handwritten-Digits-Recognition.py
@@ -216,4 +216,4 @@
# end with
# Show accuracy
-print("Error rate : {}".format(100.0 - (true_positives / float(test_size) * 100.0)))
+print(("Error rate : {}".format(100.0 - (true_positives / float(test_size) * 100.0))))
diff --git a/requirements.txt b/requirements.txt
index c88cf93..adf88ef 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,11 +1,12 @@
# This is an implicit value, here for clarity
--index-url https://pypi.python.org/simple/
future
-numpy>=1.19.2
-scipy>=1.5.2
+# Requires Python 3.9; the interpreter cannot be pinned from requirements.txt
+numpy==1.21
+scipy==1.10
scikit-learn
matplotlib
-torch==1.9.0
-torchvision==0.10.0
+torch==1.10
+torchvision==0.11
networkx
-tqdm
\ No newline at end of file
+tqdm
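
Since pip cannot pin the interpreter from a requirements file, the Python version constraint belongs in packaging metadata. A hypothetical setup.py excerpt (the field names are standard setuptools; the pins mirror the ones above):

    from setuptools import setup, find_packages

    setup(
        name="echotorch",
        packages=find_packages(),
        python_requires=">=3.9",
        install_requires=[
            "numpy==1.21",
            "scipy==1.10",
            "torch==1.10",
            "torchvision==0.11",
        ],
    )
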
diff --git a/test/test_fold_cross_validation.py b/test/test_fold_cross_validation.py
index 8c26b56..76381ce 100644
--- a/test/test_fold_cross_validation.py
+++ b/test/test_fold_cross_validation.py
@@ -20,11 +20,14 @@
# Copyright Nils Schaetti
# Imports
+import os
+import echotorch.utils
from . import EchoTorchTestCase
import numpy as np
import torch
-import echotorch.utils.evaluation as val
+import echotorch.evaluation as val
from torch.utils.data.dataloader import DataLoader
+from torch.autograd import Variable
from .modules import DummyDataset
diff --git a/test/test_hyperparameters_optimization.py b/test/test_hyperparameters_optimization.py
index 47062dd..5707d44 100644
--- a/test/test_hyperparameters_optimization.py
+++ b/test/test_hyperparameters_optimization.py
@@ -126,7 +126,7 @@ def _evaluation_NARMA10(self, parameters, datasets, n_samples=5):
# Get the first sample in test set,
# and transform it to Variable.
dataiter = iter(testloader)
- test_u, test_y = dataiter.next()
+ test_u, test_y = next(dataiter)
test_u, test_y = Variable(test_u), Variable(test_y)
# Make a prediction with our trained ESN
@@ -195,8 +195,8 @@ def test_genetic_optimization_NARMA10(self):
# Show the result
if debug:
- print("Best hyper-parameters found : {}".format(best_param))
- print("Best NRMSE : {}".format(best_NRMSE))
+ print(("Best hyper-parameters found : {}".format(best_param)))
+ print(("Best NRMSE : {}".format(best_NRMSE)))
# end if
# Test the NRMSE found with optimization
@@ -255,8 +255,8 @@ def test_grid_search_optimization_NARMA10(self):
# Show the result
if debug:
- print("Best hyper-parameters found : {}".format(best_param))
- print("Best NRMSE : {}".format(best_NRMSE))
+ print(("Best hyper-parameters found : {}".format(best_param)))
+ print(("Best NRMSE : {}".format(best_NRMSE)))
# end if
# Test the NRMSE of the ESN found with optimization
@@ -316,8 +316,8 @@ def test_random_optimization_NARMA10(self):
# Show the result
if debug:
- print("Best hyper-parameters found : {}".format(best_param))
- print("Best NRMSE : {}".format(best_NRMSE))
+ print(("Best hyper-parameters found : {}".format(best_param)))
+ print(("Best NRMSE : {}".format(best_NRMSE)))
# end if
# Test the NRMSE of the ESN found with optimization
diff --git a/test/test_memory_management.py b/test/test_memory_management.py
index 12c2e74..1163140 100644
--- a/test/test_memory_management.py
+++ b/test/test_memory_management.py
@@ -595,8 +595,8 @@ def memory_management(
# Print results ?
if print_debug:
- print("NRMSE aligned : {}".format(NRMSE_aligned / value_test_divider))
- print("Expected NRMSE : {}".format(expected_NRMSEs[p] / value_test_divider))
+ print(("NRMSE aligned : {}".format(NRMSE_aligned / value_test_divider)))
+ print(("Expected NRMSE : {}".format(expected_NRMSEs[p] / value_test_divider)))
# end if
# Check NRMSE
@@ -620,7 +620,7 @@ def memory_management(
# if in debug mode
if print_debug:
print("Aligned NRMSEs")
- print(",\n".join(NRMSEs_aligned))
+ print((",\n".join(NRMSEs_aligned)))
# end if
# end memory_management
diff --git a/test/test_narma10_prediction.py b/test/test_narma10_prediction.py
index 75662c8..9f304e9 100644
--- a/test/test_narma10_prediction.py
+++ b/test/test_narma10_prediction.py
@@ -49,8 +49,7 @@ class Test_NARMA10_Prediction(EchoTorchTestCase):
# Run NARMA-10 prediction with classic ESN
def narma10_prediction(self, train_sample_length=5000, test_sample_length=1000, n_train_samples=1, n_test_samples=1,
batch_size=1, reservoir_size=100, leaky_rate=1.0, spectral_radius=0.99, connectivity=0.1,
- input_scaling=1.0, bias_scaling=0.0, ridge_param=0.0000001, use_cuda=False,
- dtype=torch.float64):
+ input_scaling=1.0, bias_scaling=0.0, ridge_param=0.0000001, dtype=torch.float64):
"""
Run NARMA-10 prediction with classic ESN
:param train_sample_length: Training sample length
@@ -65,10 +64,10 @@ def narma10_prediction(self, train_sample_length=5000, test_sample_length=1000,
:param input_scaling: Input scaling
:param bias_scaling: Bias scaling
:param ridge_param: Ridge parameter (regularization)
- :param use_cuda: Test model on the GPU
:return: train MSE, train NRMSE, test MSE, test NRMSE
"""
# Use CUDA?
+ use_cuda = False  # CUDA test variants removed; keep the CPU path only
use_cuda = torch.cuda.is_available() if use_cuda else False
# Manual seed initialisation
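
With the CUDA variants gone, use_cuda is now constant, so the conditional assignment above always yields False. A device-agnostic sketch of the usual idiom, in case the GPU path ever returns:

    import torch

    use_cuda = False   # CPU-only test runs
    device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu")

    u = torch.randn(4, 3).to(device)   # replaces: if use_cuda: u = u.cuda()
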
@@ -152,7 +151,7 @@ def narma10_prediction(self, train_sample_length=5000, test_sample_length=1000,
# Get the first sample in training set,
# and transform it to Variable.
dataiter = iter(trainloader)
- train_u, train_y = dataiter.next()
+ train_u, train_y = next(dataiter)
if dtype == torch.float64: train_u, train_y = train_u.double(), train_y.double()
train_u, train_y = Variable(train_u), Variable(train_y)
if use_cuda: train_u, train_y = train_u.cuda(), train_y.cuda()
@@ -163,7 +162,7 @@ def narma10_prediction(self, train_sample_length=5000, test_sample_length=1000,
# Get the first sample in test set,
# and transform it to Variable.
dataiter = iter(testloader)
- test_u, test_y = dataiter.next()
+ test_u, test_y = next(dataiter)
if dtype == torch.float64: test_u, test_y = test_u.double(), test_y.double()
test_u, test_y = Variable(test_u), Variable(test_y)
if use_cuda: test_u, test_y = test_u.cuda(), test_y.cuda()
@@ -206,28 +205,6 @@ def test_narma10_prediction_esn(self):
self.assertLessEqual(test_nrmse32, 1.0)
# end test_narma10_prediction
- # Test NARMA-10 prediction with default hyper-parameters (Nx=100, SP=0.99) (CUDA version)
- def test_narma10_prediction_esn_cuda(self):
- """
- Test NARMA-10 prediction with default hyper-parameters (Nx=100, SP=0.99)
- """
- # Run NARMA-10 prediction with default hyper-parameters (64 and 32)
- train_mse, train_nrmse, test_mse, test_nrmse = self.narma10_prediction(use_cuda=True)
- train_mse32, train_nrmse32, test_mse32, test_nrmse32 = self.narma10_prediction(use_cuda=True, dtype=torch.float32)
-
- # Check results for 64 bits
- self.assertLessEqual(train_mse, 0.01)
- self.assertLessEqual(train_nrmse, 0.5)
- self.assertLessEqual(test_mse, 0.01)
- self.assertLessEqual(test_nrmse, 1.0)
-
- # Check results for 32 bits
- self.assertLessEqual(train_mse32, 0.01)
- self.assertLessEqual(train_nrmse32, 1.0)
- self.assertLessEqual(test_mse32, 0.01)
- self.assertLessEqual(test_nrmse32, 1.0)
- # end test_narma10_prediction_esn_cuda
-
# Test NARMA-10 prediction with ridge param to 0.1 (Nx=100, SP=0.99)
def test_narma10_prediction_esn_ridge01(self):
"""
@@ -253,32 +230,6 @@ def test_narma10_prediction_esn_ridge01(self):
self.assertLessEqual(test_nrmse32, 1.0)
# end test_narma10_prediction_esn_ridge01
- # Test NARMA-10 prediction with ridge param to 0.1 (Nx=100, SP=0.99) (CUDA version)
- def test_narma10_prediction_esn_ridge01_cuda(self):
- """
- Test NARMA-10 prediction with default hyper-parameters (Nx=100, SP=0.99)
- """
- # Run NARMA-10 prediction with default hyper-parameters (64 and 32)
- train_mse, train_nrmse, test_mse, test_nrmse = self.narma10_prediction(use_cuda=True, ridge_param=0.1)
- train_mse32, train_nrmse32, test_mse32, test_nrmse32 = self.narma10_prediction(
- ridge_param=0.1,
- use_cuda=True,
- dtype=torch.float32
- )
-
- # Check results for 64 bits
- self.assertLessEqual(train_mse, 0.01)
- self.assertLessEqual(train_nrmse, 1.0)
- self.assertLessEqual(test_mse, 0.01)
- self.assertLessEqual(test_nrmse, 1.0)
-
- # Check results for 32 bits
- self.assertLessEqual(train_mse32, 0.01)
- self.assertLessEqual(train_nrmse32, 1.0)
- self.assertLessEqual(test_mse32, 0.01)
- self.assertLessEqual(test_nrmse32, 1.0)
- # end test_narma10_prediction_esn_ridge01_cuda
-
# Test NARMA-10 prediction with ridge param to 0.001 (Nx=100, SP=0.99)
def test_narma10_prediction_esn_ridge001(self):
"""
@@ -304,32 +255,6 @@ def test_narma10_prediction_esn_ridge001(self):
self.assertLessEqual(test_nrmse32, 1.0)
# end test_narma10_prediction_esn_ridge001
- # Test NARMA-10 prediction with ridge param to 0.001 (Nx=100, SP=0.99) (CUDA version)
- def test_narma10_prediction_esn_ridge001_cuda(self):
- """
- Test NARMA-10 prediction with default hyper-parameters (Nx=100, SP=0.99)
- """
- # Run NARMA-10 prediction with default hyper-parameters (64 and 32)
- train_mse, train_nrmse, test_mse, test_nrmse = self.narma10_prediction(use_cuda=True, ridge_param=0.01)
- train_mse32, train_nrmse32, test_mse32, test_nrmse32 = self.narma10_prediction(
- ridge_param=0.01,
- use_cuda=True,
- dtype=torch.float32
- )
-
- # Check results for 64 bits
- self.assertLessEqual(train_mse, 0.01)
- self.assertLessEqual(train_nrmse, 1.0)
- self.assertLessEqual(test_mse, 0.01)
- self.assertLessEqual(test_nrmse, 1.0)
-
- # Check results for 32 bits
- self.assertLessEqual(train_mse32, 0.01)
- self.assertLessEqual(train_nrmse32, 1.0)
- self.assertLessEqual(test_mse32, 0.01)
- self.assertLessEqual(test_nrmse32, 1.0)
- # end test_narma10_prediction_esn_ridge001_cuda
-
# Test NARMA-10 prediction with ridge param to 10 (Nx=100, SP=0.99)
def test_narma10_prediction_esn_ridge10(self):
"""
@@ -355,32 +280,6 @@ def test_narma10_prediction_esn_ridge10(self):
self.assertLessEqual(test_nrmse32, 3.0)
# end test_narma10_prediction_esn_ridge10
- # Test NARMA-10 prediction with ridge param to 10 (Nx=100, SP=0.99) (CUDA version)
- def test_narma10_prediction_esn_ridge10_cuda(self):
- """
- Test NARMA-10 prediction with default hyper-parameters (Nx=100, SP=0.99) (CUDA version)
- """
- # Run NARMA-10 prediction with default hyper-parameters (64 and 32)
- train_mse, train_nrmse, test_mse, test_nrmse = self.narma10_prediction(use_cuda=True, ridge_param=10)
- train_mse32, train_nrmse32, test_mse32, test_nrmse32 = self.narma10_prediction(
- ridge_param=10.0,
- use_cuda=True,
- dtype=torch.float32
- )
-
- # Check results for 64 bits
- self.assertLessEqual(train_mse, 0.1)
- self.assertLessEqual(train_nrmse, 3.0)
- self.assertLessEqual(test_mse, 0.1)
- self.assertLessEqual(test_nrmse, 3.0)
-
- # Check results for 32 bits
- self.assertLessEqual(train_mse32, 0.1)
- self.assertLessEqual(train_nrmse32, 3.0)
- self.assertLessEqual(test_mse32, 0.2)
- self.assertLessEqual(test_nrmse32, 3.0)
- # end test_narma10_prediction_esn_ridge10_cuda
-
# Test NARMA-10 prediction with 500 neurons
def test_narma10_prediction_esn_500neurons(self):
"""
@@ -406,32 +305,6 @@ def test_narma10_prediction_esn_500neurons(self):
self.assertLessEqual(test_nrmse32, 3.0)
# end test_narma10_prediction_500neurons
- # Test NARMA-10 prediction with 500 neurons (CUDA version)
- def test_narma10_prediction_esn_500neurons_cuda(self):
- """
- Test NARMA-10 prediction with 500 neurons (CUDA version)
- """
- # Run NARMA-10 prediction with default hyper-parameters (64 and 32 bits)
- train_mse, train_nrmse, test_mse, test_nrmse = self.narma10_prediction(reservoir_size=500, use_cuda=True)
- train_mse32, train_nrmse32, test_mse32, test_nrmse32 = self.narma10_prediction(
- reservoir_size=500,
- use_cuda=True,
- dtype=torch.float32
- )
-
- # Check results for 64 bits
- self.assertLessEqual(train_mse, 0.001)
- self.assertLessEqual(train_nrmse, 0.2)
- self.assertLessEqual(test_mse, 0.001)
- self.assertLessEqual(test_nrmse, 0.4)
-
- # Check results for 32 bits
- self.assertLessEqual(train_mse32, 0.15)
- self.assertLessEqual(train_nrmse32, 3.0)
- self.assertLessEqual(test_mse32, 0.15)
- self.assertLessEqual(test_nrmse32, 3.0)
- # end test_narma10_prediction_esn_500neurons_cuda
-
# Test NARMA-10 prediction with leaky-rate 0.5 (Nx=100, SP=0.99, LR=0.5)
def test_narma10_prediction_liesn(self):
"""
@@ -451,37 +324,14 @@ def test_narma10_prediction_liesn(self):
self.assertLessEqual(test_nrmse, 1.0)
# Check results
- self.assertLessEqual(train_nrmse32, 2.0)
+ self.assertLessEqual(train_nrmse32, 1.8)
self.assertLessEqual(test_mse32, 0.1)
self.assertLessEqual(test_nrmse32, 1.8)
# end test_narma10_prediction
- # Test NARMA-10 prediction with leaky-rate 0.5 (Nx=100, SP=0.99, LR=0.5) (CUDA version)
- def test_narma10_prediction_liesn_cuda(self):
- """
- Test NARMA-10 prediction with leaky-rate 0.5 (Nx=100, SP=0.99, LR=0.5) (CUDA version)
- """
- # Run NARMA-10 prediction with default hyper-parameters (32 and 64 bits)
- train_mse, train_nrmse, test_mse, test_nrmse = self.narma10_prediction(leaky_rate=0.5, use_cuda=True)
- train_mse32, train_nrmse32, test_mse32, test_nrmse32 = self.narma10_prediction(
- leaky_rate=0.5,
- use_cuda=True,
- dtype=torch.float32
- )
-
- # Check results
- self.assertLessEqual(train_mse, 0.01)
- self.assertLessEqual(train_nrmse, 1.0)
- self.assertLessEqual(test_mse, 0.01)
- self.assertLessEqual(test_nrmse, 1.0)
-
- # Check results
- # TODO: Check why the CUDA version does 3.7291 (train_nrmse32), 0.16 (test_mse32) and 3.42 (test_nrmse32)
- self.assertLessEqual(train_nrmse32, 3.8)
- self.assertLessEqual(test_mse32, 0.17)
- self.assertLessEqual(test_nrmse32, 3.5)
- # end test_narma10_prediction_liesn_cuda
-
# endregion TESTS
# end test_narma10_prediction
diff --git a/test/test_timetensors_statops.py b/test/test_timetensors_statops.py
deleted file mode 100644
index ff662db..0000000
--- a/test/test_timetensors_statops.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : test/test_timetensors_stats.py
-# Description : Test statistical operations on TimeTensors.
-# Date : 17th of August, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-import echotorch.utils.matrix_generation as mg
-import echotorch.utils
-import torch
-import numpy as np
-
-# Local imports
-from . import EchoTorchTestCase
-
-
-# Test cases : Test statistical operations on TimeTensors
-class Test_TimeTensors_StatOps(EchoTorchTestCase):
- """
- Test cases : Test statistical operations on TimeTensors
- """
-
- # region TESTS
-
- # Test covariance
-
- # endregion TESTS
-
-# end Test_TimeTensors_StatOps
-
diff --git a/test/test_torch_ops_indexing.py b/test/test_torch_ops_indexing.py
deleted file mode 100644
index 73865b3..0000000
--- a/test/test_torch_ops_indexing.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : echotorch/timetensor.py
-# Description : A special tensor with a time dimension
-# Date : 25th of January, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import Optional, Tuple, Union, List, Callable, Any
-import torch
-import echotorch
-import numpy as np
-import warnings
-
-# Test case
-from . import EchoTorchTestCase
-
-
-# Test PyTorch Indexing Ops
-class Test_Torch_Ops_Indexing(EchoTorchTestCase):
- r"""Test PyTorch Indexing Ops
- """
-
- # region TESTS
-
- # Test cat
- def test_cat(self):
- r"""Test :func:`torch.cat`.
- """
- # Parameters
- time_length = 10
- n_features = 2
- time_dim = 1
-
- # Tensors and timetensor
- x = torch.randn(time_length, n_features)
- y = echotorch.randn(n_features, length=time_length)
- z = echotorch.as_timetensor(torch.randn(time_length, n_features), time_dim=time_dim)
-
- # Cat y and z
- out = torch.cat((y, z), 0)
- assert out.size()[0] == time_length * 2 and out.size()[1] == n_features
- assert out.time_dim == 0
- assert isinstance(out, echotorch.TimeTensor)
-
- # Cat x and z
- out = torch.cat((x, z), 0)
- assert out.size()[0] == time_length * 2 and out.size()[1] == n_features
- assert out.time_dim == 1
- assert isinstance(out, echotorch.TimeTensor)
- # end test_cat
-
- # Test chunk
- def test_chunk(self):
- r"""Test :func:`torch.chunk`.
- """
- # Parameters
- time_length = 10
- n_features = 2
-
- # Tensors and TimeTensors
- y = echotorch.randn(n_features, length=time_length)
-
- out = torch.chunk(y, 2, 0)
- assert len(out) == 2
- assert out[0].size()[0] == 5 and out[0].size()[1] == n_features
- assert isinstance(out[0], echotorch.TimeTensor)
- # end test_chunk
-
- # dsplit
- def test_dsplit(self):
- r"""Test :func:`torch.dsplit`.
- """
- # Parameters
- time_length = 10
- n_features = 2
-
- # TimeTensors
- x = torch.arange(16.0).reshape(2, 2, 4)
- y = echotorch.timetensor(x, time_dim=2)
- z = echotorch.timetensor(x, time_dim=1)
-
- # dsplit y
- out = torch.dsplit(y, [1, 3])
- assert len(out) == 3
- assert out[0].size()[2] == 1
- assert out[0].time_dim == 2
- assert out[1].size()[2] == 2
- assert out[1].time_dim == 2
- assert out[2].size()[2] == 1
- assert out[2].time_dim == 2
- assert isinstance(out[0], echotorch.TimeTensor)
-
- # dsplit z
- out = torch.dsplit(z, [1, 3])
- assert len(out) == 3
- assert out[0].size()[2] == 1
- assert out[0].time_dim == 1
- assert out[1].size()[2] == 2
- assert out[1].time_dim == 1
- assert out[2].size()[2] == 1
- assert out[2].time_dim == 1
- assert isinstance(out[0], echotorch.TimeTensor)
- # end test_dsplit
-
- # Test column_stack
- def test_column_stack(self):
- r"""Test :func:`torch.column_stack`.
- """
- # Time length
- time_length = 20
- n_features = 2
- time_dim = 0
-
- # Tensors/TimeTensors
- x = torch.arange(time_length)
- y = torch.arange(time_length * 2).reshape(time_length, n_features)
- z = echotorch.arange(time_length)
-
- # Test 1
- out = torch.column_stack((z, y))
- assert out.size()[0] == time_length
- assert out.size()[1] == n_features + 1
- assert out.time_dim == time_dim
- assert isinstance(out, echotorch.TimeTensor)
-
- # Test 2
- out = torch.column_stack((z, x))
- assert out.size()[0] == time_length
- assert out.size()[1] == 2
- assert out.time_dim == time_dim
- assert isinstance(out, echotorch.TimeTensor)
- # end test_column_stack
-
- # endregion TESTS
-
-# end Test_Torch_Ops_Indexing
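
A plain-torch check of the splitting and stacking behaviour the deleted indexing tests asserted (dsplit always cuts along dimension 2; column_stack promotes 1-D inputs to columns):

    import torch

    x = torch.arange(16.0).reshape(2, 2, 4)
    parts = torch.dsplit(x, [1, 3])    # sections [0:1], [1:3], [3:4] along dim 2
    assert [tuple(p.shape) for p in parts] == [(2, 2, 1), (2, 2, 2), (2, 2, 1)]

    a = torch.arange(20)
    b = torch.arange(40).reshape(20, 2)
    assert torch.column_stack((a, b)).shape == (20, 3)
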
diff --git a/test/test_torch_ops_other.py b/test/test_torch_ops_other.py
deleted file mode 100644
index 030748e..0000000
--- a/test/test_torch_ops_other.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# File : test/test_torch_ops_other.py
-# Description : Test compatibility with PyTorch operations.
-# Date : 3rd of September, 2021
-#
-# This file is part of EchoTorch. EchoTorch is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Nils Schaetti
-
-# Imports
-from typing import Optional, Tuple, Union, List, Callable, Any
-import torch
-import echotorch
-import numpy as np
-import warnings
-
-# Test case
-from . import EchoTorchTestCase
-
-
-# Test PyTorch Other Ops
-class Test_Torch_Ops_Other(EchoTorchTestCase):
- r"""Test PyTorch Other Ops.
- """
-
- # region TESTS
-
- # Test cummax
- def test_cummax(self):
- r"""Test :func:`torch.cummax`.
- """
- # Parameters
- time_length = 10
- n_features = 4
- time_dim = 0
-
- # TimeTensor(s)
- x = echotorch.randn(n_features, length=time_length)
-
- # Test 1
- out = torch.cummax(x, dim=0)
- assert out[0].size()[0] == time_length
- assert out[0].size()[1] == n_features
- assert out[0].time_dim == time_dim
- assert isinstance(out[0], echotorch.TimeTensor)
- assert out[1].size()[0] == time_length
- assert out[1].size()[1] == n_features
- assert out[1].time_dim == time_dim
- assert isinstance(out[1], echotorch.TimeTensor)
-
- # Test 2
- out = torch.cummax(x, dim=1)
- assert out[0].size()[0] == time_length
- assert out[0].size()[1] == n_features
- assert out[0].time_dim == time_dim
- assert isinstance(out[0], echotorch.TimeTensor)
- assert out[1].size()[0] == time_length
- assert out[1].size()[1] == n_features
- assert out[1].time_dim == time_dim
- assert isinstance(out[1], echotorch.TimeTensor)
- # end test_cummax
-
- # Test cummin
- def test_cummin(self):
- r"""Test :func:`torch.cummin`.
- """
- # Parameters
- time_length = 10
- n_features = 4
- time_dim = 0
-
- # TimeTensor(s)
- x = echotorch.randn(n_features, length=time_length)
-
- # Test 1
- out = torch.cummin(x, dim=0)
- assert out[0].size()[0] == time_length
- assert out[0].size()[1] == n_features
- assert out[0].time_dim == time_dim
- assert isinstance(out[0], echotorch.TimeTensor)
- assert out[1].size()[0] == time_length
- assert out[1].size()[1] == n_features
- assert out[1].time_dim == time_dim
- assert isinstance(out[1], echotorch.TimeTensor)
-
- # Test 2
- out = torch.cummin(x, dim=1)
- assert out[0].size()[0] == time_length
- assert out[0].size()[1] == n_features
- assert out[0].time_dim == time_dim
- assert isinstance(out[0], echotorch.TimeTensor)
- assert out[1].size()[0] == time_length
- assert out[1].size()[1] == n_features
- assert out[1].time_dim == time_dim
- assert isinstance(out[1], echotorch.TimeTensor)
- # end test_cummin
-
- # Test cumprod
- def test_cumprod(self):
- r"""Test :func:`torch.cumprod`.
- """
- # Parameters
- time_length = 10
- n_features = 4
- time_dim = 0
-
- # TimeTensor(s)
- x = echotorch.randn(n_features, length=time_length)
-
- # Test 1
- out = torch.cumprod(x, dim=0)
- assert out.size()[0] == time_length
- assert out.size()[1] == n_features
- assert out.time_dim == time_dim
- assert isinstance(out, echotorch.TimeTensor)
-
- # Test 2
- out = torch.cumprod(x, dim=1)
- assert out.size()[0] == time_length
- assert out.size()[1] == n_features
- assert out.time_dim == time_dim
- assert isinstance(out, echotorch.TimeTensor)
- # end test_cumprod
-
- # Test cumsum
- def test_cumsum(self):
- r"""Test :func:`torch.cumsum`.
- """
- # Parameters
- time_length = 10
- n_features = 4
- time_dim = 0
-
- # TimeTensor(s)
- x = echotorch.randn(n_features, length=time_length)
-
- # Test 1
- out = torch.cumsum(x, dim=0)
- assert out.size()[0] == time_length
- assert out.size()[1] == n_features
- assert out.time_dim == time_dim
- assert isinstance(out, echotorch.TimeTensor)
-
- # Test 2
- out = torch.cumsum(x, dim=1)
- assert out.size()[0] == time_length
- assert out.size()[1] == n_features
- assert out.time_dim == time_dim
- assert isinstance(out, echotorch.TimeTensor)
- # end test_cumsum
-
- # endregion TESTS
-
-# end Test_Torch_Ops_Other
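
Likewise, a small sketch of the return conventions the deleted cumulative-op tests relied on: cummax and cummin return a (values, indices) pair, while cumsum and cumprod return a single tensor of the input's shape.

    import torch

    x = torch.randn(10, 4)
    values, indices = torch.cummax(x, dim=0)   # both shaped (10, 4)
    assert values.shape == x.shape and indices.dtype == torch.int64
    assert torch.cumsum(x, dim=0).shape == x.shape
    assert torch.cumprod(x, dim=1).shape == x.shape
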
diff --git a/torch_ops.ods b/torch_ops.ods
deleted file mode 100644
index d2cbb27..0000000
Binary files a/torch_ops.ods and /dev/null differ