diff --git a/CMakeLists.txt b/CMakeLists.txt index 572e4e0ac..4bb693281 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -172,6 +172,7 @@ else() if(CSP_BUILD_NO_CXX_ABI) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0") endif() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden") if (COVERAGE) # TODO windows add_compile_options(--coverage) diff --git a/Makefile b/Makefile index d37aa8f64..6c8bf66f0 100644 --- a/Makefile +++ b/Makefile @@ -26,6 +26,9 @@ build-debug: ## build the library ( DEBUG ) - May need a make clean when switch build-conda: ## build the library in Conda python setup.py build build_ext --csp-no-vcpkg --inplace +build-conda-debug: ## build the library ( DEBUG ) - in Conda + SKBUILD_CONFIGURE_OPTIONS="" DEBUG=1 python setup.py build build_ext --csp-no-vcpkg --inplace + install: ## install library python -m pip install . diff --git a/conda/dev-environment-unix.yml b/conda/dev-environment-unix.yml index fa3740a5b..d6ad291c8 100644 --- a/conda/dev-environment-unix.yml +++ b/conda/dev-environment-unix.yml @@ -45,7 +45,6 @@ dependencies: - ruamel.yaml - ruff>=0.3,<0.4 - scikit-build - - slack-sdk - sqlalchemy - tar - threadpoolctl diff --git a/conda/dev-environment-win.yml b/conda/dev-environment-win.yml index 8ca2482d8..8c1ce5fdb 100644 --- a/conda/dev-environment-win.yml +++ b/conda/dev-environment-win.yml @@ -44,7 +44,6 @@ dependencies: - ruamel.yaml - ruff>=0.3,<0.4 - scikit-build - - slack-sdk - sqlalchemy - threadpoolctl - tornado diff --git a/cpp/csp/adapters/kafka/KafkaAdapterManager.h b/cpp/csp/adapters/kafka/KafkaAdapterManager.h index 234f5fc11..bf4e23158 100644 --- a/cpp/csp/adapters/kafka/KafkaAdapterManager.h +++ b/cpp/csp/adapters/kafka/KafkaAdapterManager.h @@ -47,7 +47,7 @@ struct KafkaStatusMessageTypeTraits using KafkaStatusMessageType = csp::Enum; //Top level AdapterManager object for all kafka adapters in the engine -class KafkaAdapterManager final : public csp::AdapterManager +class CSP_PUBLIC 
KafkaAdapterManager final : public csp::AdapterManager { public: KafkaAdapterManager( csp::Engine * engine, const Dictionary & properties ); diff --git a/cpp/csp/adapters/parquet/DialectGenericListReaderInterface.h b/cpp/csp/adapters/parquet/DialectGenericListReaderInterface.h index c88097f6f..61b77cd85 100644 --- a/cpp/csp/adapters/parquet/DialectGenericListReaderInterface.h +++ b/cpp/csp/adapters/parquet/DialectGenericListReaderInterface.h @@ -25,7 +25,7 @@ class DialectGenericListReaderInterface }; template< typename T > -class TypedDialectGenericListReaderInterface : public DialectGenericListReaderInterface +class CSP_PUBLIC TypedDialectGenericListReaderInterface : public DialectGenericListReaderInterface { public: using Ptr = std::shared_ptr>; @@ -45,4 +45,4 @@ class TypedDialectGenericListReaderInterface : public DialectGenericListReaderIn } -#endif \ No newline at end of file +#endif diff --git a/cpp/csp/adapters/parquet/ParquetInputAdapterManager.h b/cpp/csp/adapters/parquet/ParquetInputAdapterManager.h index baa67e19c..8f3effa1e 100644 --- a/cpp/csp/adapters/parquet/ParquetInputAdapterManager.h +++ b/cpp/csp/adapters/parquet/ParquetInputAdapterManager.h @@ -18,7 +18,7 @@ namespace csp::adapters::parquet //Top level AdapterManager object for all parquet adapters in the engine -class ParquetInputAdapterManager final : public csp::AdapterManager +class CSP_PUBLIC ParquetInputAdapterManager final : public csp::AdapterManager { public: using GeneratorPtr = csp::Generator::Ptr; diff --git a/cpp/csp/adapters/parquet/ParquetOutputAdapterManager.h b/cpp/csp/adapters/parquet/ParquetOutputAdapterManager.h index a2b5da200..b7fe029f4 100644 --- a/cpp/csp/adapters/parquet/ParquetOutputAdapterManager.h +++ b/cpp/csp/adapters/parquet/ParquetOutputAdapterManager.h @@ -21,7 +21,7 @@ class ParquetOutputFilenameAdapter; class ParquetDictBasketOutputWriter; //Top level AdapterManager object for all parquet adapters in the engine -class ParquetOutputAdapterManager final : 
public csp::AdapterManager +class CSP_PUBLIC ParquetOutputAdapterManager final : public csp::AdapterManager { public: using FileVisitorCallback = std::function; diff --git a/cpp/csp/adapters/parquet/ParquetReaderColumnAdapter.cpp b/cpp/csp/adapters/parquet/ParquetReaderColumnAdapter.cpp index 3fe763a60..0525ffbf9 100644 --- a/cpp/csp/adapters/parquet/ParquetReaderColumnAdapter.cpp +++ b/cpp/csp/adapters/parquet/ParquetReaderColumnAdapter.cpp @@ -734,7 +734,7 @@ void ListColumnAdapter::readCurValue() if( this -> m_curChunkArray -> IsValid( curRow ) ) { auto values = this -> m_curChunkArray -> value_slice( curRow ); - auto typedValues = std::dynamic_pointer_cast( values ); + auto typedValues = std::static_pointer_cast( values ); auto arrayValue = m_listReader -> create( typedValues -> length() ); auto* internalBuffer = m_listReader -> getRawDataBuffer( arrayValue ); diff --git a/cpp/csp/adapters/websocket/ClientAdapterManager.h b/cpp/csp/adapters/websocket/ClientAdapterManager.h index 62577d769..b2b15fa78 100644 --- a/cpp/csp/adapters/websocket/ClientAdapterManager.h +++ b/cpp/csp/adapters/websocket/ClientAdapterManager.h @@ -40,10 +40,8 @@ struct WebsocketClientStatusTypeTraits using ClientStatusType = Enum; -class ClientAdapterManager final : public AdapterManager +class CSP_PUBLIC ClientAdapterManager final : public AdapterManager { - - public: ClientAdapterManager( Engine * engine, @@ -78,4 +76,4 @@ class ClientAdapterManager final : public AdapterManager } -#endif \ No newline at end of file +#endif diff --git a/cpp/csp/core/Exception.h b/cpp/csp/core/Exception.h index 074143de7..5227acf34 100644 --- a/cpp/csp/core/Exception.h +++ b/cpp/csp/core/Exception.h @@ -10,7 +10,7 @@ namespace csp { -class Exception : public std::exception +class CSP_PUBLIC Exception : public std::exception { public: Exception( const char * exType, const std::string & description, const char * file, const char * func, int line ) : @@ -59,7 +59,7 @@ class Exception : public std::exception 
}; #define __FILENAME__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) -#define CSP_DECLARE_EXCEPTION( DerivedException, BaseException ) class DerivedException : public BaseException { public: DerivedException( const char * exType, const std::string &r, const char * file, const char * func, int line ) : BaseException( exType, r, file, func, line ) {} }; +#define CSP_DECLARE_EXCEPTION( DerivedException, BaseException ) class CSP_PUBLIC DerivedException : public BaseException { public: DerivedException( const char * exType, const std::string &r, const char * file, const char * func, int line ) : BaseException( exType, r, file, func, line ) {} }; CSP_DECLARE_EXCEPTION( AssertionError, Exception ) CSP_DECLARE_EXCEPTION( RuntimeException, Exception ) diff --git a/cpp/csp/core/Platform.h b/cpp/csp/core/Platform.h index 37474faf6..3ef861259 100644 --- a/cpp/csp/core/Platform.h +++ b/cpp/csp/core/Platform.h @@ -14,7 +14,8 @@ #undef ERROR #undef GetMessage -#define DLL_LOCAL +#define CSP_LOCAL +#define CSP_PUBLIC __declspec(dllexport) #ifdef CSPTYPESIMPL_EXPORTS #define CSPTYPESIMPL_EXPORT __declspec(dllexport) @@ -90,10 +91,11 @@ inline uint8_t ffs(uint64_t n) #else -#define CSPIMPL_EXPORT -#define CSPTYPESIMPL_EXPORT +#define CSPIMPL_EXPORT __attribute__ ((visibility ("default"))) +#define CSPTYPESIMPL_EXPORT __attribute__ ((visibility ("default"))) -#define DLL_LOCAL __attribute__ ((visibility ("hidden"))) +#define CSP_LOCAL __attribute__ ((visibility ("hidden"))) +#define CSP_PUBLIC __attribute__ ((visibility ("default"))) #define START_PACKED #define END_PACKED __attribute__((packed)) diff --git a/cpp/csp/engine/AdapterManager.h b/cpp/csp/engine/AdapterManager.h index a0c1531ee..fd71126c9 100644 --- a/cpp/csp/engine/AdapterManager.h +++ b/cpp/csp/engine/AdapterManager.h @@ -93,7 +93,7 @@ bool ManagedSimInputAdapter::pushNullTick() return true; } -class AdapterManager : public EngineOwned +class CSP_PUBLIC AdapterManager : public EngineOwned { public: 
AdapterManager( csp::Engine * ); diff --git a/cpp/csp/engine/Feedback.h b/cpp/csp/engine/Feedback.h index 0194d9257..ae02925aa 100644 --- a/cpp/csp/engine/Feedback.h +++ b/cpp/csp/engine/Feedback.h @@ -28,7 +28,7 @@ class FeedbackOutputAdapter final : public OutputAdapter }; template -class FeedbackInputAdapter final : public InputAdapter +class CSP_PUBLIC FeedbackInputAdapter final : public InputAdapter { public: using InputAdapter::InputAdapter; diff --git a/cpp/csp/engine/Struct.h b/cpp/csp/engine/Struct.h index e0653e4a3..1b09cb97b 100644 --- a/cpp/csp/engine/Struct.h +++ b/cpp/csp/engine/Struct.h @@ -756,13 +756,13 @@ class Struct void decref() { //Work around GCC12 bug mis-identifying this code as use-after-free -#ifdef __linux__ +#if defined(__linux__) && !defined(__clang__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wuse-after-free" #endif if( --hidden() -> refcount == 0 ) delete this; -#ifdef __linux__ +#if defined(__linux__) && !defined(__clang__) #pragma GCC diagnostic pop #endif } diff --git a/cpp/csp/python/Exception.h b/cpp/csp/python/Exception.h index 104a3509a..6040c79a2 100644 --- a/cpp/csp/python/Exception.h +++ b/cpp/csp/python/Exception.h @@ -8,7 +8,7 @@ namespace csp::python { -class PythonPassthrough : public csp::Exception +class CSP_PUBLIC PythonPassthrough : public csp::Exception { public: PythonPassthrough( const char * exType, const std::string &r, const char * file, @@ -16,9 +16,20 @@ class PythonPassthrough : public csp::Exception csp::Exception( exType, r, file, func, line ) { //Fetch the current error to clear out the error indicator while the stack gets unwound + //We own the references to all the members assigned in PyErr_Fetch + //We need to hold the reference since PyErr_Restore takes back a reference to each of its arguments PyErr_Fetch( &m_type, &m_value, &m_traceback ); } + PythonPassthrough( PyObject * pyException ) : + csp::Exception( "", "" ) + { + // Note: all of these methods return strong references, so 
we own them like in the other constructor + m_type = PyObject_Type( pyException ); + m_value = PyObject_Str( pyException ); + m_traceback = PyException_GetTraceback( pyException ); + } + void restore() { if( !description().empty() ) @@ -39,7 +50,6 @@ class PythonPassthrough : public csp::Exception PyObject * m_type; PyObject * m_value; PyObject * m_traceback; - }; CSP_DECLARE_EXCEPTION( AttributeError, ::csp::Exception ); diff --git a/cpp/csp/python/InitHelper.h b/cpp/csp/python/InitHelper.h index e62dd05b7..1665f847f 100644 --- a/cpp/csp/python/InitHelper.h +++ b/cpp/csp/python/InitHelper.h @@ -10,7 +10,7 @@ namespace csp::python { -class DLL_LOCAL InitHelper +class CSP_LOCAL InitHelper { public: ~InitHelper() {} @@ -111,4 +111,21 @@ inline bool InitHelper::execute( PyObject * module ) } } + +//PyMODINIT_FUNC in Python <3.9 doesn't export the function/make visible +//this is required since we build with hidden visibility by default +//the below macro code can be removed once 3.8 support is dropped +// +//see similar issues: +//https://github.com/scipy/scipy/issues/15996 +//https://github.com/mesonbuild/meson/pull/10369 + +#if PY_VERSION_HEX < 0x03090000 +#ifdef PyMODINIT_FUNC +#undef PyMODINIT_FUNC +#endif + +#define PyMODINIT_FUNC extern "C" CSP_PUBLIC PyObject* +#endif + #endif diff --git a/cpp/csp/python/PyAdapterManager.cpp b/cpp/csp/python/PyAdapterManager.cpp index 3a9ec7211..d6547e135 100644 --- a/cpp/csp/python/PyAdapterManager.cpp +++ b/cpp/csp/python/PyAdapterManager.cpp @@ -68,6 +68,15 @@ class PyAdapterManager : public AdapterManager static PyObject * PyAdapterManager_PyObject_starttime( PyAdapterManager_PyObject * self ) { return toPython( self -> manager -> starttime() ); } static PyObject * PyAdapterManager_PyObject_endtime( PyAdapterManager_PyObject * self ) { return toPython( self -> manager -> endtime() ); } +static PyObject * PyAdapterManager_PyObject_shutdown_engine( PyAdapterManager_PyObject * self, PyObject * pyException ) +{ + 
CSP_BEGIN_METHOD; + + self -> manager -> rootEngine() -> shutdown( PyEngine_shutdown_make_exception( pyException ) ); + + CSP_RETURN_NONE; +} + static int PyAdapterManager_init( PyAdapterManager_PyObject *self, PyObject *args, PyObject *kwds ) { CSP_BEGIN_METHOD; @@ -83,8 +92,9 @@ static int PyAdapterManager_init( PyAdapterManager_PyObject *self, PyObject *arg } static PyMethodDef PyAdapterManager_methods[] = { - { "starttime", (PyCFunction) PyAdapterManager_PyObject_starttime, METH_NOARGS, "starttime" }, - { "endtime", (PyCFunction) PyAdapterManager_PyObject_endtime, METH_NOARGS, "endtime" }, + { "starttime", (PyCFunction) PyAdapterManager_PyObject_starttime, METH_NOARGS, "starttime" }, + { "endtime", (PyCFunction) PyAdapterManager_PyObject_endtime, METH_NOARGS, "endtime" }, + { "shutdown_engine", (PyCFunction) PyAdapterManager_PyObject_shutdown_engine, METH_O, "shutdown_engine" }, {NULL} }; diff --git a/cpp/csp/python/PyCspEnum.h b/cpp/csp/python/PyCspEnum.h index 0da9e9dbc..fb098735c 100644 --- a/cpp/csp/python/PyCspEnum.h +++ b/cpp/csp/python/PyCspEnum.h @@ -27,7 +27,7 @@ struct CSPTYPESIMPL_EXPORT PyCspEnumMeta : public PyHeapTypeObject static PyTypeObject PyType; }; -//TODO Windows - need to figure out why adding DLL_PUBLIC to this class leads to weird compilation errors on CspEnumMeta's unordered_map... +//TODO Windows - need to figure out why adding CSP_PUBLIC to this class leads to weird compilation errors on CspEnumMeta's unordered_map... 
//This is an extension of csp::CspEnumMeta for python dialect, we need it in order to //keep a reference to the python enum type from conversion to/from csp::CspEnumMeta <-> PyObject properly diff --git a/cpp/csp/python/PyEngine.h b/cpp/csp/python/PyEngine.h index 4a694c0c6..d58289501 100644 --- a/cpp/csp/python/PyEngine.h +++ b/cpp/csp/python/PyEngine.h @@ -2,6 +2,7 @@ #define _IN_CSP_PYTHON_PYENGINE_H #include +#include #include #include #include @@ -54,6 +55,21 @@ class CSPIMPL_EXPORT PyEngine final: public PyObject Engine * m_engine; }; +inline std::exception_ptr PyEngine_shutdown_make_exception( PyObject * pyException ) +{ + if( !PyExceptionInstance_Check( pyException ) ) + { + PyObjectPtr pyExceptionStr = PyObjectPtr::own( PyObject_Str( pyException ) ); + if( !pyExceptionStr.ptr() ) + CSP_THROW( PythonPassthrough, "" ); + std::string pyExceptionString = PyUnicode_AsUTF8( pyExceptionStr.ptr() ); + std::string desc = "Expected Exception object as argument for shutdown_engine: got " + pyExceptionString + " of type " + Py_TYPE( pyException ) -> tp_name; + return std::make_exception_ptr( csp::Exception( "TypeError", desc ) ); + } + else + return std::make_exception_ptr( PythonPassthrough( pyException ) ); +} + }; #endif diff --git a/cpp/csp/python/PyPushInputAdapter.cpp b/cpp/csp/python/PyPushInputAdapter.cpp index 3b5f91b07..cd034844a 100644 --- a/cpp/csp/python/PyPushInputAdapter.cpp +++ b/cpp/csp/python/PyPushInputAdapter.cpp @@ -198,11 +198,21 @@ struct PyPushInputAdapter_PyObject CSP_RETURN_NONE; } + static PyObject * shutdown_engine( PyPushInputAdapter_PyObject * self, PyObject * pyException ) + { + CSP_BEGIN_METHOD; + + self -> adapter -> rootEngine() -> shutdown( PyEngine_shutdown_make_exception( pyException ) ); + + CSP_RETURN_NONE; + } + static PyTypeObject PyType; }; static PyMethodDef PyPushInputAdapter_PyObject_methods[] = { - { "push_tick", (PyCFunction) PyPushInputAdapter_PyObject::pushTick, METH_VARARGS, "push new tick" }, + { "push_tick", 
(PyCFunction) PyPushInputAdapter_PyObject::pushTick, METH_VARARGS, "push new tick" }, + { "shutdown_engine", (PyCFunction) PyPushInputAdapter_PyObject::shutdown_engine, METH_O, "shutdown_engine" }, {NULL} }; diff --git a/cpp/csp/python/PyPushPullInputAdapter.cpp b/cpp/csp/python/PyPushPullInputAdapter.cpp index d53ed972d..71190660c 100644 --- a/cpp/csp/python/PyPushPullInputAdapter.cpp +++ b/cpp/csp/python/PyPushPullInputAdapter.cpp @@ -119,12 +119,22 @@ struct PyPushPullInputAdapter_PyObject CSP_RETURN_NONE; } + static PyObject * shutdown_engine( PyPushPullInputAdapter_PyObject * self, PyObject * pyException ) + { + CSP_BEGIN_METHOD; + + self -> adapter -> rootEngine() -> shutdown( PyEngine_shutdown_make_exception( pyException ) ); + + CSP_RETURN_NONE; + } + static PyTypeObject PyType; }; static PyMethodDef PyPushPullInputAdapter_PyObject_methods[] = { - { "push_tick", (PyCFunction) PyPushPullInputAdapter_PyObject::pushTick, METH_VARARGS, "push new tick" }, - { "flag_replay_complete", (PyCFunction) PyPushPullInputAdapter_PyObject::flagReplayComplete, METH_VARARGS, "finish replay ticks" }, + { "push_tick", (PyCFunction) PyPushPullInputAdapter_PyObject::pushTick, METH_VARARGS, "push new tick" }, + { "flag_replay_complete", (PyCFunction) PyPushPullInputAdapter_PyObject::flagReplayComplete, METH_VARARGS, "finish replay ticks" }, + { "shutdown_engine", (PyCFunction) PyPushPullInputAdapter_PyObject::shutdown_engine, METH_O, "shutdown engine" }, {NULL} }; diff --git a/cpp/csp/python/adapters/CMakeLists.txt b/cpp/csp/python/adapters/CMakeLists.txt index abcae763e..8cb34950f 100644 --- a/cpp/csp/python/adapters/CMakeLists.txt +++ b/cpp/csp/python/adapters/CMakeLists.txt @@ -35,7 +35,7 @@ if(CSP_BUILD_PARQUET_ADAPTER) endif() target_link_libraries(parquetadapterimpl csp_core csp_engine cspimpl csp_parquet_adapter) target_include_directories(parquetadapterimpl PUBLIC ${ARROW_INCLUDE_DIR} ${PARQUET_INCLUDE_DIR} "${VENDORED_PYARROW_ROOT}") - 
target_compile_definitions(parquetadapterimpl PUBLIC ARROW_PYTHON_STATIC) + target_compile_definitions(parquetadapterimpl PUBLIC ARROW_PYTHON_STATIC -DARROW_PYTHON_EXPORT=) install(TARGETS parquetadapterimpl RUNTIME DESTINATION ${CSP_RUNTIME_INSTALL_SUBDIR} ) endif() diff --git a/csp/adapters/kafka.py b/csp/adapters/kafka.py index 6871004e3..d2cf97f31 100644 --- a/csp/adapters/kafka.py +++ b/csp/adapters/kafka.py @@ -1,6 +1,6 @@ -import typing from datetime import datetime, timedelta from enum import IntEnum +from typing import TypeVar, Union from uuid import uuid4 import csp @@ -18,7 +18,7 @@ from csp.lib import _kafkaadapterimpl _ = BytesMessageProtoMapper, DateTimeType, JSONTextMessageMapper, RawBytesMessageMapper, RawTextMessageMapper -T = typing.TypeVar("T") +T = TypeVar("T") class KafkaStatusMessageType(IntEnum): @@ -39,7 +39,7 @@ class KafkaAdapterManager: def __init__( self, broker, - start_offset: typing.Union[KafkaStartOffset, timedelta, datetime] = None, + start_offset: Union[KafkaStartOffset, timedelta, datetime] = None, group_id: str = None, group_id_prefix: str = "", max_threads=4, @@ -132,7 +132,7 @@ def subscribe( # Leave key None to subscribe to all messages on the topic # Note that if you subscribe to all messages, they are always flagged as "live" and cant be replayed in engine time key=None, - field_map: typing.Union[dict, str] = None, + field_map: Union[dict, str] = None, meta_field_map: dict = None, push_mode: csp.PushMode = csp.PushMode.LAST_VALUE, adjust_out_of_order_time: bool = False, @@ -154,9 +154,7 @@ def subscribe( return _kafka_input_adapter_def(self, ts_type, properties, push_mode) - def publish( - self, msg_mapper: MsgMapper, topic: str, key: str, x: ts["T"], field_map: typing.Union[dict, str] = None - ): + def publish(self, msg_mapper: MsgMapper, topic: str, key: str, x: ts["T"], field_map: Union[dict, str] = None): if isinstance(field_map, str): field_map = {"": field_map} diff --git a/csp/adapters/perspective.py 
b/csp/adapters/perspective.py index 0075b6015..aaa17583c 100644 --- a/csp/adapters/perspective.py +++ b/csp/adapters/perspective.py @@ -1,6 +1,6 @@ import threading -import typing from datetime import timedelta +from typing import Dict, Optional, Union import csp from csp import ts @@ -148,7 +148,7 @@ def __init__(self, name, limit, index): self.index = index self.columns = {} - def publish(self, value: ts[object], field_map: typing.Union[typing.Dict[str, str], str, None] = None): + def publish(self, value: ts[object], field_map: Union[Dict[str, str], str, None] = None): """ :param value - timeseries to publish onto this table :param field_map: if publishing structs, a dictionary of struct field -> perspective fieldname ( if None will pass struct fields as is ) @@ -161,7 +161,7 @@ def publish(self, value: ts[object], field_map: typing.Union[typing.Dict[str, st raise TypeError("Expected type str for field_map on single column publish, got %s" % type(field_map)) self._publish_field(value, field_map) - def _publish_struct(self, value: ts[csp.Struct], field_map: typing.Optional[typing.Dict[str, str]]): + def _publish_struct(self, value: ts[csp.Struct], field_map: Optional[Dict[str, str]]): field_map = field_map or {k: k for k in value.tstype.typ.metadata()} for k, v in field_map.items(): self._publish_field(getattr(value, k), v) diff --git a/csp/adapters/slack.py b/csp/adapters/slack.py index 70c5c047f..6838504b4 100644 --- a/csp/adapters/slack.py +++ b/csp/adapters/slack.py @@ -1,372 +1,4 @@ -import threading -from logging import getLogger -from queue import Queue -from ssl import SSLContext -from threading import Thread -from time import sleep -from typing import Dict, List, Optional, TypeVar - -import csp -from csp.impl.adaptermanager import AdapterManagerImpl -from csp.impl.outputadapter import OutputAdapter -from csp.impl.pushadapter import PushInputAdapter -from csp.impl.struct import Struct -from csp.impl.types.tstype import ts -from csp.impl.wiring import 
py_output_adapter_def, py_push_adapter_def - try: - from slack_sdk.errors import SlackApiError - from slack_sdk.socket_mode import SocketModeClient - from slack_sdk.socket_mode.request import SocketModeRequest - from slack_sdk.socket_mode.response import SocketModeResponse - from slack_sdk.web import WebClient - - _HAVE_SLACK_SDK = True + from csp_adapter_slack import * # noqa: F403 except ImportError: - _HAVE_SLACK_SDK = False - -T = TypeVar("T") -log = getLogger(__file__) - - -__all__ = ("SlackMessage", "mention_user", "SlackAdapterManager", "SlackInputAdapterImpl", "SlackOutputAdapterImpl") - - -class SlackMessage(Struct): - user: str - user_email: str # email of the author - user_id: str # user id of the author - tags: List[str] # list of mentions - - channel: str # name of channel - channel_id: str # id of channel - channel_type: str # type of channel, in "message", "public" (app_mention), "private" (app_mention) - - msg: str # parsed text payload - reaction: str # emoji reacts - thread: str # thread id, if in thread - payload: dict # raw message payload - - -def mention_user(userid: str) -> str: - """Convenience method, more difficult to do in symphony but we want slack to be symmetric""" - return f"<@{userid}>" - - -class SlackAdapterManager(AdapterManagerImpl): - def __init__(self, app_token: str, bot_token: str, ssl: Optional[SSLContext] = None): - if not _HAVE_SLACK_SDK: - raise RuntimeError("Could not find slack-sdk installation") - if not app_token.startswith("xapp-") or not bot_token.startswith("xoxb-"): - raise RuntimeError("Slack app token or bot token looks malformed") - - self._slack_client = SocketModeClient( - app_token=app_token, - web_client=WebClient(token=bot_token, ssl=ssl), - ) - self._slack_client.socket_mode_request_listeners.append(self._process_slack_message) - - # down stream edges - self._subscribers = [] - self._publishers = [] - - # message queues - self._inqueue: Queue[SlackMessage] = Queue() - self._outqueue: Queue[SlackMessage] = 
Queue() - - # handler thread - self._running: bool = False - self._thread: Thread = None - - # lookups for mentions and redirection - self._room_id_to_room_name: Dict[str, str] = {} - self._room_id_to_room_type: Dict[str, str] = {} - self._room_name_to_room_id: Dict[str, str] = {} - self._user_id_to_user_name: Dict[str, str] = {} - self._user_id_to_user_email: Dict[str, str] = {} - self._user_name_to_user_id: Dict[str, str] = {} - self._user_email_to_user_id: Dict[str, str] = {} - - def subscribe(self): - return _slack_input_adapter(self, push_mode=csp.PushMode.NON_COLLAPSING) - - def publish(self, msg: ts[SlackMessage]): - return _slack_output_adapter(self, msg) - - def _create(self, engine, memo): - # We'll avoid having a second class and make our AdapterManager and AdapterManagerImpl the same - super().__init__(engine) - return self - - def start(self, starttime, endtime): - self._running = True - self._thread = threading.Thread(target=self._run, daemon=True) - self._thread.start() - - def stop(self): - if self._running: - self._running = False - self._slack_client.close() - self._thread.join() - - def register_subscriber(self, adapter): - if adapter not in self._subscribers: - self._subscribers.append(adapter) - - def register_publisher(self, adapter): - if adapter not in self._publishers: - self._publishers.append(adapter) - - def _get_user_from_id(self, user_id): - # try to pull from cache - name = self._user_id_to_user_name.get(user_id, None) - email = self._user_id_to_user_email.get(user_id, None) - - # if none, refresh data via web client - if name is None or email is None: - ret = self._slack_client.web_client.users_info(user=user_id) - if ret.status_code == 200: - # TODO OAuth scopes required - name = ret.data["user"]["profile"].get("real_name_normalized", ret.data["user"]["name"]) - email = ret.data["user"]["profile"]["email"] - self._user_id_to_user_name[user_id] = name - self._user_name_to_user_id[name] = user_id # TODO is this 1-1 in slack? 
- self._user_id_to_user_email[user_id] = email - self._user_email_to_user_id[email] = user_id - return name, email - - def _get_user_from_name(self, user_name): - # try to pull from cache - user_id = self._user_name_to_user_id.get(user_name, None) - - # if none, refresh data via web client - if user_id is None: - # unfortunately the reverse lookup is not super nice... - # we need to pull all users and build the reverse mapping - ret = self._slack_client.web_client.users_list() - if ret.status_code == 200: - # TODO OAuth scopes required - for user in ret.data["members"]: - name = user["profile"].get("real_name_normalized", user["name"]) - user_id = user["profile"]["id"] - email = user["profile"]["email"] - self._user_id_to_user_name[user_id] = name - self._user_name_to_user_id[name] = user_id # TODO is this 1-1 in slack? - self._user_id_to_user_email[user_id] = email - self._user_email_to_user_id[email] = user_id - return self._user_name_to_user_id.get(user_name, None) - return user_id - - def _channel_data_to_channel_kind(self, data) -> str: - if data.get("is_im", False): - return "message" - if data.get("is_private", False): - return "private" - return "public" - - def _get_channel_from_id(self, channel_id): - # try to pull from cache - name = self._room_id_to_room_name.get(channel_id, None) - kind = self._room_id_to_room_type.get(channel_id, None) - - # if none, refresh data via web client - if name is None: - ret = self._slack_client.web_client.conversations_info(channel=channel_id) - if ret.status_code == 200: - # TODO OAuth scopes required - kind = self._channel_data_to_channel_kind(ret.data["channel"]) - if kind == "message": - # TODO use same behavior as symphony adapter - name = "DM" - else: - name = ret.data["channel"]["name"] - - self._room_id_to_room_name[channel_id] = name - self._room_name_to_room_id[name] = channel_id - self._room_id_to_room_type[channel_id] = kind - return name, kind - - def _get_channel_from_name(self, channel_name): - # try to pull 
from cache - channel_id = self._room_name_to_room_id.get(channel_name, None) - - # if none, refresh data via web client - if channel_id is None: - # unfortunately the reverse lookup is not super nice... - # we need to pull all channels and build the reverse mapping - ret = self._slack_client.web_client.conversations_list() - if ret.status_code == 200: - # TODO OAuth scopes required - for channel in ret.data["channels"]: - name = channel["name"] - channel_id = channel["id"] - kind = self._channel_data_to_channel_kind(channel) - self._room_id_to_room_name[channel_id] = name - self._room_name_to_room_id[name] = channel_id - self._room_id_to_room_type[channel_id] = kind - return self._room_name_to_room_id.get(channel_name, None) - return channel_id - - def _get_tags_from_message(self, blocks, authorizations=None) -> List[str]: - """extract tags from message, potentially excluding the bot's own @""" - authorizations = authorizations or [] - if len(authorizations) > 0: - bot_id = authorizations[0]["user_id"] # TODO more than one? - else: - bot_id = "" - - tags = [] - to_search = blocks.copy() - - while to_search: - element = to_search.pop() - # add subsections - if element.get("elements", []): - to_search.extend(element.get("elements")) - - if element.get("type", "") == "user": - tag_id = element.get("user_id") - if tag_id != bot_id: - # TODO tag with id or with name? 
- name, _ = self._get_user_from_id(tag_id) - if name: - tags.append(name) - return tags - - def _process_slack_message(self, client: SocketModeClient, req: SocketModeRequest): - log.info(req.payload) - if req.type == "events_api": - # Acknowledge the request anyway - response = SocketModeResponse(envelope_id=req.envelope_id) - client.send_socket_mode_response(response) - - if ( - req.payload["event"]["type"] in ("message", "app_mention") - and req.payload["event"].get("subtype") is None - ): - user, user_email = self._get_user_from_id(req.payload["event"]["user"]) - channel, channel_type = self._get_channel_from_id(req.payload["event"]["channel"]) - tags = self._get_tags_from_message(req.payload["event"]["blocks"], req.payload["authorizations"]) - slack_msg = SlackMessage( - user=user or "", - user_email=user_email or "", - user_id=req.payload["event"]["user"], - tags=tags, - channel=channel or "", - channel_id=req.payload["event"]["channel"], - channel_type=channel_type or "", - msg=req.payload["event"]["text"], - reaction="", - thread=req.payload["event"]["ts"], - payload=req.payload.copy(), - ) - self._inqueue.put(slack_msg) - - def _run(self): - self._slack_client.connect() - - while self._running: - # drain outbound - while not self._outqueue.empty(): - # pull SlackMessage from queue - slack_msg = self._outqueue.get() - - # refactor into slack command - # grab channel or DM - if hasattr(slack_msg, "channel_id") and slack_msg.channel_id: - channel_id = slack_msg.channel_id - elif hasattr(slack_msg, "channel") and slack_msg.channel: - # TODO DM - channel_id = self._get_channel_from_name(slack_msg.channel) - - # pull text or reaction - if ( - hasattr(slack_msg, "reaction") - and slack_msg.reaction - and hasattr(slack_msg, "thread") - and slack_msg.thread - ): - # TODO - self._slack_client.web_client.reactions_add( - channel=channel_id, - name=slack_msg.reaction, - timestamp=slack_msg.thread, - ) - elif hasattr(slack_msg, "msg") and slack_msg.msg: - try: - # send 
text to channel - self._slack_client.web_client.chat_postMessage( - channel=channel_id, - text=getattr(slack_msg, "msg", ""), - ) - except SlackApiError: - # TODO - ... - else: - # cannot send empty message, log an error - log.error(f"Received malformed SlackMessage instance: {slack_msg}") - - if not self._inqueue.empty(): - # pull all SlackMessages from queue - # do as burst to match SymphonyAdapter - slack_msgs = [] - while not self._inqueue.empty(): - slack_msgs.append(self._inqueue.get()) - - # push to all the subscribers - for adapter in self._subscribers: - adapter.push_tick(slack_msgs) - - # do short sleep - sleep(0.1) - - # liveness check - if not self._thread.is_alive(): - self._running = False - self._thread.join() - - # shut down socket client - try: - # TODO which one? - self._slack_client.close() - # self._slack_client.disconnect() - except AttributeError: - # TODO bug in slack sdk causes an exception to be thrown - # File "slack_sdk/socket_mode/builtin/connection.py", line 191, in disconnect - # self.sock.close() - # ^^^^^^^^^^^^^^^ - # AttributeError: 'NoneType' object has no attribute 'close' - ... 
- - def _on_tick(self, value): - self._outqueue.put(value) - - -class SlackInputAdapterImpl(PushInputAdapter): - def __init__(self, manager): - manager.register_subscriber(self) - super().__init__() - - -class SlackOutputAdapterImpl(OutputAdapter): - def __init__(self, manager): - manager.register_publisher(self) - self._manager = manager - super().__init__() - - def on_tick(self, time, value): - self._manager._on_tick(value) - - -_slack_input_adapter = py_push_adapter_def( - name="SlackInputAdapter", - adapterimpl=SlackInputAdapterImpl, - out_type=ts[[SlackMessage]], - manager_type=SlackAdapterManager, -) -_slack_output_adapter = py_output_adapter_def( - name="SlackOutputAdapter", - adapterimpl=SlackOutputAdapterImpl, - manager_type=SlackAdapterManager, - input=ts[SlackMessage], -) + raise ModuleNotFoundError("Install `csp-adapter-slack` to use csp's Slack adapter") diff --git a/csp/adapters/websocket.py b/csp/adapters/websocket.py index 384e4a251..92e7cf0cf 100644 --- a/csp/adapters/websocket.py +++ b/csp/adapters/websocket.py @@ -1,11 +1,10 @@ import logging import math import threading -import typing import urllib from collections import defaultdict from datetime import date, datetime, timedelta -from typing import Dict, List +from typing import Dict, List, Optional, TypeVar, Union import csp from csp import ts @@ -31,7 +30,7 @@ RawBytesMessageMapper, RawTextMessageMapper, ) -T = typing.TypeVar("T") +T = TypeVar("T") try: @@ -331,7 +330,7 @@ def __init__(self, name, index): def publish( self, value: ts[object], - field_map: typing.Union[typing.Dict[str, str], str, None] = None, + field_map: Union[Dict[str, str], str, None] = None, ): """ :param value - timeseries to publish onto this table @@ -345,7 +344,7 @@ def publish( raise TypeError("Expected type str for field_map on single column publish, got %s" % type(field_map)) self._publish_field(value, field_map) - def _publish_struct(self, value: ts[csp.Struct], field_map: typing.Optional[typing.Dict[str, str]]): 
+ def _publish_struct(self, value: ts[csp.Struct], field_map: Optional[Dict[str, str]]): field_map = field_map or {k: k for k in value.tstype.typ.metadata()} for k, v in field_map.items(): self._publish_field(getattr(value, k), v) @@ -427,7 +426,7 @@ def subscribe( self, ts_type: type, msg_mapper: MsgMapper, - field_map: typing.Union[dict, str] = None, + field_map: Union[dict, str] = None, meta_field_map: dict = None, push_mode: csp.PushMode = csp.PushMode.NON_COLLAPSING, ): @@ -445,6 +444,7 @@ def subscribe( return _websocket_input_adapter_def(self, ts_type, properties, push_mode=push_mode) + def send(self, x: ts["T"]): return _websocket_output_adapter_def(self, x) diff --git a/csp/baselib.py b/csp/baselib.py index bf620fb2c..74e874b5e 100644 --- a/csp/baselib.py +++ b/csp/baselib.py @@ -6,8 +6,8 @@ import pytz import queue import threading -import typing from datetime import datetime, timedelta +from typing import Callable, Dict, List, Optional, TypeVar, Union import csp from csp.impl.__cspimpl import _cspimpl @@ -63,11 +63,11 @@ "wrap_feedback", ] -T = typing.TypeVar("T") -K = typing.TypeVar("K") -V = typing.TypeVar("V") -Y = typing.TypeVar("Y") -U = typing.TypeVar("U") +T = TypeVar("T") +K = TypeVar("K") +V = TypeVar("V") +Y = TypeVar("Y") +U = TypeVar("U") const = input_adapter_def("csp.const", _cspimpl._const, ts["T"], value="~T", delay=(timedelta, timedelta())) _timer = input_adapter_def( @@ -156,13 +156,13 @@ def get_instance(cls): @node -def _list_basket_to_string_ts(x: [ts["T"]]) -> ts[str]: +def _list_basket_to_string_ts(x: List[ts["T"]]) -> ts[str]: value = ",".join([str(x[i]) if csp.ticked(x[i]) else "" for i in range(len(x))]) return f"[{value}]" @node -def _dict_basket_to_string_ts(x: {"K": ts[object]}) -> ts[str]: +def _dict_basket_to_string_ts(x: Dict["K", ts[object]]) -> ts[str]: return str({k: x[k] for k in x.tickedkeys()}) @@ -204,7 +204,7 @@ def log( level: int, tag: str, x, - logger: typing.Optional[logging.Logger] = None, + logger: 
Optional[logging.Logger] = None, logger_tz: object = None, use_thread: bool = False, ): @@ -243,7 +243,7 @@ def _log_ts( level: int, tag: str, x: ts["T"], - logger: typing.Optional[logging.Logger] = None, + logger: Optional[logging.Logger] = None, logger_tz: object = None, use_thread: bool = False, ): @@ -274,8 +274,8 @@ def _log_ts( @graph -def get_basket_field(dict_basket: {"K": ts["V"]}, field_name: str) -> OutputBasket( - {"K": ts[object]}, shape_of="dict_basket" +def get_basket_field(dict_basket: Dict["K", ts["V"]], field_name: str) -> OutputBasket( + Dict["K", ts[object]], shape_of="dict_basket" ): """Given a dict basket of Struct objects, get a dict basket of the given field of struct for the matching key @@ -343,7 +343,7 @@ def _delay_by_ticks(x: ts["T"], delay: int) -> ts["T"]: @graph -def delay(x: ts["T"], delay: typing.Union[timedelta, int]) -> ts["T"]: +def delay(x: ts["T"], delay: Union[timedelta, int]) -> ts["T"]: """delay input ticks by given delay""" if isinstance(delay, int): return _delay_by_ticks(x, delay) @@ -352,7 +352,7 @@ def delay(x: ts["T"], delay: typing.Union[timedelta, int]) -> ts["T"]: @graph -def _lag(x: ts["T"], lag: typing.Union[timedelta, int]) -> ts["T"]: +def _lag(x: ts["T"], lag: Union[timedelta, int]) -> ts["T"]: """ticks when input ticks, but with lagged value of input""" if isinstance(lag, int): return _delay_by_ticks(x, lag) @@ -361,7 +361,7 @@ def _lag(x: ts["T"], lag: typing.Union[timedelta, int]) -> ts["T"]: @graph -def diff(x: ts["T"], lag: typing.Union[timedelta, int]) -> ts["T"]: +def diff(x: ts["T"], lag: Union[timedelta, int]) -> ts["T"]: """diff x against itself lag time/ticks ago""" return x - _lag(x, lag) @@ -396,7 +396,7 @@ def cast_int_to_float(x: ts[int]) -> ts[float]: @node() -def apply(x: ts["T"], f: typing.Callable[["T"], "U"], result_type: "U") -> ts["U"]: +def apply(x: ts["T"], f: Callable[["T"], "U"], result_type: "U") -> ts["U"]: """ :param x: The time series on which the function should be applied :param 
f: A scalar function that will be applied on each value of x @@ -461,7 +461,7 @@ def drop_nans(x: ts[float]) -> ts[float]: @node(cppimpl=_cspbaselibimpl.unroll) -def unroll(x: ts[["T"]]) -> ts["T"]: +def unroll(x: ts[List["T"]]) -> ts["T"]: """ "unrolls" timeseries of lists of type 'T' into individual ticks of type 'T'""" with csp.alarms(): alarm = csp.alarm("T") @@ -484,14 +484,14 @@ def unroll(x: ts[["T"]]) -> ts["T"]: @node(cppimpl=_cspbaselibimpl.collect) -def collect(x: [ts["T"]]) -> ts[["T"]]: +def collect(x: List[ts["T"]]) -> ts[List["T"]]: """convert basket of timeseries into timeseries of list of ticked values""" if csp.ticked(x): return list(x.tickedvalues()) @graph -def flatten(x: [ts["T"]]) -> ts["T"]: +def flatten(x: List[ts["T"]]) -> ts["T"]: """flatten a basket of inputs into ts[ 'T' ]""" # Minor optimization, if we have a list with just # a single ts, then just emit it as-is. Otherwise, @@ -504,7 +504,7 @@ def flatten(x: [ts["T"]]) -> ts["T"]: # TODO cppimpl @node -def gate(x: ts["T"], release: ts[bool], release_on_tick: bool = False) -> ts[["T"]]: +def gate(x: ts["T"], release: ts[bool], release_on_tick: bool = False) -> ts[List["T"]]: """ "gate" the input. if release is false, input will be held until release is true. 
when release ticks true, all gated inputs will tick in one shot @@ -551,7 +551,9 @@ def null_ts(typ: "T") -> ts["T"]: @node(cppimpl=_cspbaselibimpl.multiplex) -def multiplex(x: {"K": ts["T"]}, key: ts["K"], tick_on_index: bool = False, raise_on_bad_key: bool = False) -> ts["T"]: +def multiplex( + x: Dict["K", ts["T"]], key: ts["K"], tick_on_index: bool = False, raise_on_bad_key: bool = False +) -> ts["T"]: """ :param x: The basket of time series to multiplex :param key: A @@ -578,8 +580,8 @@ def multiplex(x: {"K": ts["T"]}, key: ts["K"], tick_on_index: bool = False, rais @node(cppimpl=_cspbaselibimpl.demultiplex) -def demultiplex(x: ts["T"], key: ts["K"], keys: ["K"], raise_on_bad_key: bool = False) -> OutputBasket( - {"K": ts["T"]}, shape="keys" +def demultiplex(x: ts["T"], key: ts["K"], keys: List["K"], raise_on_bad_key: bool = False) -> OutputBasket( + Dict["K", ts["T"]], shape="keys" ): """whenever the timeseries input ticks, output a tick on the appropriate basket output""" with csp.state(): @@ -595,7 +597,7 @@ def demultiplex(x: ts["T"], key: ts["K"], keys: ["K"], raise_on_bad_key: bool = # TODO - looks like output annotations arent working for dynamic baskets, needs to be fixed # @node(cppimpl=_cspbaselibimpl.dynamic_demultiplex) @node -def dynamic_demultiplex(x: ts["T"], key: ts["K"]) -> {ts["K"]: ts["T"]}: +def dynamic_demultiplex(x: ts["T"], key: ts["K"]) -> Dict[ts["K"], ts["T"]]: """whenever the timeseries input ticks, output a tick on the appropriate dynamic basket output""" if csp.ticked(x) and csp.valid(key): csp.output({key: x}) @@ -603,7 +605,7 @@ def dynamic_demultiplex(x: ts["T"], key: ts["K"]) -> {ts["K"]: ts["T"]}: # @node(cppimpl=_cspbaselibimpl.dynamic_collect) @node -def dynamic_collect(data: {ts["K"]: ts["V"]}) -> ts[{"K": "V"}]: +def dynamic_collect(data: Dict[ts["K"], ts["V"]]) -> ts[Dict["K", "V"]]: """whenever any input of the dynamic basket ticks, output the key-value pairs in a dictionary""" if csp.ticked(data): return 
dict(data.tickeditems()) @@ -622,7 +624,7 @@ def accum(x: ts["T"], start: "~T" = 0) -> ts["T"]: @node(cppimpl=_cspbaselibimpl.exprtk_impl) def _csp_exprtk_impl( expression_str: str, - inputs: {str: ts[object]}, + inputs: Dict[str, ts[object]], state_vars: dict, constants: dict, functions: dict, @@ -637,13 +639,13 @@ def _csp_exprtk_impl( @graph def exprtk( expression_str: str, - inputs: {str: ts[object]}, + inputs: Dict[str, ts[object]], state_vars: dict = {}, trigger: ts[object] = None, functions: dict = {}, constants: dict = {}, output_ndarray: bool = False, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """given a mathematical expression, and a set of timeseries corresponding to variables in that expression, tick out the result (a float) of that expression, either every time an input ticks, or on the trigger if provided. @@ -679,7 +681,7 @@ def struct_field(x: ts["T"], field: str, fieldType: "Y") -> ts["Y"]: @node(cppimpl=_cspbaselibimpl.struct_fromts) -def _struct_fromts(cls: "T", inputs: {str: ts[object]}, trigger: ts[object], use_trigger: bool) -> ts["T"]: +def _struct_fromts(cls: "T", inputs: Dict[str, ts[object]], trigger: ts[object], use_trigger: bool) -> ts["T"]: """construct a ticking Struct from the given timeseries. Note structs will be created from all valid items""" with csp.start(): @@ -690,7 +692,7 @@ def _struct_fromts(cls: "T", inputs: {str: ts[object]}, trigger: ts[object], use @graph -def struct_fromts(cls: "T", inputs: {str: ts[object]}, trigger: ts[object] = None) -> ts["T"]: +def struct_fromts(cls: "T", inputs: Dict[str, ts[object]], trigger: ts[object] = None) -> ts["T"]: """construct a ticking Struct from the given timeseries basket. Note structs will be created from all valid items. 
trigger - Optional timeseries to control when struct gets created ( defaults to any time a basket input ticks )""" @@ -699,7 +701,7 @@ def struct_fromts(cls: "T", inputs: {str: ts[object]}, trigger: ts[object] = Non @node(cppimpl=_cspbaselibimpl.struct_collectts) -def struct_collectts(cls: "T", inputs: {str: ts[object]}) -> ts["T"]: +def struct_collectts(cls: "T", inputs: Dict[str, ts[object]]) -> ts["T"]: """construct a ticking Struct from the given timeseries. Note structs will be created from all ticked items""" if csp.ticked(inputs): @@ -820,7 +822,7 @@ def __init__(self, ts_type, default_to_null: bool = False): """ super().__init__() self._inputs = [] - self._output = DelayedEdge(ts[[ts_type]], default_to_null) + self._output = DelayedEdge(ts[List[ts_type]], default_to_null) def copy(self): res = DelayedCollect() @@ -838,7 +840,7 @@ def add_input(self, x: ts["T"]): self._inputs.append(x) def output(self): - """returns collected inputs as ts[ typing.List[ input_ts_type] ]""" + """returns collected inputs as ts[ List[ input_ts_type] ]""" return self._output def _instantiate(self): diff --git a/csp/basketlib.py b/csp/basketlib.py index 2653ed7b1..bf0414c05 100644 --- a/csp/basketlib.py +++ b/csp/basketlib.py @@ -12,7 +12,7 @@ @csp.node(cppimpl=_cspbasketlibimpl._sync_list) -def sync_list(x: [ts["T"]], threshold: timedelta, output_incomplete: bool = True) -> csp.OutputBasket( +def sync_list(x: List[ts["T"]], threshold: timedelta, output_incomplete: bool = True) -> csp.OutputBasket( List[ts["T"]], shape_of="x" ): with csp.alarms(): @@ -37,7 +37,7 @@ def sync_list(x: [ts["T"]], threshold: timedelta, output_incomplete: bool = True @csp.graph -def sync_dict(x: {"K": ts["T"]}, threshold: timedelta, output_incomplete: bool = True) -> csp.OutputBasket( +def sync_dict(x: Dict["K", ts["T"]], threshold: timedelta, output_incomplete: bool = True) -> csp.OutputBasket( Dict["K", ts["T"]], shape_of="x" ): values = list(x.values()) @@ -54,7 +54,7 @@ def sync(x, threshold: 
timedelta, output_incomplete: bool = True): @csp.node(cppimpl=_cspbasketlibimpl._sample_list) -def sample_list(trigger: ts["Y"], x: [ts["T"]]) -> csp.OutputBasket(List[ts["T"]], shape_of="x"): +def sample_list(trigger: ts["Y"], x: List[ts["T"]]) -> csp.OutputBasket(List[ts["T"]], shape_of="x"): """will return valid items in x on trigger""" with csp.start(): csp.make_passive(x) @@ -66,7 +66,7 @@ def sample_list(trigger: ts["Y"], x: [ts["T"]]) -> csp.OutputBasket(List[ts["T"] @csp.graph() -def sample_dict(trigger: ts["Y"], x: {"K": ts["T"]}) -> csp.OutputBasket(Dict["K", ts["T"]], shape_of="x"): +def sample_dict(trigger: ts["Y"], x: Dict["K", ts["T"]]) -> csp.OutputBasket(Dict["K", ts["T"]], shape_of="x"): """will return valid items in x on trigger""" values = list(x.values()) sampled_values = sample_list(trigger, values) diff --git a/csp/build/csp_autogen.py b/csp/build/csp_autogen.py index 4e121da5b..71b570dd3 100644 --- a/csp/build/csp_autogen.py +++ b/csp/build/csp_autogen.py @@ -152,7 +152,7 @@ def _generate_enum_class(self, enum_type): cspenum_decls = "\n".join(f" static {enum_name} {x.name};" for x in enum_type) out = f""" -class {enum_name} : public csp::CspEnum +class CSP_PUBLIC {enum_name} : public csp::CspEnum {{ public: // Raw value quick access @@ -315,7 +315,7 @@ def _generate_struct_class(self, struct_type): ) out = f""" -class {struct_name} : public {base_class} +class CSP_PUBLIC {struct_name} : public {base_class} {{ public: diff --git a/csp/curve.py b/csp/curve.py index d4b149db3..32d4032ec 100644 --- a/csp/curve.py +++ b/csp/curve.py @@ -1,8 +1,8 @@ import copy import numpy as np import pytz -import typing from datetime import timedelta +from typing import Union from csp import null_ts from csp.impl.__cspimpl import _cspimpl @@ -48,7 +48,7 @@ def next(self): ) -def curve(typ: type, data: typing.Union[list, tuple], push_mode: PushMode = PushMode.NON_COLLAPSING): +def curve(typ: type, data: Union[list, tuple], push_mode: PushMode = 
PushMode.NON_COLLAPSING): if isinstance(data, tuple): if len(data) != 2 or not all(isinstance(x, np.ndarray) for x in data): raise ValueError("for numpy curves, must pass tuple of two ndarrays as data") diff --git a/csp/dataframe.py b/csp/dataframe.py index 0fb2bdd17..8aba3814c 100644 --- a/csp/dataframe.py +++ b/csp/dataframe.py @@ -1,5 +1,5 @@ -import typing from datetime import datetime, timedelta +from typing import Dict, Optional import csp.baselib from csp.impl.wiring.edge import Edge @@ -9,7 +9,7 @@ class DataFrame: - def __init__(self, data: typing.Optional[typing.Dict] = None): + def __init__(self, data: Optional[Dict] = None): self._data = data or {} self._columns = list(self._data.keys()) diff --git a/csp/impl/pandas_accessor.py b/csp/impl/pandas_accessor.py index 13445c4b8..3d7a0eb1b 100644 --- a/csp/impl/pandas_accessor.py +++ b/csp/impl/pandas_accessor.py @@ -15,14 +15,14 @@ @csp.node -def _basket_valid(xs: [ts[object]]) -> ts[bool]: +def _basket_valid(xs: List[ts[object]]) -> ts[bool]: if csp.valid(xs): csp.make_passive(xs) return True @csp.node -def _basket_synchronize(xs: [ts["T"]], threshold: timedelta) -> csp.OutputBasket(List[ts["T"]], shape_of="xs"): +def _basket_synchronize(xs: List[ts["T"]], threshold: timedelta) -> csp.OutputBasket(List[ts["T"]], shape_of="xs"): with csp.alarms(): a_end = csp.alarm(bool) @@ -662,7 +662,7 @@ def __call__(self, columns=None, agg="last", drop_na=False): @csp.node -def _collect_numpy(x: [ts[object]], dim: int) -> ts[object]: +def _collect_numpy(x: List[ts[object]], dim: int) -> ts[object]: with csp.state(): s_array = np.array([np.nan for _ in range(dim)], dtype=object) diff --git a/csp/impl/pandas_ext_type.py b/csp/impl/pandas_ext_type.py index 47e9a96d4..c4c681002 100644 --- a/csp/impl/pandas_ext_type.py +++ b/csp/impl/pandas_ext_type.py @@ -556,7 +556,7 @@ def _unary_op(x: ts["T"], op: object) -> ts["T"]: @node -def _reduce(x: [ts["T"]], typ: "T", func: object, args: object = (), kwargs: object = {}) -> 
ts["T"]: +def _reduce(x: List[ts["T"]], typ: "T", func: object, args: object = (), kwargs: object = {}) -> ts["T"]: # The choice was made to only emit values if all basket elements are valid. # If one wanted to reduce only over valid elements, then in many cases you could pre-apply csp.default # with a nan/sentinal value, and then apply a function which ignores these values @@ -578,14 +578,14 @@ def _reduce(x: [ts["T"]], typ: "T", func: object, args: object = (), kwargs: obj @node -def _reduce_float(x: [ts["T"]], typ: "T", func: object, args: object = (), kwargs: object = {}) -> ts[float]: +def _reduce_float(x: List[ts["T"]], typ: "T", func: object, args: object = (), kwargs: object = {}) -> ts[float]: if csp.valid(x): data = np.fromiter(x.validvalues(), dtype=typ) return func(data, *args, **kwargs) @node -def _reduce_bool(x: [ts["T"]], typ: "T", func: object, args: object = (), kwargs: object = {}) -> ts[bool]: +def _reduce_bool(x: List[ts["T"]], typ: "T", func: object, args: object = (), kwargs: object = {}) -> ts[bool]: if csp.valid(x): data = np.fromiter(x.validvalues(), dtype=typ) return bool(func(data, *args, **kwargs)) diff --git a/csp/impl/struct.py b/csp/impl/struct.py index 502df68fc..777cca8d4 100644 --- a/csp/impl/struct.py +++ b/csp/impl/struct.py @@ -224,7 +224,7 @@ def __deepcopy__(self, memodict={}): return self.deepcopy() def __dir__(self): - return self.__full_metadata_typed__.keys() + return sorted(super().__dir__() + list(self.__full_metadata_typed__.keys())) def define_struct(name, metadata: dict, defaults: dict = {}, base=Struct): diff --git a/csp/impl/types/container_type_normalizer.py b/csp/impl/types/container_type_normalizer.py index b4bb4a327..ab7c94fd4 100644 --- a/csp/impl/types/container_type_normalizer.py +++ b/csp/impl/types/container_type_normalizer.py @@ -27,6 +27,11 @@ def _convert_containers_to_typing_generic_meta(cls, typ, is_within_container): return typ # cls._deep_convert_generic_meta_to_typing_generic_meta(typ, 
is_within_container) elif isinstance(typ, dict): + # warn( + # "Using {K: V} syntax for type declaration is deprecated. Use Dict[K, V] instead.", + # DeprecationWarning, + # stacklevel=4, + # ) if type(typ) is not dict or len(typ) != 1: # noqa: E721 raise TypeError(f"Invalid type decorator: '{typ}'") t1, t2 = typ.items().__iter__().__next__() @@ -35,11 +40,21 @@ def _convert_containers_to_typing_generic_meta(cls, typ, is_within_container): cls._convert_containers_to_typing_generic_meta(t2, True), ] elif isinstance(typ, set): + # warn( + # "Using {T} syntax for type declaration is deprecated. Use Set[T] instead.", + # DeprecationWarning, + # stacklevel=4, + # ) if type(typ) is not set or len(typ) != 1: # noqa: E721 raise TypeError(f"Invalid type decorator: '{typ}'") t = typ.__iter__().__next__() return typing.Set[cls._convert_containers_to_typing_generic_meta(t, True)] elif isinstance(typ, list): + # warn( + # "Using [T] syntax for type declaration is deprecated. Use List[T] instead.", + # DeprecationWarning, + # stacklevel=4, + # ) if type(typ) is not list or len(typ) != 1: # noqa: E721 raise TypeError(f"Invalid type decorator: '{typ}'") t = typ.__iter__().__next__() diff --git a/csp/impl/wiring/adapters.py b/csp/impl/wiring/adapters.py index 48d0e6ba1..794779e80 100644 --- a/csp/impl/wiring/adapters.py +++ b/csp/impl/wiring/adapters.py @@ -1,4 +1,7 @@ +import inspect from datetime import timedelta +from typing import List +from typing_extensions import override from csp.impl.__cspimpl import _cspimpl from csp.impl.mem_cache import csp_memoized_graph_object @@ -30,7 +33,7 @@ def _instantiate_impl(cls, __forced_tvars, name, args, kwargs): # Note that we augment the returned Edge to be list of expected type, but not the output def # output def remains the original type if kwargs.get("push_mode", None) == PushMode.BURST: - output.tstype = tstype.ts[[output.tstype.typ]] + output.tstype = tstype.ts[List[output.tstype.typ]] return output @@ -43,6 +46,20 @@ def 
__call__(cls, *args, **kwargs): def using(cls, name=None, **__forced_tvars): return lambda *args, **kwargs: cls._instantiate(__forced_tvars, name, *args, **kwargs) + @property + def __signature__(cls): + # Implement so that `help` works properly on adapter definitions. + parameters = [ + inspect.Parameter( + input_def.name, + inspect.Parameter.POSITIONAL_OR_KEYWORD, + annotation=input_def.typ, + default=cls._signature.defaults.get(input_def.name, inspect.Parameter.empty), + ) + for input_def in cls._signature.inputs + ] + return inspect.Signature(parameters) + # Every AdapterDef instance represents an instance of a wiring-time input or output adapter class AdapterDef: @@ -348,7 +365,18 @@ def impl(mgr, engine, scalars): ) -add_graph_output = output_adapter_def( +@override +def add_graph_output( + key: object, + input: tstype.ts["T"], # noqa: F821 + tick_count: int = -1, + tick_history: timedelta = timedelta(), +): + # Stub for IDE auto-complete/static type checking + ... + + +add_graph_output = output_adapter_def( # noqa: F811 "add_graph_output", _cspimpl._graph_output_adapter, key=object, diff --git a/csp/impl/wiring/signature.py b/csp/impl/wiring/signature.py index 2a3ebdbb0..29165fad5 100644 --- a/csp/impl/wiring/signature.py +++ b/csp/impl/wiring/signature.py @@ -273,3 +273,7 @@ def ts_inputs(self): @property def scalars(self): return self._scalars + + @property + def defaults(self): + return self._defaults diff --git a/csp/math.py b/csp/math.py index aeca33e02..d1415d99c 100644 --- a/csp/math.py +++ b/csp/math.py @@ -1,7 +1,7 @@ import math import numpy as np -import typing from functools import lru_cache +from typing import List, TypeVar, get_origin import csp from csp.impl.types.tstype import ts @@ -53,8 +53,8 @@ "tanh", ] -T = typing.TypeVar("T") -U = typing.TypeVar("U") +T = TypeVar("T") +U = TypeVar("U") @node(cppimpl=_cspmathimpl.bitwise_not) @@ -70,7 +70,7 @@ def not_(x: ts[bool]) -> ts[bool]: @node -def andnode(x: [ts[bool]]) -> ts[bool]: +def 
andnode(x: List[ts[bool]]) -> ts[bool]: if csp.valid(x): return all(x.validvalues()) @@ -82,7 +82,7 @@ def and_(*inputs): @node -def ornode(x: [ts[bool]]) -> ts[bool]: +def ornode(x: List[ts[bool]]) -> ts[bool]: if csp.valid(x): return any(x.validvalues()) @@ -270,7 +270,7 @@ def generic_type(x: ts["T"], y: ts["T"]) -> ts[generic_out_type]: return op_lambda(x, y) def comp(x: ts["T"], y: ts["U"]): - if typing.get_origin(x.tstype.typ) in [Numpy1DArray, NumpyNDArray] or typing.get_origin(y.tstype.typ) in [ + if get_origin(x.tstype.typ) in [Numpy1DArray, NumpyNDArray] or get_origin(y.tstype.typ) in [ Numpy1DArray, NumpyNDArray, ]: @@ -326,7 +326,7 @@ def generic_type(x: ts["T"]) -> ts[generic_out_type]: return op_lambda(x) def comp(x: ts["T"]): - if typing.get_origin(x.tstype.typ) in [Numpy1DArray, NumpyNDArray]: + if get_origin(x.tstype.typ) in [Numpy1DArray, NumpyNDArray]: return numpy_type(x) elif x.tstype.typ is float: return float_type(x) diff --git a/csp/profiler.py b/csp/profiler.py index 39a8adcf3..56f74825d 100644 --- a/csp/profiler.py +++ b/csp/profiler.py @@ -4,12 +4,12 @@ import pickle import sys import threading -import typing from collections import defaultdict from concurrent.futures import Future from datetime import datetime from functools import reduce from io import BytesIO +from typing import Dict, List import csp from csp.impl.genericpushadapter import GenericPushAdapter @@ -207,7 +207,7 @@ class ProfilerInfo(Struct): graph_info: GraphInfo build_time: float # seconds - def from_engine(self, p: typing.Dict): + def from_engine(self, p: Dict): """ Convert from dictionary to class repr. 
""" @@ -343,7 +343,7 @@ def memory_snapshot(self, max_size: int = 20): obj_by_size = sorted(obj_info.items(), key=lambda x: x[1][1], reverse=True) return total_size, obj_by_size[:max_size] - def _memory_data_as_df(self, obj_by_size: typing.List): + def _memory_data_as_df(self, obj_by_size: List): import pandas as pd names, data = zip(*obj_by_size) diff --git a/csp/stats.py b/csp/stats.py index 3b30f68bb..d357970f3 100644 --- a/csp/stats.py +++ b/csp/stats.py @@ -1,7 +1,6 @@ import numpy as np -import typing from datetime import datetime, timedelta -from typing import List, TypeVar +from typing import Any, List, Optional, TypeVar, Union import csp from csp import ts @@ -58,7 +57,7 @@ @csp.node(cppimpl=_cspstatsimpl._tick_window_updates) def _tick_window_updates( x: ts[float], interval: int, trigger: ts[object], sampler: ts[object], reset: ts[object], recalc: ts[object] -) -> csp.Outputs(additions=ts[[float]], removals=ts[[float]]): +) -> csp.Outputs(additions=ts[List[float]], removals=ts[List[float]]): raise NotImplementedError("_tick_window_updates only implemented in C++") return csp.output(additions=0, removals=0) @@ -66,7 +65,7 @@ def _tick_window_updates( @csp.node(cppimpl=_cspstatsimpl._time_window_updates) def _time_window_updates( x: ts[float], interval: timedelta, trigger: ts[object], sampler: ts[object], reset: ts[object], recalc: ts[object] -) -> csp.Outputs(additions=ts[[float]], removals=ts[[float]]): +) -> csp.Outputs(additions=ts[List[float]], removals=ts[List[float]]): raise NotImplementedError("_time_window_updates only implemented in C++") return csp.output(additions=0, removals=0) @@ -74,7 +73,7 @@ def _time_window_updates( @csp.node(cppimpl=_cspnpstatsimpl._np_tick_window_updates) def _np_tick_window_updates( x: ts[np.ndarray], interval: int, trigger: ts[object], sampler: ts[object], reset: ts[object], recalc: ts[object] -) -> csp.Outputs(additions=ts[[np.ndarray]], removals=ts[[np.ndarray]]): +) -> csp.Outputs(additions=ts[List[np.ndarray]], 
removals=ts[List[np.ndarray]]): raise NotImplementedError("_np_tick_window_updates only implemented in C++") return csp.output(additions=0, removals=0) @@ -87,20 +86,20 @@ def _np_time_window_updates( sampler: ts[object], reset: ts[object], recalc: ts[object], -) -> csp.Outputs(additions=ts[[np.ndarray]], removals=ts[[np.ndarray]]): +) -> csp.Outputs(additions=ts[List[np.ndarray]], removals=ts[List[np.ndarray]]): raise NotImplementedError("_np_time_window_updates only implemented in C++") return csp.output(additions=0, removals=0) @csp.graph def _window_updates( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int], + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int], trigger: ts[object], sampler: ts[object], reset: ts[object], recalc: ts[object], -) -> csp.Outputs(additions=ts[[typing.Union[float, np.ndarray]]], removals=ts[[typing.Union[float, np.ndarray]]]): +) -> csp.Outputs(additions=ts[List[Union[float, np.ndarray]]], removals=ts[List[Union[float, np.ndarray]]]): """ :param x: the time-series data :param interval: a tick or timedelta interval to calculate over @@ -140,7 +139,7 @@ def _min_hit_by_tick(x: ts["T"], min_window: int, trigger: ts[object]) -> ts[boo @csp.graph -def _min_hit(x: ts["T"], min_window: typing.Union[timedelta, int], trigger: ts[object]) -> ts[bool]: +def _min_hit(x: ts["T"], min_window: Union[timedelta, int], trigger: ts[object]) -> ts[bool]: if isinstance(min_window, int): return _min_hit_by_tick(x, min_window, trigger) return csp.const(True, delay=min_window) @@ -164,8 +163,8 @@ def _sync_nan_np(x: ts[np.ndarray], y: ts[np.ndarray]) -> csp.Outputs(x_sync=ts[ @csp.graph -def _sync_nan(x: ts[typing.Union[float, np.ndarray]], y: ts[typing.Union[float, np.ndarray]]) -> csp.Outputs( - x_sync=ts[typing.Union[float, np.ndarray]], y_sync=ts[typing.Union[float, np.ndarray]] +def _sync_nan(x: ts[Union[float, np.ndarray]], y: ts[Union[float, np.ndarray]]) -> csp.Outputs( + x_sync=ts[Union[float, 
np.ndarray]], y_sync=ts[Union[float, np.ndarray]] ): return _sync_nan_f(x, y) if x.tstype.typ is float else _sync_nan_np(x, y) @@ -193,7 +192,7 @@ def _np_exp(x: ts[np.ndarray]) -> ts[np.ndarray]: @csp.node(cppimpl=_cspnpstatsimpl._list_to_np) -def list_to_numpy(x: [ts[float]], fillna: bool = False) -> ts[csp.typing.Numpy1DArray[float]]: +def list_to_numpy(x: [ts[float]], fillna: bool = False) -> ts[Numpy1DArray[float]]: """ x: listbasket of floats fillna: if True, unticked values will hold their previous value in the array. @@ -353,8 +352,8 @@ def _validate_ema(alpha, span, com, halflife, adjust, horizon, recalc): @csp.node(cppimpl=_cspstatsimpl._count) def _count( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -366,8 +365,8 @@ def _count( @csp.node(cppimpl=_cspnpstatsimpl._np_count) def _np_count( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -379,8 +378,8 @@ def _np_count( @csp.node(cppimpl=_cspstatsimpl._sum) def _sum( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -392,8 +391,8 @@ def _sum( @csp.node(cppimpl=_cspstatsimpl._kahan_sum) def _kahan_sum( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -405,8 +404,8 @@ def _kahan_sum( @csp.node(cppimpl=_cspnpstatsimpl._np_sum) def _np_sum( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -418,8 +417,8 @@ def _np_sum( 
@csp.node(cppimpl=_cspnpstatsimpl._np_kahan_sum) def _np_kahan_sum( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -431,8 +430,8 @@ def _np_kahan_sum( @csp.node(cppimpl=_cspstatsimpl._mean) def _mean( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -444,10 +443,10 @@ def _mean( @csp.node(cppimpl=_cspstatsimpl._weighted_mean) def _weighted_mean( - x_add: ts[[float]], - x_rem: ts[[float]], - y_add: ts[[float]], - y_rem: ts[[float]], + x_add: ts[List[float]], + x_rem: ts[List[float]], + y_add: ts[List[float]], + y_rem: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -459,8 +458,8 @@ def _weighted_mean( @csp.node(cppimpl=_cspnpstatsimpl._np_mean) def _np_mean( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -472,8 +471,8 @@ def _np_mean( @csp.node(cppimpl=_cspstatsimpl._var) def _var( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], arg: int, @@ -486,8 +485,8 @@ def _var( @csp.node(cppimpl=_cspstatsimpl._sem) def _sem( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], arg: int, @@ -500,10 +499,10 @@ def _sem( @csp.node(cppimpl=_cspstatsimpl._weighted_var) def _weighted_var( - x_add: ts[[float]], - x_rem: ts[[float]], - y_add: ts[[float]], - y_rem: ts[[float]], + x_add: ts[List[float]], + x_rem: ts[List[float]], + y_add: ts[List[float]], + y_rem: ts[List[float]], arg: int, trigger: ts[object], reset: ts[object], @@ -516,10 
+515,10 @@ def _weighted_var( @csp.node(cppimpl=_cspstatsimpl._weighted_sem) def _weighted_sem( - x_add: ts[[float]], - x_rem: ts[[float]], - y_add: ts[[float]], - y_rem: ts[[float]], + x_add: ts[List[float]], + x_rem: ts[List[float]], + y_add: ts[List[float]], + y_rem: ts[List[float]], arg: int, trigger: ts[object], reset: ts[object], @@ -532,10 +531,10 @@ def _weighted_sem( @csp.node(cppimpl=_cspnpstatsimpl._np_weighted_mean) def _np_weighted_mean( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - w_add: ts[[np.ndarray]], - w_rem: ts[[np.ndarray]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + w_add: ts[List[np.ndarray]], + w_rem: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -547,10 +546,10 @@ def _np_weighted_mean( @csp.node(cppimpl=_cspstatsimpl._covar) def _covar( - x_add: ts[[float]], - x_rem: ts[[float]], - y_add: ts[[float]], - y_rem: ts[[float]], + x_add: ts[List[float]], + x_rem: ts[List[float]], + y_add: ts[List[float]], + y_rem: ts[List[float]], trigger: ts[object], reset: ts[object], arg: int, @@ -563,10 +562,10 @@ def _covar( @csp.node(cppimpl=_cspstatsimpl._corr) def _corr( - x_add: ts[[float]], - x_rem: ts[[float]], - y_add: ts[[float]], - y_rem: ts[[float]], + x_add: ts[List[float]], + x_rem: ts[List[float]], + y_add: ts[List[float]], + y_rem: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -578,12 +577,12 @@ def _corr( @csp.node(cppimpl=_cspstatsimpl._weighted_covar) def _weighted_covar( - x_add: ts[[float]], - x_rem: ts[[float]], - y_add: ts[[float]], - y_rem: ts[[float]], - w_add: ts[[float]], - w_rem: ts[[float]], + x_add: ts[List[float]], + x_rem: ts[List[float]], + y_add: ts[List[float]], + y_rem: ts[List[float]], + w_add: ts[List[float]], + w_rem: ts[List[float]], trigger: ts[object], reset: ts[object], arg: int, @@ -596,12 +595,12 @@ def _weighted_covar( @csp.node(cppimpl=_cspstatsimpl._weighted_corr) def _weighted_corr( - x_add: ts[[float]], 
- x_rem: ts[[float]], - y_add: ts[[float]], - y_rem: ts[[float]], - w_add: ts[[float]], - w_rem: ts[[float]], + x_add: ts[List[float]], + x_rem: ts[List[float]], + y_add: ts[List[float]], + y_rem: ts[List[float]], + w_add: ts[List[float]], + w_rem: ts[List[float]], trigger: ts[object], reset: ts[object], arg: int, @@ -614,8 +613,8 @@ def _weighted_corr( @csp.node(cppimpl=_cspnpstatsimpl._np_var) def _np_var( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], arg: int, @@ -628,8 +627,8 @@ def _np_var( @csp.node(cppimpl=_cspnpstatsimpl._np_sem) def _np_sem( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], arg: int, @@ -642,10 +641,10 @@ def _np_sem( @csp.node(cppimpl=_cspnpstatsimpl._np_covar) def _np_covar( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - w_add: ts[[np.ndarray]], - w_rem: ts[[np.ndarray]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + w_add: ts[List[np.ndarray]], + w_rem: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], arg: int, @@ -658,10 +657,10 @@ def _np_covar( @csp.node(cppimpl=_cspnpstatsimpl._np_corr) def _np_corr( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - w_add: ts[[np.ndarray]], - w_rem: ts[[np.ndarray]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + w_add: ts[List[np.ndarray]], + w_rem: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -673,10 +672,10 @@ def _np_corr( @csp.node(cppimpl=_cspnpstatsimpl._np_weighted_var) def _np_weighted_var( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - w_add: ts[[np.ndarray]], - w_rem: ts[[np.ndarray]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + w_add: ts[List[np.ndarray]], + w_rem: ts[List[np.ndarray]], arg: int, trigger: ts[object], 
reset: ts[object], @@ -689,10 +688,10 @@ def _np_weighted_var( @csp.node(cppimpl=_cspnpstatsimpl._np_weighted_sem) def _np_weighted_sem( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - w_add: ts[[np.ndarray]], - w_rem: ts[[np.ndarray]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + w_add: ts[List[np.ndarray]], + w_rem: ts[List[np.ndarray]], arg: int, trigger: ts[object], reset: ts[object], @@ -705,12 +704,12 @@ def _np_weighted_sem( @csp.node(cppimpl=_cspnpstatsimpl._np_weighted_covar) def _np_weighted_covar( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - y_add: ts[[np.ndarray]], - y_rem: ts[[np.ndarray]], - w_add: ts[[np.ndarray]], - w_rem: ts[[np.ndarray]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + y_add: ts[List[np.ndarray]], + y_rem: ts[List[np.ndarray]], + w_add: ts[List[np.ndarray]], + w_rem: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], arg: int, @@ -723,12 +722,12 @@ def _np_weighted_covar( @csp.node(cppimpl=_cspnpstatsimpl._np_weighted_corr) def _np_weighted_corr( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - y_add: ts[[np.ndarray]], - y_rem: ts[[np.ndarray]], - w_add: ts[[np.ndarray]], - w_rem: ts[[np.ndarray]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + y_add: ts[List[np.ndarray]], + y_rem: ts[List[np.ndarray]], + w_add: ts[List[np.ndarray]], + w_rem: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], arg: int, @@ -741,8 +740,8 @@ def _np_weighted_corr( @csp.node(cppimpl=_cspnpstatsimpl._np_cov_matrix) def _np_cov_matrix( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], ddof: int, @@ -755,8 +754,8 @@ def _np_cov_matrix( @csp.node(cppimpl=_cspnpstatsimpl._np_corr_matrix) def _np_corr_matrix( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], 
trigger: ts[object], reset: ts[object], ddof: int, @@ -769,10 +768,10 @@ def _np_corr_matrix( @csp.node(cppimpl=_cspnpstatsimpl._np_weighted_cov_matrix) def _np_weighted_cov_matrix( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - w_add: ts[[float]], - w_rem: ts[[float]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + w_add: ts[List[float]], + w_rem: ts[List[float]], trigger: ts[object], reset: ts[object], ddof: int, @@ -785,10 +784,10 @@ def _np_weighted_cov_matrix( @csp.node(cppimpl=_cspnpstatsimpl._np_weighted_corr_matrix) def _np_weighted_corr_matrix( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - w_add: ts[[float]], - w_rem: ts[[float]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + w_add: ts[List[float]], + w_rem: ts[List[float]], trigger: ts[object], reset: ts[object], ddof: int, @@ -801,8 +800,8 @@ def _np_weighted_corr_matrix( @csp.node(cppimpl=_cspstatsimpl._skew) def _skew( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], arg: bool, @@ -816,8 +815,8 @@ def _skew( @csp.node(cppimpl=_cspstatsimpl._kurt) def _kurt( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], arg1: bool, @@ -832,10 +831,10 @@ def _kurt( @csp.node(cppimpl=_cspstatsimpl._weighted_skew) def _weighted_skew( - x_add: ts[[float]], - x_rem: ts[[float]], - y_add: ts[[float]], - y_rem: ts[[float]], + x_add: ts[List[float]], + x_rem: ts[List[float]], + y_add: ts[List[float]], + y_rem: ts[List[float]], trigger: ts[object], reset: ts[object], arg: bool, @@ -848,10 +847,10 @@ def _weighted_skew( @csp.node(cppimpl=_cspstatsimpl._weighted_kurt) def _weighted_kurt( - x_add: ts[[float]], - x_rem: ts[[float]], - y_add: ts[[float]], - y_rem: ts[[float]], + x_add: ts[List[float]], + x_rem: ts[List[float]], + y_add: ts[List[float]], + y_rem: ts[List[float]], 
trigger: ts[object], reset: ts[object], arg1: bool, @@ -865,8 +864,8 @@ def _weighted_kurt( @csp.node(cppimpl=_cspnpstatsimpl._np_skew) def _np_skew( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], arg: bool, @@ -880,8 +879,8 @@ def _np_skew( @csp.node(cppimpl=_cspnpstatsimpl._np_kurt) def _np_kurt( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], arg1: bool, @@ -896,10 +895,10 @@ def _np_kurt( @csp.node(cppimpl=_cspnpstatsimpl._np_weighted_skew) def _np_weighted_skew( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - w_add: ts[[np.ndarray]], - w_rem: ts[[np.ndarray]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + w_add: ts[List[np.ndarray]], + w_rem: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], arg: bool, @@ -912,10 +911,10 @@ def _np_weighted_skew( @csp.node(cppimpl=_cspnpstatsimpl._np_weighted_kurt) def _np_weighted_kurt( - x_add: ts[[np.ndarray]], - x_rem: ts[[np.ndarray]], - w_add: ts[[np.ndarray]], - w_rem: ts[[np.ndarray]], + x_add: ts[List[np.ndarray]], + x_rem: ts[List[np.ndarray]], + w_add: ts[List[np.ndarray]], + w_rem: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], arg1: bool, @@ -929,8 +928,8 @@ def _np_weighted_kurt( @csp.node(cppimpl=_cspstatsimpl._first) def _first( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -942,8 +941,8 @@ def _first( @csp.node(cppimpl=_cspnpstatsimpl._np_first) def _np_first( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -955,8 +954,8 @@ def _np_first( 
@csp.node(cppimpl=_cspstatsimpl._last) def _last( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -968,8 +967,8 @@ def _last( @csp.node(cppimpl=_cspnpstatsimpl._np_last) def _np_last( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -981,8 +980,8 @@ def _np_last( @csp.node(cppimpl=_cspstatsimpl._unique) def _unique( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -995,8 +994,8 @@ def _unique( @csp.node(cppimpl=_cspnpstatsimpl._np_unique) def _np_unique( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -1009,8 +1008,8 @@ def _np_unique( @csp.node(cppimpl=_cspstatsimpl._prod) def _prod( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -1022,8 +1021,8 @@ def _prod( @csp.node(cppimpl=_cspnpstatsimpl._np_prod) def _np_prod( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -1035,9 +1034,9 @@ def _np_prod( @csp.node(cppimpl=_cspstatsimpl._quantile) def _quantile( - additions: ts[[float]], - removals: ts[[float]], - quants: typing.List[float], + additions: ts[List[float]], + removals: ts[List[float]], + quants: List[float], nq: int, interpolation_type: int, trigger: ts[object], @@ -1051,9 +1050,9 @@ def _quantile( @csp.node(cppimpl=_cspnpstatsimpl._np_quantile) def 
_np_quantile( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], - quants: typing.List[float], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], + quants: List[float], nq: int, interpolation_type: int, trigger: ts[object], @@ -1067,8 +1066,8 @@ def _np_quantile( @csp.node(cppimpl=_cspstatsimpl._min_max) def _min_max( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -1081,8 +1080,8 @@ def _min_max( @csp.node(cppimpl=_cspnpstatsimpl._np_min_max) def _np_min_max( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -1095,8 +1094,8 @@ def _np_min_max( @csp.node(cppimpl=_cspstatsimpl._rank) def _rank( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -1110,8 +1109,8 @@ def _rank( @csp.node(cppimpl=_cspnpstatsimpl._np_rank) def _np_rank( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object], min_data_points: int, @@ -1126,7 +1125,7 @@ def _np_rank( @csp.node(cppimpl=_cspstatsimpl._arg_min_max) def _arg_min_max( x: ts[float], - removals: ts[[float]], + removals: ts[List[float]], max: bool, recent: bool, trigger: ts[object], @@ -1142,7 +1141,7 @@ def _arg_min_max( @csp.node(cppimpl=_cspnpstatsimpl._np_arg_min_max) def _np_arg_min_max( x: ts[np.ndarray], - removals: ts[[np.ndarray]], + removals: ts[List[np.ndarray]], max: bool, recent: bool, trigger: ts[object], @@ -1157,8 +1156,8 @@ def _np_arg_min_max( @csp.node(cppimpl=_cspstatsimpl._ema_compute) def _ema_compute( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], 
+ removals: ts[List[float]], alpha: float, ignore_na: bool, horizon: int, @@ -1173,8 +1172,8 @@ def _ema_compute( @csp.node(cppimpl=_cspnpstatsimpl._np_ema_compute) def _np_ema_compute( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], alpha: float, ignore_na: bool, horizon: int, @@ -1189,8 +1188,8 @@ def _np_ema_compute( @csp.node(cppimpl=_cspstatsimpl._ema_adjusted) def _ema_adjusted( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], alpha: float, ignore_na: bool, horizon: int, @@ -1205,8 +1204,8 @@ def _ema_adjusted( @csp.node(cppimpl=_cspnpstatsimpl._np_ema_adjusted) def _np_ema_adjusted( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], alpha: float, ignore_na: bool, horizon: int, @@ -1263,8 +1262,8 @@ def _np_ema_debias_halflife( @csp.node(cppimpl=_cspstatsimpl._ema_debias_alpha) def _ema_debias_alpha( - additions: ts[[float]], - removals: ts[[float]], + additions: ts[List[float]], + removals: ts[List[float]], alpha: float, ignore_na: bool, horizon: int, @@ -1279,8 +1278,8 @@ def _ema_debias_alpha( @csp.node(cppimpl=_cspnpstatsimpl._np_ema_debias_alpha) def _np_ema_debias_alpha( - additions: ts[[np.ndarray]], - removals: ts[[np.ndarray]], + additions: ts[List[np.ndarray]], + removals: ts[List[np.ndarray]], alpha: float, ignore_na: bool, horizon: int, @@ -1295,9 +1294,9 @@ def _np_ema_debias_alpha( @csp.graph def _ema_debias( - x: ts[typing.Union[float, np.ndarray]], - additions: ts[typing.Union[typing.List[float], typing.List[np.ndarray]]], - removals: ts[typing.Union[typing.List[float], typing.List[np.ndarray]]], + x: ts[Union[float, np.ndarray]], + additions: ts[Union[List[float], List[np.ndarray]]], + removals: ts[Union[List[float], List[np.ndarray]]], alpha: float, ignore_na: bool, adjust: bool, @@ -1307,7 +1306,7 @@ def _ema_debias( 
sampler: ts[object], reset: ts[object], min_data_points: int, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: if alpha: if not horizon: horizon = 0 @@ -1328,15 +1327,15 @@ def _ema_debias( @csp.node(cppimpl=_cspstatsimpl._cross_sectional_as_list) def _cross_sectional_as_list( - additions: ts[[float]], removals: ts[[float]], trigger: ts[object], reset: ts[object] -) -> ts[[float]]: + additions: ts[List[float]], removals: ts[List[float]], trigger: ts[object], reset: ts[object] +) -> ts[List[float]]: raise NotImplementedError("_cross_sectional_as_list only implemented in C++") return 0 @csp.node(cppimpl=_cspnpstatsimpl._cross_sectional_as_np) def _cross_sectional_as_np( - additions: ts[[float]], removals: ts[[float]], trigger: ts[object], reset: ts[object] + additions: ts[List[float]], removals: ts[List[float]], trigger: ts[object], reset: ts[object] ) -> ts[np.ndarray]: raise NotImplementedError("_cross_sectional_as_np only implemented in C++") return 0 @@ -1344,15 +1343,15 @@ def _cross_sectional_as_np( @csp.node(cppimpl=_cspnpstatsimpl._np_cross_sectional_as_list) def _np_cross_sectional_as_list( - additions: ts[[np.ndarray]], removals: ts[[np.ndarray]], trigger: ts[object], reset: ts[object] -) -> ts[[np.ndarray]]: + additions: ts[List[np.ndarray]], removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object] +) -> ts[List[np.ndarray]]: raise NotImplementedError("_np_cross_sectional_as_list only implemented in C++") return 0 @csp.node(cppimpl=_cspnpstatsimpl._np_cross_sectional_as_np) def _np_cross_sectional_as_np( - additions: ts[[np.ndarray]], removals: ts[[np.ndarray]], trigger: ts[object], reset: ts[object] + additions: ts[List[np.ndarray]], removals: ts[List[np.ndarray]], trigger: ts[object], reset: ts[object] ) -> ts[np.ndarray]: raise NotImplementedError("_np_cross_sectional_as_np only implemented in C++") return 0 @@ -1364,7 +1363,7 @@ def _np_cross_sectional_as_np( @csp.graph -def _execute_stats(edge: typing.Any = 
None, min_hit: ts[bool] = None) -> ts[typing.Union[float, datetime, np.ndarray]]: +def _execute_stats(edge: Any = None, min_hit: ts[bool] = None) -> ts[Union[float, datetime, np.ndarray]]: # only filter on min_hit if we need to if min_hit is not None: edge = csp.filter(min_hit, edge) @@ -1373,9 +1372,9 @@ def _execute_stats(edge: typing.Any = None, min_hit: ts[bool] = None) -> ts[typi @csp.graph def _arg_minmax( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, return_most_recent: bool = True, ignore_na: bool = True, trigger: ts[object] = None, @@ -1383,7 +1382,7 @@ def _arg_minmax( reset: ts[object] = None, min_data_points: int = 0, max: bool = True, -) -> ts[typing.Union[datetime, np.ndarray]]: +) -> ts[Union[datetime, np.ndarray]]: series, interval, min_window, trigger, min_hit, updates, sampler, reset, _, _, _ = _setup( x, interval, min_window, trigger, sampler, reset ) @@ -1408,15 +1407,15 @@ def _arg_minmax( @csp.graph def count( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the count of (non-nan) ticks in the window, either including/ignoring nan values. 
@@ -1451,15 +1450,15 @@ def count( @csp.graph def unique( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, min_data_points: int = 0, precision: int = 10, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the number of unique non-nan values in the current window. @@ -1493,15 +1492,15 @@ def unique( @csp.graph def first( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, min_data_points: int = 0, ignore_na: bool = True, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the first non-nan value currently within the window. @@ -1534,15 +1533,15 @@ def first( @csp.graph def last( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the last value currently within the window. 
@@ -1576,18 +1575,18 @@ def last( @csp.graph def sum( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, precise: bool = False, ignore_na: bool = True, trigger: ts[object] = None, - weights: ts[typing.Union[float, np.ndarray]] = None, + weights: ts[Union[float, np.ndarray]] = None, sampler: ts[object] = None, reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the sum of values over a given window. @@ -1633,17 +1632,17 @@ def sum( @csp.graph def mean( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, - weights: ts[typing.Union[float, np.ndarray]] = None, + weights: ts[Union[float, np.ndarray]] = None, sampler: ts[object] = None, reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the mean over a rolling window. 
@@ -1705,16 +1704,16 @@ def mean( @csp.graph def prod( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the product over a rolling window. @@ -1747,10 +1746,10 @@ def prod( # Not a graph since it has two different return types: list-basket and time-series def quantile( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - quant: typing.Union[float, typing.List[float]] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + quant: Union[float, List[float]] = None, + min_window: Union[timedelta, int] = None, interpolate: str = "linear", ignore_na: bool = True, trigger: ts[object] = None, @@ -1853,10 +1852,10 @@ def quantile( def min_max( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, max: bool = True, - min_window: typing.Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, sampler: ts[object] = None, @@ -1877,15 +1876,15 @@ def min_max( @csp.graph def max( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, sampler: ts[object] = None, reset: 
ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the maximum value within a given window. @@ -1908,15 +1907,15 @@ def max( @csp.graph def min( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the minimum value within a given window. @@ -1939,9 +1938,9 @@ def min( @csp.graph def rank( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, method: str = "min", ignore_na: bool = True, trigger: ts[object] = None, @@ -1949,7 +1948,7 @@ def rank( reset: ts[object] = None, min_data_points: int = 0, na_option: str = "keep", -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the rank (0-indexed) of the last tick in relation to all other values in the interval. 
@@ -2000,16 +1999,16 @@ def rank( @csp.graph def argmax( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, return_most_recent: bool = True, ignore_na: bool = True, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[datetime, np.ndarray]]: +) -> ts[Union[datetime, np.ndarray]]: """ Returns the datetime at which the maximum value in the interval ticked. @@ -2033,16 +2032,16 @@ def argmax( @csp.graph def argmin( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, return_most_recent: bool = True, ignore_na: bool = True, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[datetime, np.ndarray]]: +) -> ts[Union[datetime, np.ndarray]]: """ Returns the datetime at which the minimum value in the interval ticked. @@ -2066,15 +2065,15 @@ def argmin( @csp.graph def gmean( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the geometric mean of a strictly positive time series over a rolling window. 
@@ -2122,15 +2121,15 @@ def gmean( @csp.graph def median( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the median value in the given window. @@ -2169,19 +2168,19 @@ def median( @csp.graph def cov( - x: ts[typing.Union[float, np.ndarray]], - y: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + y: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ddof: int = 1, ignore_na: bool = True, trigger: ts[object] = None, - weights: ts[typing.Union[float, np.ndarray]] = None, + weights: ts[Union[float, np.ndarray]] = None, sampler: ts[object] = None, reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the covariance between two in-sequence time-series within the given window. If the time-series are of type np.ndarray, the covariance is calculated elementwise. 
@@ -2284,8 +2283,8 @@ def cov( @csp.graph def cov_matrix( x: ts[np.ndarray], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ddof: int = 1, ignore_na: bool = True, trigger: ts[object] = None, @@ -2342,18 +2341,18 @@ def cov_matrix( @csp.graph def var( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ddof: int = 1, ignore_na: bool = True, trigger: ts[object] = None, - weights: ts[typing.Union[float, np.ndarray]] = None, + weights: ts[Union[float, np.ndarray]] = None, sampler: ts[object] = None, reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the variance within the given window. @@ -2419,18 +2418,18 @@ def var( @csp.graph def stddev( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ddof: int = 1, ignore_na: bool = True, trigger: ts[object] = None, - weights: ts[typing.Union[float, np.ndarray]] = None, + weights: ts[Union[float, np.ndarray]] = None, sampler: ts[object] = None, reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the standard deviation within the given window. 
@@ -2456,18 +2455,18 @@ def stddev( @csp.graph def sem( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ddof: int = 1, ignore_na: bool = True, trigger: ts[object] = None, - weights: ts[typing.Union[float, np.ndarray]] = None, + weights: ts[Union[float, np.ndarray]] = None, sampler: ts[object] = None, reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the standard error of the mean within the given window. @@ -2532,18 +2531,18 @@ def sem( @csp.graph def corr( - x: ts[typing.Union[float, np.ndarray]], - y: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + y: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, - weights: ts[typing.Union[float, np.ndarray]] = None, + weights: ts[Union[float, np.ndarray]] = None, sampler: ts[object] = None, reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the correlation between x and y within the given window. If the time-series are of type np.ndarray, the correlation is calculated elementwise. 
@@ -2640,8 +2639,8 @@ def corr( @csp.graph def corr_matrix( x: ts[np.ndarray], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, trigger: ts[object] = None, weights: ts[float] = None, @@ -2693,18 +2692,18 @@ def corr_matrix( @csp.graph def skew( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, bias: bool = False, trigger: ts[object] = None, - weights: ts[typing.Union[float, np.ndarray]] = None, + weights: ts[Union[float, np.ndarray]] = None, sampler: ts[object] = None, reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the skew within the given window. @@ -2769,19 +2768,19 @@ def skew( @csp.graph def kurt( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[timedelta, int] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[timedelta, int] = None, ignore_na: bool = True, excess: bool = True, bias: bool = False, trigger: ts[object] = None, - weights: ts[typing.Union[float, np.ndarray]] = None, + weights: ts[Union[float, np.ndarray]] = None, sampler: ts[object] = None, reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the kurtosis within the given window. 
@@ -2855,11 +2854,11 @@ def kurt( @csp.graph def ema( - x: ts[typing.Union[float, np.ndarray]], + x: ts[Union[float, np.ndarray]], min_periods: int = 1, - alpha: typing.Optional[float] = None, - span: typing.Optional[float] = None, - com: typing.Optional[float] = None, + alpha: Optional[float] = None, + span: Optional[float] = None, + com: Optional[float] = None, halflife: timedelta = None, adjust: bool = True, horizon: int = None, @@ -2869,7 +2868,7 @@ def ema( reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the exponential moving avergae of a time series. @@ -2949,12 +2948,12 @@ def ema( @csp.graph def ema_cov( - x: ts[typing.Union[float, np.ndarray]], - y: ts[typing.Union[float, np.ndarray]], + x: ts[Union[float, np.ndarray]], + y: ts[Union[float, np.ndarray]], min_periods: int = 1, - alpha: typing.Optional[float] = None, - span: typing.Optional[float] = None, - com: typing.Optional[float] = None, + alpha: Optional[float] = None, + span: Optional[float] = None, + com: Optional[float] = None, halflife: timedelta = None, adjust: bool = True, horizon: int = None, @@ -2965,7 +2964,7 @@ def ema_cov( reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the exponential moving covariance between two time series. 
@@ -3065,11 +3064,11 @@ def ema_cov( @csp.graph def ema_var( - x: ts[typing.Union[float, np.ndarray]], + x: ts[Union[float, np.ndarray]], min_periods: int = 1, - alpha: typing.Optional[float] = None, - span: typing.Optional[float] = None, - com: typing.Optional[float] = None, + alpha: Optional[float] = None, + span: Optional[float] = None, + com: Optional[float] = None, halflife: timedelta = None, adjust: bool = True, horizon: int = None, @@ -3080,7 +3079,7 @@ def ema_var( reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the exponential moving variance of a time series. @@ -3158,11 +3157,11 @@ def ema_var( @csp.graph def ema_std( - x: ts[typing.Union[float, np.ndarray]], + x: ts[Union[float, np.ndarray]], min_periods: int = 1, - alpha: typing.Optional[float] = None, - span: typing.Optional[float] = None, - com: typing.Optional[float] = None, + alpha: Optional[float] = None, + span: Optional[float] = None, + com: Optional[float] = None, halflife: timedelta = None, adjust: bool = True, horizon: int = None, @@ -3173,7 +3172,7 @@ def ema_std( reset: ts[object] = None, recalc: ts[object] = None, min_data_points: int = 0, -) -> ts[typing.Union[float, np.ndarray]]: +) -> ts[Union[float, np.ndarray]]: """ Returns the exponential moving standard deviation of a time series. 
@@ -3203,14 +3202,14 @@ def ema_std( @csp.graph def cross_sectional( - x: ts[typing.Union[float, np.ndarray]], - interval: typing.Union[timedelta, int] = None, - min_window: typing.Union[int, timedelta] = None, + x: ts[Union[float, np.ndarray]], + interval: Union[timedelta, int] = None, + min_window: Union[int, timedelta] = None, as_numpy: bool = False, trigger: ts[object] = None, sampler: ts[object] = None, reset: ts[object] = None, -) -> ts[typing.Union[np.ndarray, typing.List[float], typing.List[np.ndarray]]]: +) -> ts[Union[np.ndarray, List[float], List[np.ndarray]]]: """ Returns all data present in the current window so that users can apply their own cross-sectional calculations. diff --git a/csp/tests/adapters/test_numpy.py b/csp/tests/adapters/test_numpy.py index 3be989737..a12f5346d 100644 --- a/csp/tests/adapters/test_numpy.py +++ b/csp/tests/adapters/test_numpy.py @@ -1,6 +1,7 @@ import numpy as np import unittest from datetime import datetime, timedelta +from typing import List import csp @@ -239,13 +240,13 @@ def test_timestamps(self): def test_array(self): raw_vals = [[1, 2], [3], [4, 5, 6]] res = csp.run( - g, typ=[int], values=np.array(raw_vals, dtype="object"), dts=test_dts_ndarray, starttime=test_starttime + g, typ=List[int], values=np.array(raw_vals, dtype="object"), dts=test_dts_ndarray, starttime=test_starttime ) self.assertEqual(res["out"], list(zip(test_dts, raw_vals))) raw_vals = [["hello", "world"], ["hows"], ["it", "going"]] res = csp.run( - g, typ=[str], values=np.array(raw_vals, dtype="object"), dts=test_dts_ndarray, starttime=test_starttime + g, typ=List[str], values=np.array(raw_vals, dtype="object"), dts=test_dts_ndarray, starttime=test_starttime ) self.assertEqual(res["out"], list(zip(test_dts, raw_vals))) diff --git a/csp/tests/adapters/test_slack.py b/csp/tests/adapters/test_slack.py deleted file mode 100644 index a05feb892..000000000 --- a/csp/tests/adapters/test_slack.py +++ /dev/null @@ -1,231 +0,0 @@ -import pytest -from 
datetime import timedelta -from ssl import create_default_context -from unittest.mock import MagicMock, call, patch - -import csp -from csp import ts -from csp.adapters.slack import SlackAdapterManager, SlackMessage, mention_user - - -@csp.node -def hello(msg: ts[SlackMessage]) -> ts[SlackMessage]: - if csp.ticked(msg): - text = f"Hello <@{msg.user_id}>!" - return SlackMessage( - channel="a new channel", - # reply in thread - thread=msg.thread, - msg=text, - ) - - -@csp.node -def react(msg: ts[SlackMessage]) -> ts[SlackMessage]: - if csp.ticked(msg): - return SlackMessage( - channel=msg.channel, - channel_id=msg.channel_id, - thread=msg.thread, - reaction="eyes", - ) - - -@csp.node -def send_fake_message(clientmock: MagicMock, requestmock: MagicMock, am: SlackAdapterManager) -> ts[bool]: - with csp.alarms(): - a_send = csp.alarm(bool) - with csp.start(): - csp.schedule_alarm(a_send, timedelta(seconds=1), True) - if csp.ticked(a_send): - if a_send: - am._process_slack_message(clientmock, requestmock) - csp.schedule_alarm(a_send, timedelta(seconds=1), False) - else: - return True - - -PUBLIC_CHANNEL_MENTION_PAYLOAD = { - "token": "ABCD", - "team_id": "EFGH", - "api_app_id": "HIJK", - "event": { - "client_msg_id": "1234-5678", - "type": "app_mention", - "text": "<@BOTID> <@USERID> <@USERID2>", - "user": "USERID", - "ts": "1.2", - "blocks": [ - { - "type": "rich_text", - "block_id": "tx381", - "elements": [ - { - "type": "rich_text_section", - "elements": [ - {"type": "user", "user_id": "BOTID"}, - {"type": "text", "text": " "}, - {"type": "user", "user_id": "USERID"}, - {"type": "text", "text": " "}, - {"type": "user", "user_id": "USERID2"}, - ], - } - ], - } - ], - "team": "ABCD", - "channel": "EFGH", - "event_ts": "1.2", - }, - "type": "event_callback", - "event_id": "ABCD", - "event_time": 1707423091, - "authorizations": [ - {"enterprise_id": None, "team_id": "ABCD", "user_id": "BOTID", "is_bot": True, "is_enterprise_install": False} - ], - "is_ext_shared_channel": 
False, - "event_context": "SOMELONGCONTEXT", -} -DIRECT_MESSAGE_PAYLOAD = { - "token": "ABCD", - "team_id": "EFGH", - "context_team_id": "ABCD", - "context_enterprise_id": None, - "api_app_id": "HIJK", - "event": { - "client_msg_id": "1234-5678", - "type": "message", - "text": "test", - "user": "USERID", - "ts": "2.1", - "blocks": [ - { - "type": "rich_text", - "block_id": "gB9fq", - "elements": [{"type": "rich_text_section", "elements": [{"type": "text", "text": "test"}]}], - } - ], - "team": "ABCD", - "channel": "EFGH", - "event_ts": "2.1", - "channel_type": "im", - }, - "type": "event_callback", - "event_id": "ABCD", - "event_time": 1707423220, - "authorizations": [ - {"enterprise_id": None, "team_id": "ABCD", "user_id": "BOTID", "is_bot": True, "is_enterprise_install": False} - ], - "is_ext_shared_channel": False, - "event_context": "SOMELONGCONTEXT", -} - - -class TestSlack: - def test_slack_tokens(self): - with pytest.raises(RuntimeError): - SlackAdapterManager("abc", "def") - - @pytest.mark.parametrize("payload", (PUBLIC_CHANNEL_MENTION_PAYLOAD, DIRECT_MESSAGE_PAYLOAD)) - def test_slack(self, payload): - with patch("csp.adapters.slack.SocketModeClient") as clientmock: - # mock out the event from the slack sdk - reqmock = MagicMock() - reqmock.type = "events_api" - reqmock.payload = payload - - # mock out the user/room lookup responses - mock_user_response = MagicMock(name="users_info_mock") - mock_user_response.status_code = 200 - mock_user_response.data = { - "user": {"profile": {"real_name_normalized": "johndoe", "email": "johndoe@some.email"}, "name": "blerg"} - } - clientmock.return_value.web_client.users_info.return_value = mock_user_response - mock_room_response = MagicMock(name="conversations_info_mock") - mock_room_response.status_code = 200 - mock_room_response.data = {"channel": {"is_im": False, "is_private": True, "name": "a private channel"}} - clientmock.return_value.web_client.conversations_info.return_value = mock_room_response - 
mock_list_response = MagicMock(name="conversations_list_mock") - mock_list_response.status_code = 200 - mock_list_response.data = { - "channels": [ - {"name": "a private channel", "id": "EFGH"}, - {"name": "a new channel", "id": "new_channel"}, - ] - } - clientmock.return_value.web_client.conversations_list.return_value = mock_list_response - - def graph(): - am = SlackAdapterManager("xapp-1-dummy", "xoxb-dummy", ssl=create_default_context()) - - # send a fake slack message to the app - stop = send_fake_message(clientmock, reqmock, am) - - # send a response - resp = hello(csp.unroll(am.subscribe())) - am.publish(resp) - - # do a react - rct = react(csp.unroll(am.subscribe())) - am.publish(rct) - - csp.add_graph_output("response", resp) - csp.add_graph_output("react", rct) - - # stop after first messages - done_flag = (csp.count(stop) + csp.count(resp) + csp.count(rct)) == 3 - csp.stop_engine(stop) - - # run the graph - resp = csp.run(graph, realtime=True) - - # check outputs - if payload == PUBLIC_CHANNEL_MENTION_PAYLOAD: - assert resp["react"] - assert resp["response"] - - assert resp["react"][0][1] == SlackMessage( - channel="a private channel", channel_id="EFGH", reaction="eyes", thread="1.2" - ) - assert resp["response"][0][1] == SlackMessage( - channel="a new channel", msg="Hello <@USERID>!", thread="1.2" - ) - else: - assert resp["react"] - assert resp["response"] - - assert resp["react"][0][1] == SlackMessage( - channel="a private channel", channel_id="EFGH", reaction="eyes", thread="2.1" - ) - assert resp["response"][0][1] == SlackMessage( - channel="a new channel", msg="Hello <@USERID>!", thread="2.1" - ) - - # check all inbound mocks got called - if payload == PUBLIC_CHANNEL_MENTION_PAYLOAD: - assert clientmock.return_value.web_client.users_info.call_count == 2 - else: - assert clientmock.return_value.web_client.users_info.call_count == 1 - assert clientmock.return_value.web_client.conversations_info.call_count == 1 - - # check all outbound mocks got 
called - assert clientmock.return_value.web_client.reactions_add.call_count == 1 - assert clientmock.return_value.web_client.chat_postMessage.call_count == 1 - - if payload == PUBLIC_CHANNEL_MENTION_PAYLOAD: - assert clientmock.return_value.web_client.reactions_add.call_args_list == [ - call(channel="EFGH", name="eyes", timestamp="1.2") - ] - assert clientmock.return_value.web_client.chat_postMessage.call_args_list == [ - call(channel="new_channel", text="Hello <@USERID>!") - ] - else: - assert clientmock.return_value.web_client.reactions_add.call_args_list == [ - call(channel="EFGH", name="eyes", timestamp="2.1") - ] - assert clientmock.return_value.web_client.chat_postMessage.call_args_list == [ - call(channel="new_channel", text="Hello <@USERID>!") - ] - - def test_mention_user(self): - assert mention_user("ABCD") == "<@ABCD>" diff --git a/csp/tests/adapters/test_websocket.py b/csp/tests/adapters/test_websocket.py index 41aeb3311..4b6015042 100644 --- a/csp/tests/adapters/test_websocket.py +++ b/csp/tests/adapters/test_websocket.py @@ -138,18 +138,21 @@ def send_msg_on_open(status: ts[Status]) -> ts[str]: if csp.ticked(status): return MsgStruct(a=1234, b="im a string").to_json() + @csp.node def my_edge_that_handles_burst(objs: ts[List[MsgStruct]]) -> ts[bool]: if csp.ticked(objs): return True - + @csp.graph def g(): ws = WebsocketAdapterManager("ws://localhost:8000/") status = ws.status() ws.send(send_msg_on_open(status)) recv = ws.subscribe(MsgStruct, JSONTextMessageMapper(), push_mode=csp.PushMode.BURST) + _ = my_edge_that_handles_burst(recv) + csp.add_graph_output("recv", recv) csp.stop_engine(recv) diff --git a/csp/tests/impl/test_outputadapter.py b/csp/tests/impl/test_outputadapter.py index c271c428f..ae9c469eb 100644 --- a/csp/tests/impl/test_outputadapter.py +++ b/csp/tests/impl/test_outputadapter.py @@ -1,5 +1,6 @@ """this test is derived from e_14_user_adapters_05 and e_14_user_adapters_06""" +import inspect import random import threading import 
unittest @@ -198,3 +199,9 @@ def test_with_manager(self): self.assertIn("publication_data_1", entry) elif "symbol=data_3" in entry: self.assertIn("publication_data_3", entry) + + def test_help(self): + # for `help` to work on output adapters, signature must be defined + sig = inspect.signature(MyBufferWriterAdapter) + self.assertEqual(sig.parameters["input"].annotation, ts["T"]) + self.assertEqual(sig.parameters["output_buffer"].annotation, list) diff --git a/csp/tests/impl/test_pulladapter.py b/csp/tests/impl/test_pulladapter.py index 90e6318e0..ee935eed9 100644 --- a/csp/tests/impl/test_pulladapter.py +++ b/csp/tests/impl/test_pulladapter.py @@ -1,6 +1,7 @@ import time import unittest from datetime import datetime, timedelta +from typing import List import csp from csp import PushMode, ts @@ -15,7 +16,7 @@ class TestPullAdapter(unittest.TestCase): def test_basic(self): # We just use the existing curve adapter to test pull since its currently implemented as a python PullInputAdapter @csp.node - def check(burst: ts[["T"]], lv: ts["T"], nc: ts["T"]): + def check(burst: ts[List["T"]], lv: ts["T"], nc: ts["T"]): if csp.ticked(burst): self.assertEqual(len(burst), 2) self.assertEqual(burst[0], nc) diff --git a/csp/tests/impl/test_pushadapter.py b/csp/tests/impl/test_pushadapter.py index 1aa7c0566..9d814f896 100644 --- a/csp/tests/impl/test_pushadapter.py +++ b/csp/tests/impl/test_pushadapter.py @@ -1,7 +1,9 @@ +import inspect import threading import time import unittest from datetime import datetime, timedelta +from typing import List import csp from csp import PushMode, ts @@ -111,7 +113,7 @@ def __init__(self, mgrImpl, typ, id): class TestPushAdapter(unittest.TestCase): def test_basic(self): @csp.node - def check(burst: ts[["T"]], lv: [ts["T"]], nc: ts["T"]): + def check(burst: ts[List["T"]], lv: List[ts["T"]], nc: ts["T"]): # Assert all last values have the same value since theyre injected in the same batch if csp.ticked(lv) and csp.valid(lv): self.assertTrue(all(v 
== lv[0] for v in lv.validvalues())) @@ -240,6 +242,60 @@ def graph(): result = list(x[1] for x in result) self.assertEqual(result, expected) + def test_adapter_engine_shutdown(self): + class MyPushAdapterImpl(PushInputAdapter): + def __init__(self): + self._thread = None + self._running = False + + def start(self, starttime, endtime): + self._running = True + self._thread = threading.Thread(target=self._run) + self._thread.start() + + def stop(self): + if self._running: + self._running = False + self._thread.join() + + def _run(self): + pushed = False + while self._running: + if pushed: + time.sleep(0.1) + self.shutdown_engine(TypeError("Dummy exception message")) + else: + self.push_tick(0) + pushed = True + + MyPushAdapter = py_push_adapter_def("MyPushAdapter", MyPushAdapterImpl, ts[int]) + + status = {"count": 0} + + @csp.node + def node(x: ts[object]): + if csp.ticked(x): + status["count"] += 1 + + @csp.graph + def graph(): + adapter = MyPushAdapter() + node(adapter) + csp.print("adapter", adapter) + + with self.assertRaisesRegex(TypeError, "Dummy exception message"): + csp.run(graph, starttime=datetime.utcnow(), realtime=True) + self.assertEqual(status["count"], 1) + + def test_help(self): + # for `help` to work on adapters, signature must be defined + sig = inspect.signature(test_adapter) + self.assertEqual(sig.parameters["typ"].annotation, "T") + self.assertEqual(sig.parameters["interval"].annotation, int) + self.assertEqual(sig.parameters["ticks_per_interval"].annotation, int) + self.assertEqual(sig.parameters["push_mode"].annotation, PushMode) + self.assertEqual(sig.parameters["push_group"].annotation, object) + if __name__ == "__main__": unittest.main() diff --git a/csp/tests/impl/test_pushpulladapter.py b/csp/tests/impl/test_pushpulladapter.py index 74faff89f..b52ce0755 100644 --- a/csp/tests/impl/test_pushpulladapter.py +++ b/csp/tests/impl/test_pushpulladapter.py @@ -143,6 +143,64 @@ def graph(): result = [out[1] for out in graph_out[0]] 
self.assertEqual(result, [1, 2, 3]) + def test_adapter_engine_shutdown(self): + class MyPushPullAdapterImpl(PushPullInputAdapter): + def __init__(self, typ, data, shutdown_before_live): + self._data = data + self._thread = None + self._running = False + self._shutdown_before_live = shutdown_before_live + + def start(self, starttime, endtime): + self._running = True + self._thread = threading.Thread(target=self._run) + self._thread.start() + + def stop(self): + if self._running: + self._running = False + self._thread.join() + + def _run(self): + idx = 0 + while self._running and idx < len(self._data): + if idx and self._shutdown_before_live: + time.sleep(0.1) + self.shutdown_engine(ValueError("Dummy exception message")) + t, v = self._data[idx] + self.push_tick(False, t, v) + idx += 1 + self.flag_replay_complete() + + idx = 0 + while self._running: + self.push_tick(True, datetime.utcnow(), len(self._data) + 1) + if idx and not self._shutdown_before_live: + time.sleep(0.1) + self.shutdown_engine(TypeError("Dummy exception message")) + idx += 1 + + MyPushPullAdapter = py_pushpull_adapter_def( + "MyPushPullAdapter", MyPushPullAdapterImpl, ts["T"], typ="T", data=list, shutdown_before_live=bool + ) + + @csp.graph + def graph(shutdown_before_live: bool): + data = [(datetime(2020, 1, 1, 2), 1), (datetime(2020, 1, 1, 3), 2)] + adapter = MyPushPullAdapter(int, data, shutdown_before_live) + csp.print("adapter", adapter) + + with self.assertRaisesRegex(ValueError, "Dummy exception message"): + csp.run(graph, True, starttime=datetime(2020, 1, 1, 1)) + with self.assertRaisesRegex(TypeError, "Dummy exception message"): + csp.run( + graph, + False, + starttime=datetime(2020, 1, 1, 1), + endtime=datetime.utcnow() + timedelta(seconds=2), + realtime=True, + ) + if __name__ == "__main__": unittest.main() diff --git a/csp/tests/impl/test_struct.py b/csp/tests/impl/test_struct.py index 1fdd21284..ae9f2e452 100644 --- a/csp/tests/impl/test_struct.py +++ b/csp/tests/impl/test_struct.py @@ 
-6,6 +6,7 @@ import typing import unittest from datetime import date, datetime, time, timedelta +from typing import Dict, List, Set, Tuple import csp from csp.impl.struct import define_nested_struct, define_struct, defineNestedStruct, defineStruct @@ -25,9 +26,9 @@ class StructNoDefaults(csp.Struct): bt: bytes o: object a1: FastList[int] - a2: [str] + a2: List[str] a3: FastList[object] - a4: [bytes] + a4: List[bytes] class StructWithDefaults(csp.Struct): @@ -38,8 +39,8 @@ class StructWithDefaults(csp.Struct): e: MyEnum = MyEnum.FOO o: object a1: FastList[int] = [1, 2, 3] - a2: [str] = ["1", "2", "3"] - a3: [object] = ["hey", 123, (1, 2, 3)] + a2: List[str] = ["1", "2", "3"] + a3: List[object] = ["hey", 123, (1, 2, 3)] np_arr: np.ndarray = np.array([1, 9]) @@ -74,9 +75,9 @@ class BaseMixed(csp.Struct): s: str l: list o: object - a1: [int] + a1: List[int] a2: FastList[str] - a3: [object] + a3: List[object] class DerivedFullyNative(BaseNative): @@ -124,10 +125,10 @@ class StructWithMutableStruct(csp.Struct): class StructWithLists(csp.Struct): - # native_list: [int] - struct_list: [BaseNative] + # native_list: List[int] + struct_list: List[BaseNative] fast_list: FastList[BaseNative] - dialect_generic_list: [list] + dialect_generic_list: List[list] class AllTypes(csp.Struct): @@ -141,7 +142,7 @@ class AllTypes(csp.Struct): s: str = "hello hello" e: MyEnum = MyEnum.FOO struct: BaseNative = BaseNative(i=123, b=True, f=456.789) - arr: [int] = [1, 2, 3] + arr: List[int] = [1, 2, 3] fl: FastList[int] = [1, 2, 3, 4] o: object = {"k": "v"} @@ -172,7 +173,7 @@ def __init__(self, x: int): class SimpleStructForPickleList(csp.Struct): - a: typing.List[int] + a: List[int] class SimpleStructForPickleFastList(csp.Struct): @@ -271,7 +272,7 @@ class SimpleStructForPickleFastList(csp.Struct): None, ], # generic type user-defined } -struct_list_annotation_types = (typing.List, FastList) +struct_list_annotation_types = (List, FastList) class TestCspStruct(unittest.TestCase): @@ -385,7 
+386,7 @@ class FOO(csp.Struct): # Was a bug with typed list of struct comparisons class BAR(csp.Struct): a: int - b: [FOO] + b: List[FOO] a = BAR(a=123, b=[FOO(a=1, b="2", c=[1, 2, 3])]) b = BAR(a=123, b=[FOO(a=1, b="2", c=[1, 2, 3])]) @@ -519,7 +520,7 @@ class Inner(csp.Struct): v: int class Outer(csp.Struct): - a3: [object] + a3: List[object] i = Inner(v=5) s = Outer(a3=[i, i, i]) @@ -820,8 +821,8 @@ def test_from_dict_loop_with_defaults(self): def test_from_dict_loop_with_generic_typing(self): class MyStruct(csp.Struct): - foo: typing.Set[int] - bar: typing.Tuple[str] + foo: Set[int] + bar: Tuple[str] np_arr: csp.typing.NumpyNDArray[float] looped = MyStruct.from_dict(MyStruct(foo=set((9, 10)), bar=("a", "b"), np_arr=np.array([1, 3])).to_dict()) @@ -838,13 +839,13 @@ class S1(csp.Struct): default_i: int = 42 class S2(csp.Struct): - value: typing.Tuple[int] - set_value: typing.Set[str] + value: Tuple[int] + set_value: Set[str] class S(csp.Struct): - d: typing.Dict[str, S1] - ls: typing.List[int] - lc: typing.List[S2] + d: Dict[str, S1] + ls: List[int] + lc: List[S2] input = """ d: @@ -932,7 +933,7 @@ class FOO(csp.Struct): def test_list_field_set_iterator(self): class S(csp.Struct): - l: [object] + l: List[object] s = S() expected = list(range(100)) @@ -1078,7 +1079,7 @@ def test_define_nested_struct(self): "b": int, "c": { "x": MyEnum, - "y": [int], + "y": List[int], }, "d": {"s": object, "t": FastList[object]}, } @@ -1188,7 +1189,7 @@ def test_struct_printing(self): class StructA(csp.Struct): a: int b: str - c: typing.List[int] + c: List[int] s1 = StructA(a=1, b="b", c=[1, 2]) exp_repr_s1 = "StructA( a=1, b=b, c=[1, 2] )" @@ -1250,7 +1251,7 @@ class StructD(StructC): # test structs with struct, struct array fields class StructE(csp.Struct): a: StructA - b: typing.List[StructC] + b: List[StructC] s5 = StructE( a=StructA(a=1, b="b", c=[1, 2]), @@ -1262,12 +1263,12 @@ class StructE(csp.Struct): # test array fields class StructF(csp.Struct): - a: typing.List[int] - 
b: typing.List[bool] - c: typing.List[typing.List[float]] - d: typing.List[ClassA] - e: typing.List[EnumA] - f: typing.List[StructC] # leave unset for test + a: List[int] + b: List[bool] + c: List[List[float]] + d: List[ClassA] + e: List[EnumA] + f: List[StructC] # leave unset for test # str (called by print) will show unset fields, repr (called in logging) will not s6 = StructF(a=[1], b=[True], c=[[1.0]], d=[ClassA()], e=[EnumA.RED, EnumA.BLUE]) @@ -1281,7 +1282,7 @@ class StructF(csp.Struct): # test unset in arrays/nested structs class StructG(csp.Struct): a: StructA - b: typing.List[StructA] + b: List[StructA] c: ClassA s7 = StructG(a=StructA(), b=[StructA(), StructA()]) @@ -1388,7 +1389,7 @@ def test_bool_array(self): """Test [bool] specific functionality since its special cased as vector in C++""" class A(csp.Struct): - l: [bool] + l: List[bool] raw = [True, False, True] a = A(l=raw) @@ -1606,11 +1607,11 @@ class MyStruct(csp.Struct): def test_to_json_list(self): class MyStruct(csp.Struct): i: int = 123 - l_i: typing.List[int] - l_b: typing.List[bool] - l_dt: typing.List[datetime] - l_l_i: typing.List[typing.List[int]] - l_tuple: typing.Tuple[int, float, str] + l_i: List[int] + l_b: List[bool] + l_dt: List[datetime] + l_l_i: List[List[int]] + l_tuple: Tuple[int, float, str] l_any: list test_struct = MyStruct() @@ -1678,10 +1679,10 @@ class MyStruct(csp.Struct): def test_to_json_dict(self): class MyStruct(csp.Struct): i: int = 123 - d_i: typing.Dict[int, int] - d_f: typing.Dict[float, int] - d_dt: typing.Dict[str, datetime] - d_d_s: typing.Dict[str, typing.Dict[str, str]] + d_i: Dict[int, int] + d_f: Dict[float, int] + d_dt: Dict[str, datetime] + d_d_s: Dict[str, Dict[str, str]] d_any: dict test_struct = MyStruct() @@ -1916,14 +1917,14 @@ class MySubSubStruct(csp.Struct): class MySubStruct(csp.Struct): d_s_msss: dict - l_ncsp: typing.List[NonCspStruct] + l_ncsp: List[NonCspStruct] py_l_ncsp: list class MyStruct(csp.Struct): i: int = 789 s: str = "MyStruct" ts: 
datetime - l_mss: typing.List[MySubStruct] + l_mss: List[MySubStruct] l_msss: list d_i_ncsp: dict @@ -2325,7 +2326,7 @@ class A(csp.Struct): self.assertEqual(s.a, [v[0], v[4], v[2], v[3], v[1], v[5]]) class B(csp.Struct): - a: [MyEnum] + a: List[MyEnum] s = B(a=[MyEnum.A, MyEnum.FOO]) @@ -2765,7 +2766,7 @@ class A(csp.Struct): self.assertEqual(s4.a == s3.a, True) class B(csp.Struct): - a: [MyEnum] + a: List[MyEnum] s = B(a=[MyEnum.A, MyEnum.FOO]) t = B(a=[MyEnum.FOO, MyEnum.FOO]) @@ -2866,15 +2867,15 @@ def test_list_field_correct_type_used(self): """Check that FastList and PyStructList types are used correctly""" class A(csp.Struct): - a: [int] + a: List[int] with self.assertRaises(TypeError): class B(csp.Struct): - a: [int, False] + a: List[int, False] class C(csp.Struct): - a: typing.List[int] + a: List[int] class D(csp.Struct): a: FastList[int] @@ -2882,7 +2883,7 @@ class D(csp.Struct): with self.assertRaises(TypeError): class E(csp.Struct): - a: [int, True] + a: List[int, True] p = A(a=[1, 2]) r = C(a=[1, 2]) @@ -2896,7 +2897,7 @@ def test_list_field_correct_type_passed(self): """Check that FastList can be passed to where Python list is expected""" class A(csp.Struct): - a: [int] + a: List[int] class B(csp.Struct): a: FastList[int] @@ -2933,6 +2934,15 @@ def test_list_field_pickle(self): self.assertEqual(b, s.a) self.assertEqual(type(b), list) + def test_dir(self): + s = SimpleStruct(a=0) + dir_output = dir(s) + self.assertIn("a", dir_output) + self.assertIn("to_dict", dir_output) + self.assertIn("update", dir_output) + self.assertIn("__metadata__", dir_output) + self.assertEqual(dir_output, sorted(dir_output)) + if __name__ == "__main__": unittest.main() diff --git a/csp/tests/test_baselib.py b/csp/tests/test_baselib.py index 20e858fb3..5ab6ba4e8 100644 --- a/csp/tests/test_baselib.py +++ b/csp/tests/test_baselib.py @@ -6,6 +6,7 @@ import unittest from datetime import date, datetime, timedelta, timezone from enum import Enum, auto +from typing import List 
import csp from csp import ts @@ -210,7 +211,7 @@ def test_unroll(self): st = datetime(2020, 1, 1) td = timedelta(seconds=1) x = csp.curve( - [int], + List[int], [ (st, [1]), (st + td * 1, [2, 3, 4]), @@ -220,7 +221,7 @@ def test_unroll(self): ], ) x2 = csp.curve( - [[int]], + List[List[int]], [ (st, [[1]]), (st + td * 1, [[2, 3, 4]]), @@ -662,7 +663,7 @@ class MyStruct(csp.Struct): @csp.graph def my_graph(): ticks = [MyStruct(key=chr(ord("A") + i % 5), value=i) for i in range(1000)] - ticks = csp.unroll(csp.const.using(T=[MyStruct])(ticks)) + ticks = csp.unroll(csp.const.using(T=List[MyStruct])(ticks)) demux = csp.DelayedDemultiplex(ticks, ticks.key, raise_on_bad_key=False) csp.add_graph_output("A", demux.demultiplex("A")) @@ -785,11 +786,11 @@ def g(): def test_drop_dups(self): @csp.graph def g(d1: list, d2: list, d3: list, d4: list, d5: list): - d1 = csp.unroll(csp.const.using(T=[int])(d1)) - d2 = csp.unroll(csp.const.using(T=[tuple])(d2)) - d3 = csp.unroll(csp.const.using(T=[float])(d3)) - d4 = csp.unroll(csp.const.using(T=[float])(d4)) - d5 = csp.unroll(csp.const.using(T=[float])(d5)) + d1 = csp.unroll(csp.const.using(T=List[int])(d1)) + d2 = csp.unroll(csp.const.using(T=List[tuple])(d2)) + d3 = csp.unroll(csp.const.using(T=List[float])(d3)) + d4 = csp.unroll(csp.const.using(T=List[float])(d4)) + d5 = csp.unroll(csp.const.using(T=List[float])(d5)) csp.add_graph_output("d1", csp.drop_dups(d1)) csp.add_graph_output("d2", csp.drop_dups(d2)) @@ -899,8 +900,8 @@ class MyStruct2(csp.Struct): e: MyEnum st: MyStruct o: MyObject - l: [int] - lb: [bool] + l: List[int] + lb: List[bool] @csp.node def random_gen(trigger: ts[object], typ: "T") -> ts["T"]: @@ -947,7 +948,7 @@ def random_delay(x: ts["T"]) -> ts["T"]: return tick @csp.node - def accum_list(x: ts["T"]) -> ts[["T"]]: + def accum_list(x: ts["T"]) -> ts[List["T"]]: with csp.state(): s_nextcount = 1 s_accum = [] diff --git a/csp/tests/test_baskets.py b/csp/tests/test_baskets.py index 8f3bf7dbc..22c95ef00 100644 --- 
a/csp/tests/test_baskets.py +++ b/csp/tests/test_baskets.py @@ -1,10 +1,10 @@ import numpy import random import time -import typing import unittest from collections import defaultdict from datetime import datetime, timedelta +from typing import Dict, List import csp import csp.impl @@ -14,15 +14,15 @@ class TestBaskets(unittest.TestCase): def test_functionality(self): @csp.node - def list_basket(x: [ts[int]]) -> csp.Outputs( - tickedvalues=ts[[int]], - tickeditems=ts[[object]], - tickedkeys=ts[[int]], - validvalues=ts[[int]], - validitems=ts[[object]], - validkeys=ts[[int]], + def list_basket(x: List[ts[int]]) -> csp.Outputs( + tickedvalues=ts[List[int]], + tickeditems=ts[List[object]], + tickedkeys=ts[List[int]], + validvalues=ts[List[int]], + validitems=ts[List[object]], + validkeys=ts[List[int]], valid=ts[bool], - iter=ts[[int]], + iter=ts[List[int]], elem_access=ts[int], ): if csp.ticked(x): @@ -41,13 +41,13 @@ def list_basket(x: [ts[int]]) -> csp.Outputs( csp.output(elem_access=x[1]) @csp.node - def dict_basket(x: {str: ts[int]}) -> csp.Outputs( - tickedvalues=ts[[int]], - tickeditems=ts[[object]], - tickedkeys=ts[[str]], - validvalues=ts[[int]], - validitems=ts[[object]], - validkeys=ts[[str]], + def dict_basket(x: Dict[str, ts[int]]) -> csp.Outputs( + tickedvalues=ts[List[int]], + tickeditems=ts[List[object]], + tickedkeys=ts[List[str]], + validvalues=ts[List[int]], + validitems=ts[List[object]], + validkeys=ts[List[str]], valid=ts[bool], elem_access=ts[int], ): @@ -207,7 +207,7 @@ def gen(x: ts[object], t: "T") -> csp.Outputs( csp.output(changes=changes, ticks=ticks) @csp.node - def consume(x: {ts[str]: ts[float]}, changes: ts[list], ticks: ts[list]): + def consume(x: Dict[ts[str], ts[float]], changes: ts[list], ticks: ts[list]): with csp.state(): s_valid = {} @@ -256,10 +256,10 @@ def g(): random.seed(seed) csp.run(g, starttime=datetime(2021, 5, 25), endtime=timedelta(hours=4)) - def test_dynamic_basket_tick_remove_exception(self) -> 
csp.OutputBasket({ts[str]: ts[int]}): + def test_dynamic_basket_tick_remove_exception(self) -> csp.OutputBasket(Dict[ts[str], ts[int]]): # tick/remove exception check @csp.node - def gen() -> csp.OutputBasket({ts[str]: ts[int]}): + def gen() -> csp.OutputBasket(Dict[ts[str], ts[int]]): with csp.alarms(): a_same_cycle_check = csp.alarm(bool) @@ -271,7 +271,7 @@ def gen() -> csp.OutputBasket({ts[str]: ts[int]}): csp.remove_dynamic_key("FOOBAR") @csp.node - def consume(x: {ts[str]: ts[float]}): + def consume(x: Dict[ts[str], ts[float]]): pass with self.assertRaisesRegex( @@ -281,7 +281,7 @@ def consume(x: {ts[str]: ts[float]}): def test_dynamic_basket_buffering_policy(self): @csp.node - def gen(x: ts[object]) -> csp.OutputBasket({ts[str]: ts[int]}): + def gen(x: ts[object]) -> csp.OutputBasket(Dict[ts[str], ts[int]]): with csp.state(): s_last = defaultdict(lambda: 1) @@ -292,7 +292,7 @@ def gen(x: ts[object]) -> csp.OutputBasket({ts[str]: ts[int]}): csp.output({key: v}) @csp.node - def consume(x: {ts[str]: ts[int]}): + def consume(x: Dict[ts[str], ts[int]]): with csp.start(): csp.set_buffering_policy(x, tick_count=10, tick_history=timedelta(seconds=30)) @@ -311,7 +311,7 @@ def consume(x: {ts[str]: ts[int]}): def test_basket_valid(self): # a basket input that is passive from the start should still register as valid once all its ts have ticked @csp.node - def triggered(x: {str: ts[float]}, y: ts[bool]) -> ts[float]: + def triggered(x: Dict[str, ts[float]], y: ts[bool]) -> ts[float]: with csp.start(): csp.make_passive(x) @@ -333,13 +333,13 @@ def g(): def test_list_basket_np_index(self): @csp.node - def echo_np(x: [ts[float]], num_keys: int) -> csp.OutputBasket(typing.List[ts[float]], shape="num_keys"): + def echo_np(x: List[ts[float]], num_keys: int) -> csp.OutputBasket(List[ts[float]], shape="num_keys"): if csp.ticked(x): all_idx = numpy.arange(num_keys) return dict(zip(all_idx, x)) @csp.node - def echo_int(x: [ts[float]], num_keys: int) -> 
csp.OutputBasket(typing.List[ts[float]], shape="num_keys"): + def echo_int(x: List[ts[float]], num_keys: int) -> csp.OutputBasket(List[ts[float]], shape="num_keys"): if csp.ticked(x): all_idx = numpy.arange(num_keys) return dict(zip(all_idx.tolist(), x)) # Converts idx from np.int64 -> int diff --git a/csp/tests/test_dynamic.py b/csp/tests/test_dynamic.py index 6e3d692f8..5b55f7823 100644 --- a/csp/tests/test_dynamic.py +++ b/csp/tests/test_dynamic.py @@ -6,7 +6,7 @@ import unittest from collections import defaultdict from datetime import datetime, timedelta -from typing import List +from typing import Dict, List import csp from csp import ts @@ -18,7 +18,7 @@ class DynData(csp.Struct): @csp.node -def gen_basket(keys: ts[[str]], deletes: ts[[str]]) -> csp.DynamicBasket[str, DynData]: +def gen_basket(keys: ts[List[str]], deletes: ts[List[str]]) -> csp.DynamicBasket[str, DynData]: with csp.state(): s_counts = defaultdict(int) if csp.ticked(keys): @@ -32,7 +32,7 @@ def gen_basket(keys: ts[[str]], deletes: ts[[str]]) -> csp.DynamicBasket[str, Dy @csp.node -def random_keys(keys: [str], interval: timedelta, repeat: bool) -> ts[[str]]: +def random_keys(keys: List[str], interval: timedelta, repeat: bool) -> ts[List[str]]: with csp.alarms(): x = csp.alarm(int) with csp.state(): @@ -55,9 +55,9 @@ def random_keys(keys: [str], interval: timedelta, repeat: bool) -> ts[[str]]: @csp.node -def delayed_deletes(keys: ts[[str]], delay: timedelta) -> ts[[str]]: +def delayed_deletes(keys: ts[List[str]], delay: timedelta) -> ts[List[str]]: with csp.alarms(): - delete = csp.alarm([str]) + delete = csp.alarm(List[str]) with csp.state(): s_pending = set() @@ -104,7 +104,7 @@ def dyn_graph(key: str): def g(): keys = random_keys(list(string.ascii_uppercase), timedelta(seconds=1), True) - basket = gen_basket(keys, csp.null_ts([str])) + basket = gen_basket(keys, csp.null_ts(List[str])) csp.dynamic(basket, dyn_graph, csp.snapkey()) csp.add_graph_output("keys", keys) @@ -154,7 +154,7 @@ def 
test_dynamic_args(self): """test various "special" arguments""" @csp.graph - def dyn_graph(key: str, val: [str], key_ts: ts[DynData], scalar: str): + def dyn_graph(key: str, val: List[str], key_ts: ts[DynData], scalar: str): csp.add_graph_output(f"{key}_key", csp.const(key)) csp.add_graph_output(f"{key}_val", csp.const(val)) csp.add_graph_output(f"{key}_ts", key_ts) @@ -168,7 +168,7 @@ def dyn_graph(key: str, val: [str], key_ts: ts[DynData], scalar: str): def g(): keys = random_keys(list(string.ascii_uppercase), timedelta(seconds=1), True) csp.add_graph_output("keys", keys) - basket = gen_basket(keys, csp.null_ts([str])) + basket = gen_basket(keys, csp.null_ts(List[str])) csp.dynamic(basket, dyn_graph, csp.snapkey(), csp.snap(keys), csp.attach(), "hello world!") res = csp.run(g, starttime=datetime(2021, 6, 22), endtime=timedelta(seconds=60)) @@ -211,7 +211,7 @@ def g(): s = source_node() keys = random_keys(list(string.ascii_uppercase), timedelta(seconds=1), True) csp.add_graph_output("keys", keys) - basket = gen_basket(keys, csp.null_ts([str])) + basket = gen_basket(keys, csp.null_ts(List[str])) csp.dynamic(basket, dyn_graph, csp.snapkey(), s) csp.run(g, starttime=datetime(2021, 6, 22), endtime=timedelta(seconds=60)) @@ -317,7 +317,7 @@ def dyn_graph_inner(parent_key: str, key: str, x: ts[int]) -> ts[DynData]: return v @csp.graph - def dyn_graph(key: str, x: ts[int]) -> ts[{str: DynData}]: + def dyn_graph(key: str, x: ts[int]) -> ts[Dict[str, DynData]]: keys = random_keys(list("ABC"), timedelta(seconds=0.5), False) deletes = delayed_deletes(keys, timedelta(seconds=1.1)) basket = gen_basket(keys, deletes) @@ -420,7 +420,7 @@ def dyn_graph(x: ts[object], y: ts[object], z: ts[object]): def g(): keys = random_keys(list(string.ascii_uppercase), timedelta(seconds=1), False) - basket = gen_basket(keys, csp.null_ts([str])) + basket = gen_basket(keys, csp.null_ts(List[str])) csp.dynamic( basket, dyn_graph, csp.attach(), csp.timer(timedelta(seconds=1)), 
csp.timer(timedelta(seconds=0.17)) ) @@ -446,12 +446,12 @@ def main(): def test_exceptions(self): # snap / attach in container @csp.graph - def dyn_graph(k: [str]): + def dyn_graph(k: List[str]): pass def g(): keys = random_keys(list(string.ascii_uppercase), timedelta(seconds=1), False) - basket = gen_basket(keys, csp.null_ts([str])) + basket = gen_basket(keys, csp.null_ts(List[str])) csp.dynamic(basket, dyn_graph, [csp.snapkey()]) with self.assertRaisesRegex(TypeError, "csp.snap and csp.attach are not supported as members of containers"): @@ -459,12 +459,12 @@ def g(): # dynamic basket outputs @csp.graph - def dyn_graph(k: str) -> [ts[int]]: + def dyn_graph(k: str) -> List[ts[int]]: return [csp.const(1)] def g(): keys = random_keys(list(string.ascii_uppercase), timedelta(seconds=1), False) - basket = gen_basket(keys, csp.null_ts([str])) + basket = gen_basket(keys, csp.null_ts(List[str])) csp.dynamic(basket, dyn_graph, csp.snapkey()) with self.assertRaisesRegex(TypeError, "csp.dynamic does not support basket outputs of sub_graph"): @@ -477,7 +477,7 @@ def dyn_graph(k: str): def g(): keys = random_keys(list(string.ascii_uppercase), timedelta(seconds=1), False) - basket = gen_basket(keys, csp.null_ts([str])) + basket = gen_basket(keys, csp.null_ts(List[str])) csp.dynamic(basket, dyn_graph, csp.snapkey()) with self.assertRaisesRegex(ValueError, 'graph output key "duplicate" is already bound'): @@ -490,7 +490,7 @@ def dyn_graph(snap: int): def g(): keys = random_keys(list(string.ascii_uppercase), timedelta(seconds=1), False) - basket = gen_basket(keys, csp.null_ts([str])) + basket = gen_basket(keys, csp.null_ts(List[str])) csp.dynamic(basket, dyn_graph, csp.snap(csp.null_ts(int))) with self.assertRaisesRegex(RuntimeError, "csp.snap input \\( sub_graph arg 0 \\) is not valid"): diff --git a/csp/tests/test_engine.py b/csp/tests/test_engine.py index adb67d372..5b5faf278 100644 --- a/csp/tests/test_engine.py +++ b/csp/tests/test_engine.py @@ -12,6 +12,7 @@ import typing 
import unittest from datetime import datetime, timedelta +from typing import Callable, Dict, List import csp from csp import PushMode, ts @@ -451,7 +452,7 @@ def graph(): def test_with_support(self): # This test case tests a parsing bug that we had, where "with" statement at the main function block was causing parse error class ValueSetter(object): - def __init__(self, l: typing.List[int]): + def __init__(self, l: List[int]): self._l = l def __enter__(self): @@ -461,7 +462,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self._l.append(2) @csp.node - def my_node(inp: ts[bool]) -> ts[[int]]: + def my_node(inp: ts[bool]) -> ts[List[int]]: with csp.state(): l = [] with ValueSetter(l): @@ -799,6 +800,55 @@ def graph(): b[t].append(v) self.assertEqual(results["b"], list(b.items())) + def test_adapter_manager_engine_shutdown(self): + from csp.impl.adaptermanager import AdapterManagerImpl, ManagedSimInputAdapter + from csp.impl.wiring import py_managed_adapter_def + + class TestAdapterManager: + def __init__(self): + self._impl = None + + def subscribe(self): + return TestAdapter(self) + + def _create(self, engine, memo): + self._impl = TestAdapterManagerImpl(engine) + return self._impl + + class TestAdapterManagerImpl(AdapterManagerImpl): + def __init__(self, engine): + super().__init__(engine) + + def start(self, starttime, endtime): + pass + + def stop(self): + pass + + def process_next_sim_timeslice(self, now): + try: + [].pop() + except IndexError as e: + self.shutdown_engine(e) + + class TestAdapterImpl(ManagedSimInputAdapter): + def __init__(self, manager_impl): + pass + + TestAdapter = py_managed_adapter_def("TestAdapter", TestAdapterImpl, ts[int], TestAdapterManager) + + def graph(): + adapter = TestAdapterManager() + nc = adapter.subscribe() + csp.add_graph_output("nc", nc) + + try: + csp.run(graph, starttime=datetime(2020, 1, 1), endtime=timedelta(seconds=1)) + except IndexError: + tb = traceback.format_exc() + + self.assertTrue("[].pop()" in tb and 
"process_next_sim_timeslice" in tb) + def test_feedback(self): # Dummy example class Request(csp.Struct): @@ -876,8 +926,8 @@ def unbound_graph(): def test_list_feedback_typecheck(self): @csp.graph - def g() -> csp.ts[[int]]: - fb = csp.feedback([int]) + def g() -> csp.ts[List[int]]: + fb = csp.feedback(List[int]) with self.assertRaisesRegex( TypeError, re.escape(r"""Expected ts[T] for argument 'x', got ts[int](T=typing.List[int])""") ): @@ -891,8 +941,8 @@ def g() -> csp.ts[[int]]: # Test Typing.List which was a bug "crash on feedback tick" @csp.graph - def g() -> csp.ts[typing.List[int]]: - fb = csp.feedback(typing.List[int]) + def g() -> csp.ts[List[int]]: + fb = csp.feedback(List[int]) with self.assertRaisesRegex( TypeError, re.escape(r"""Expected ts[T] for argument 'x', got ts[int](T=typing.List[int])""") ): @@ -908,7 +958,7 @@ def test_list_inside_callable(self): '''was a bug "Empty list inside callable annotation raises exception"''' @csp.graph - def graph(v: typing.Dict[str, typing.Callable[[], str]]): + def graph(v: Dict[str, Callable[[], str]]): pass csp.run(graph, {"x": (lambda v: v)}, starttime=datetime(2020, 6, 17)) @@ -1086,7 +1136,7 @@ def test_timer_exception(self): def test_list_comprehension_bug(self): @csp.node - def list_comprehension_bug_node(n_seconds: int, input: csp.ts["T"]) -> csp.ts[["T"]]: + def list_comprehension_bug_node(n_seconds: int, input: csp.ts["T"]) -> csp.ts[List["T"]]: with csp.start(): csp.set_buffering_policy(input, tick_history=timedelta(seconds=30)) @@ -1426,8 +1476,8 @@ def eq(i: csp.ts["T1"], f: csp.ts["T2"]): @csp.node def basket_wrapper(l: [csp.ts[float]], d: {str: csp.ts[float]}) -> csp.Outputs( - l=csp.OutputBasket(typing.List[csp.ts[float]], shape_of="l"), - d=csp.OutputBasket(typing.Dict[str, csp.ts[float]], shape_of="d"), + l=csp.OutputBasket(List[csp.ts[float]], shape_of="l"), + d=csp.OutputBasket(Dict[str, csp.ts[float]], shape_of="d"), ): if csp.ticked(l): ticked_value_types = set(map(type, l.tickedvalues())) @@ 
-1563,7 +1613,7 @@ def my_graph(x: csp.ts[int]) -> csp.ts[str]: self.assertEqual(str(ctxt.exception), "In function my_graph: Expected ts[str] for return value, got ts[int]") @csp.graph - def dictbasket_graph(x: csp.ts[int]) -> {str: csp.ts[str]}: + def dictbasket_graph(x: csp.ts[int]) -> Dict[str, csp.ts[str]]: return csp.output({"a": x}) with self.assertRaises(ArgTypeMismatchError) as ctxt: @@ -1574,7 +1624,7 @@ def dictbasket_graph(x: csp.ts[int]) -> {str: csp.ts[str]}: ) @csp.graph - def listbasket_graph(x: csp.ts[int]) -> [csp.ts[str]]: + def listbasket_graph(x: csp.ts[int]) -> List[csp.ts[str]]: return csp.output([x]) with self.assertRaises(ArgTypeMismatchError) as ctxt: @@ -1688,12 +1738,12 @@ def g() -> csp.Outputs(a=csp.ts[int], b=csp.ts[int]): def test_unnamed_basket_return(self): @csp.node - def n(x: {str: csp.ts["T"]}) -> csp.OutputBasket(typing.Dict[str, csp.ts["T"]], shape_of="x"): + def n(x: {str: csp.ts["T"]}) -> csp.OutputBasket(Dict[str, csp.ts["T"]], shape_of="x"): if csp.ticked(x): return csp.output({k: v for k, v in x.tickeditems()}) @csp.node - def n2(x: [csp.ts["T"]]) -> csp.OutputBasket(typing.List[csp.ts["T"]], shape_of="x"): + def n2(x: [csp.ts["T"]]) -> csp.OutputBasket(List[csp.ts["T"]], shape_of="x"): if csp.ticked(x): return csp.output({k: v for k, v in x.tickeditems()}) @@ -1817,15 +1867,13 @@ def test_graph_shape_bug(self): @csp.graph def aux(x: [ts[float]], y: {str: ts[float]}) -> csp.Outputs( - o1=csp.OutputBasket(typing.List[ts[float]], shape_of="x"), - o2=csp.OutputBasket(typing.Dict[str, ts[float]], shape_of="y"), + o1=csp.OutputBasket(List[ts[float]], shape_of="x"), + o2=csp.OutputBasket(Dict[str, ts[float]], shape_of="y"), ): return csp.output(o1=x, o2=y) @csp.graph - def g() -> ( - csp.Outputs(o1=csp.OutputBasket(typing.List[ts[float]]), o2=csp.OutputBasket(typing.Dict[str, ts[float]])) - ): + def g() -> csp.Outputs(o1=csp.OutputBasket(List[ts[float]]), o2=csp.OutputBasket(Dict[str, ts[float]])): res = aux([csp.const(1.0), 
csp.const(2.0)], {"3": csp.const(3.0), "4": csp.const(4.0)}) return csp.output(o1=res.o1, o2=res.o2) diff --git a/csp/tests/test_history.py b/csp/tests/test_history.py index 34510924a..291da8bb5 100644 --- a/csp/tests/test_history.py +++ b/csp/tests/test_history.py @@ -1,9 +1,9 @@ import numpy as np import os import psutil -import typing import unittest from datetime import datetime, timedelta +from typing import List import csp from csp import ts @@ -636,7 +636,7 @@ def n(x: csp.ts[float]): i += 1 def g(): - n(csp.unroll(csp.const.using(T=[int])(list(range(100000))))) + n(csp.unroll(csp.const.using(T=List[int])(list(range(100000))))) process = psutil.Process(os.getpid()) mem_before = process.memory_info().rss diff --git a/csp/tests/test_parsing.py b/csp/tests/test_parsing.py index 18ec0c6d5..2adc04ea9 100644 --- a/csp/tests/test_parsing.py +++ b/csp/tests/test_parsing.py @@ -47,21 +47,21 @@ def foo(x: ts[int]): with self.assertRaisesRegex(CspParseError, "Invalid use of 'with_shape'"): @csp.node - def foo(x: [str]): - __outputs__([ts[int]].with_shape(x=1)) + def foo(x: List[str]): + __outputs__(List[ts[int]].with_shape(x=1)) pass with self.assertRaisesRegex(CspParseError, "__outputs__ must all be named or be single output, cant be both"): @csp.node - def foo(x: [str]): + def foo(x: List[str]): __outputs__(ts[int], x=ts[bool]) pass with self.assertRaisesRegex(CspParseError, "__outputs__ single unnamed arg only"): @csp.node - def foo(x: [str]): + def foo(x: List[str]): __outputs__(ts[int], ts[bool]) pass @@ -70,7 +70,7 @@ def foo(x: [str]): ): @csp.node - def foo(x: [str]) -> Outputs(ts[int], ts[bool]): + def foo(x: List[str]) -> Outputs(ts[int], ts[bool]): pass with self.assertRaisesRegex( @@ -78,7 +78,7 @@ def foo(x: [str]) -> Outputs(ts[int], ts[bool]): ): @csp.node - def foo(x: [str]) -> Outputs(ts[int], x=ts[bool]): + def foo(x: List[str]) -> Outputs(ts[int], x=ts[bool]): pass with self.assertRaisesRegex( @@ -86,7 +86,7 @@ def foo(x: [str]) -> Outputs(ts[int], 
 x=ts[bool]): ): @csp.node - def foo(x: [str]) -> Outputs(ts[int]): + def foo(x: List[str]) -> Outputs(ts[int]): __outputs__(ts[int]) pass @@ -96,7 +96,7 @@ def foo(x: [str]) -> Outputs(ts[int]): ): @csp.node - def foo(x: [str]): + def foo(x: List[str]): x = 1 __outputs__(ts[int]) @@ -1751,11 +1751,11 @@ def g_gen(): @csp.node def n_cont() -> ts[bool]: with csp.alarms(): - a: ts[[bool]] = csp.alarm([bool]) - b: ts[[[int]]] = csp.alarm([[int]]) - c: ts[{str: int}] = csp.alarm({str: int}) - d: ts[{str: [int]}] = csp.alarm({str: [int]}) # dict of lists - e: ts[[{str: bool}]] = csp.alarm([{str: bool}]) # list of dicts + a: ts[List[bool]] = csp.alarm(List[bool]) + b: ts[List[List[int]]] = csp.alarm(List[List[int]]) + c: ts[Dict[str, int]] = csp.alarm(Dict[str, int]) + d: ts[Dict[str, List[int]]] = csp.alarm(Dict[str, List[int]]) # dict of lists + e: ts[List[Dict[str, bool]]] = csp.alarm(List[Dict[str, bool]]) # list of dicts with csp.start(): csp.schedule_alarm(a, timedelta(seconds=1), [True]) diff --git a/csp/tests/test_profiler.py b/csp/tests/test_profiler.py index 5b8468557..80ad1e871 100644 --- a/csp/tests/test_profiler.py +++ b/csp/tests/test_profiler.py @@ -9,6 +9,7 @@ import unittest from datetime import date, datetime, time, timedelta from functools import reduce +from typing import List import csp import csp.stats as stats @@ -143,7 +144,7 @@ def graph1(): # From test_dynamic.py @csp.graph - def dyn(key: str, val: [str], key_ts: ts[DynData], scalar: str): + def dyn(key: str, val: List[str], key_ts: ts[DynData], scalar: str): csp.add_graph_output(f"{key}_key", csp.const(key)) csp.add_graph_output(f"{key}_val", csp.const(val)) csp.add_graph_output(f"{key}_ts", key_ts) @@ -154,7 +155,7 @@ def dyn(key: str, val: [str], key_ts: ts[DynData], scalar: str): def graph3(): keys = random_keys(list(string.ascii_uppercase), timedelta(seconds=1), True) csp.add_graph_output("keys", keys) - basket = gen_basket(keys, csp.null_ts([str])) + basket = gen_basket(keys, 
csp.null_ts(List[str])) csp.dynamic(basket, dyn, csp.snapkey(), csp.snap(keys), csp.attach(), "hello world!") with profiler.Profiler() as p: @@ -227,7 +228,7 @@ def test_file_output(self): max_times = df_node.groupby("Node Type").max().reset_index() self.assertEqual( round(prof_info.node_stats["cast_int_to_float"]["max_time"], 4), - round(float(max_times.loc[max_times["Node Type"] == "cast_int_to_float"]["Execution Time"]), 4), + round(float(max_times.loc[max_times["Node Type"] == "cast_int_to_float"]["Execution Time"].iloc[0]), 4), ) # Cleanup files diff --git a/docs/wiki/api-references/Base-Nodes-API.md b/docs/wiki/api-references/Base-Nodes-API.md index 2df3ec982..6fac0b2ee 100644 --- a/docs/wiki/api-references/Base-Nodes-API.md +++ b/docs/wiki/api-references/Base-Nodes-API.md @@ -174,6 +174,7 @@ csp.unroll(x: ts[['T']]) → ts['T'] Given a timeseries of a *list* of values, unroll will "unroll" the values in the list into a timeseries of the elements. `unroll` will ensure to preserve the order across all list ticks. Ticks will be unrolled in subsequent engine cycles. +For a detailed explanation of this behavior, see the documentation on [duplicate timestamps](Execution-Modes#handling-duplicate-timestamps). ## `csp.collect` diff --git a/docs/wiki/api-references/Input-Output-Adapters-API.md b/docs/wiki/api-references/Input-Output-Adapters-API.md index 7ed5976e7..edecae372 100644 --- a/docs/wiki/api-references/Input-Output-Adapters-API.md +++ b/docs/wiki/api-references/Input-Output-Adapters-API.md @@ -15,7 +15,6 @@ - [Publishing](#publishing) - [DBReader](#dbreader) - [TimeAccessor](#timeaccessor) -- [Slack](#slack) ## Kafka @@ -349,7 +348,3 @@ Both of these calls expect `typ` to be a `csp.Struct` type. `subscribe` is used to subscribe to a stream for the given symbol (symbol_column is required when creating DBReader) `subscribe_all` is used to retrieve all the data resulting from the request as a single timeseries. 
 - -## Slack - -The Slack adapter allows for reading and writing of messages from the [Slack](https://slack.com) message platform using the [Slack Python SDK](https://slack.dev/python-slack-sdk/). diff --git a/docs/wiki/concepts/Common-Mistakes.md b/docs/wiki/concepts/Common-Mistakes.md index 6012e04fa..4958fbd85 100644 --- a/docs/wiki/concepts/Common-Mistakes.md +++ b/docs/wiki/concepts/Common-Mistakes.md @@ -74,7 +74,7 @@ from typing import List def next_movie_showing(show_times: ts[List[datetime]]) -> ts[datetime]: next_showing = None for time in show_times: - if time >= csp.now(): # list may include some shows today that have already past, so let's filter those out + if time >= datetime.now(): # list may include some shows today that have already passed, so let's filter those out if next_showing is None or time < next_showing: next_showing = time diff --git a/docs/wiki/concepts/Execution-Modes.md b/docs/wiki/concepts/Execution-Modes.md index 5d288f8c0..0e06d223f 100644 --- a/docs/wiki/concepts/Execution-Modes.md +++ b/docs/wiki/concepts/Execution-Modes.md @@ -6,7 +6,7 @@ All inputs in simulation are driven off the provided timestamped data of its inp In realtime mode, the engine runs in wallclock time as of "now". Realtime engines can get data from realtime adapters which source data on separate threads and pass them through to the engine (ie think of activeMQ events happening on an activeMQ thread and being passed along to the engine in "realtime"). -Since engines can run in both simulated and realtime mode, users should **always** use **`csp.now()`** to get the current time in `csp.node`s. +Since engines can run in both simulated and realtime mode, users should **always** use **`csp.now()`** to get the current time in a `csp.node`. 
## Table of Contents @@ -14,6 +14,7 @@ Since engines can run in both simulated and realtime mode, users should **always - [Simulation Mode](#simulation-mode) - [Realtime Mode](#realtime-mode) - [csp.PushMode](#csppushmode) +- [Handling Duplicate Timestamps](#handling-duplicate-timestamps) - [Realtime Group Event Synchronization](#realtime-group-event-synchronization) ## Simulation Mode @@ -50,6 +51,72 @@ When consuming data from input adapters there are three choices on how one can c | **BURST** | Simulation | all ticks from input source with duplicate timestamps (on the same timeseries) will tick once with a list of all values | | | Realtime | all ticks that occurred since previous engine cycle will tick once with a list of all the values | +## Handling duplicate timestamps + +In `csp`, there can be multiple engine cycles that occur at the same engine time. This is often the case when using nodes with internal alarms (e.g. [`csp.unroll`](Base-Nodes-API#cspunroll)) or using feedback edges ([`csp.feedback`](Feedback-and-Delayed-Edge#cspfeedback)). +If multiple events are scheduled at the same timestamp on a single time-series edge, they will be executed on separate cycles *in the order* they were scheduled. 
For example, consider the code snippet below: + +```python +import csp +from csp import ts +from datetime import datetime, timedelta + +@csp.node +def ticks_n_times(x: ts[int], n: int) -> ts[int]: + # Ticks out a value n times, incrementing it each time + with csp.alarms(): + alarm = csp.alarm(int) + + if csp.ticked(x): + for i in range(n): + csp.schedule_alarm(alarm, timedelta(), x+i) + + if csp.ticked(alarm): + return alarm + +@csp.graph +def duplicate_timestamps(): + v = csp.const(1) + csp.print('ticks_once', ticks_n_times(v, 1)) + csp.print('ticks_twice', ticks_n_times(v, 2)) + csp.print('ticks_thrice', ticks_n_times(v, 3)) + +csp.run(duplicate_timestamps, starttime=datetime(2020,1,1)) +``` + +When we run this graph, the output is: + +```raw +2020-01-01 00:00:00 ticks_once:1 +2020-01-01 00:00:00 ticks_twice:1 +2020-01-01 00:00:00 ticks_thrice:1 +2020-01-01 00:00:00 ticks_twice:2 +2020-01-01 00:00:00 ticks_thrice:2 +2020-01-01 00:00:00 ticks_thrice:3 +``` + +A real life example is when using `csp.unroll` to tick out a list of values on separate engine cycles. If we were to use `csp.sample` on the output, we would get the *first* value that is unrolled at each timestamp. Why? +The event that is scheduled on the sampling timer is its first (and only) event at that time; thus, it is executed on the first engine cycle, and samples the first unrolled value. + +```python +def sampling_unroll(): + u = csp.unroll(csp.const.using(T=[int])([1, 2, 3])) + s = csp.sample(csp.const(True), u) + csp.print('unrolled', u) + csp.print('sampled', s) + +csp.run(sampling_unroll, starttime=datetime(2020,1,1)) +``` + +Output: + +```raw +2020-01-01 00:00:00 unrolled:1 +2020-01-01 00:00:00 sampled:1 +2020-01-01 00:00:00 unrolled:2 +2020-01-01 00:00:00 unrolled:3 +``` + ## Realtime Group Event Synchronization The CSP framework supports properly synchronizing events across multiple timeseries that are sourced from the same realtime adapter. 
diff --git a/docs/wiki/dev-guides/Roadmap.md b/docs/wiki/dev-guides/Roadmap.md index 25bb34c4d..694336fb7 100644 --- a/docs/wiki/dev-guides/Roadmap.md +++ b/docs/wiki/dev-guides/Roadmap.md @@ -2,7 +2,6 @@ We do not have a formal roadmap, but we're happy to discuss features, improvemen Here are some high level items we hope to accomplish in the next few months: -- Support `msvc` compiler and full Windows support ([#109](https://github.com/Point72/csp/issues/109)) - Establish a better pattern for adapters ([#165](https://github.com/Point72/csp/discussions/165)) - Parallelization to improve runtime, for historical/offline distributions - Support for cross-process communication in realtime distributions @@ -10,6 +9,7 @@ Here are some high level items we hope to accomplish in the next few months: ## Adapters and Extensions - C++-based HTTP/SSE adapter +- C++-based Redis adapter - Add support for other graph viewers, including interactive / standalone / Jupyter ## Other Open Source Projects diff --git a/docs/wiki/how-tos/Write-Realtime-Input-Adapters.md b/docs/wiki/how-tos/Write-Realtime-Input-Adapters.md index 2f0195d5a..3576c2509 100644 --- a/docs/wiki/how-tos/Write-Realtime-Input-Adapters.md +++ b/docs/wiki/how-tos/Write-Realtime-Input-Adapters.md @@ -405,3 +405,18 @@ csp.run(my_graph, starttime=datetime.utcnow(), endtime=timedelta(seconds=10), re ``` Do note that realtime adapters will only run in realtime engines (note the `realtime=True` argument to `csp.run`). + +## Engine shutdown + +In case a pushing thread hits a terminal error, an exception can be passed to the main engine thread to shut down gracefully through a `shutdown_engine(exc: Exception)` method exposed by `PushInputAdapter`, `PushPullInputAdapter` and `AdapterManagerImpl`. 
+ +For example: + +```python +def _run(self): + while self._running: + try: + requests.get(endpoint) # API call over a network, may fail + except Exception as exc: + self.shutdown_engine(exc) +``` diff --git a/pyproject.toml b/pyproject.toml index 511405e8e..fb02c2fb5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,7 +79,6 @@ develop = [ "httpx>=0.20,<1", # kafka "polars", # parquet "psutil", # test_engine/test_history - "slack-sdk>=3", # slack "sqlalchemy", # db "threadpoolctl", # test_random "tornado", # profiler, perspective, websocket @@ -108,7 +107,7 @@ symphony = [ "csp-adapter-symphony", ] slack = [ - "slack-sdk>=3", + "csp-adapter-slack", ] [tool.check-manifest]