diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 29f88b23..1b4c489d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -31,7 +31,7 @@ repos: ] - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.3.0 + rev: v1.4.1 hooks: - id: mypy args: [--strict, --ignore-missing-imports] diff --git a/docs/manual.md b/docs/manual.md index 2cff0005..a8de5cc2 100644 --- a/docs/manual.md +++ b/docs/manual.md @@ -28,8 +28,8 @@ Tagreader is intended to be easy to use, and present the same interface to the u - [Caching results](#caching-results) - [Time zones](#time-zones) - [Fetching metadata](#fetching-metadata) - - [get_units()](#get_units) - - [get_description()](#get_description) + - [get_units()](#getunits) + - [get_description()](#getdescription) - [Performing raw queries](#performing-raw-queries) # Requirements diff --git a/tagreader/cache.py b/tagreader/cache.py index 80278d60..3747c999 100644 --- a/tagreader/cache.py +++ b/tagreader/cache.py @@ -104,44 +104,44 @@ def delete_metadata(self, key: str) -> None: class BucketCache(BaseCache): + @staticmethod def _key_path( - self, tagname: str, - readtype: ReaderType, + read_type: ReaderType, ts: Union[int, timedelta], stepped: bool, - status: bool, - start_time: Optional[datetime] = None, - end_time: Optional[datetime] = None, + get_status: bool, + start: Optional[datetime] = None, + end: Optional[datetime] = None, ) -> str: """Return a string on the form - $tagname$readtype[$sample_time][$stepped][$status]$_start_time_end_time + $tagname$read_type[$sample_time][$stepped][$get_status]$_start_end tagname: safe tagname sample_time: integer value. Empty for RAW. stepped: "stepped" if value was read as stepped. Empty if not. - status: "status" if value contains status. Empty if not. - start_time: The start_time of the query that created the bucket. - end_time: The end_time of the query that created the bucket. + get_status: "status" if value contains status. Empty if not. + start: The start of the query that created the bucket. + end: The end of the query that created the bucket. 
""" tagname = safe_tagname(tagname) ts = ( int(ts.total_seconds()) - if readtype != ReaderType.RAW and isinstance(ts, timedelta) + if read_type != ReaderType.RAW and isinstance(ts, timedelta) else ts ) timespan = "" - if start_time is not None: - start_time_epoch = timestamp_to_epoch(start_time) - end_time_epoch = timestamp_to_epoch(end_time) if end_time else end_time - timespan = f"$_{start_time_epoch}_{end_time_epoch}" + if start is not None: + start_epoch = timestamp_to_epoch(start) + end_epoch = timestamp_to_epoch(end) if end else end + timespan = f"$_{start_epoch}_{end_epoch}" keyval = ( f"${tagname}" - f"${readtype.name}" - f"{(ts is not None and readtype != ReaderType.RAW) * f'$s{ts}'}" + f"${read_type.name}" + f"{(ts is not None and read_type != ReaderType.RAW) * f'$s{ts}'}" f"{stepped * '$stepped'}" - f"{status * '$status'}" + f"{get_status * '$get_status'}" f"{timespan}" ) return keyval @@ -150,42 +150,42 @@ def store( self, df: pd.DataFrame, tagname: str, - readtype: ReaderType, + read_type: ReaderType, ts: timedelta, stepped: bool, - status: bool, - start_time: datetime, - end_time: datetime, + get_status: bool, + start: datetime, + end: datetime, ) -> None: if df.empty: return intersecting = self.get_intersecting_datasets( tagname=tagname, - readtype=readtype, + read_type=read_type, ts=ts, stepped=stepped, - status=status, - start_time=start_time, - end_time=end_time, + get_status=get_status, + start=start, + end=end, ) if len(intersecting) > 0: for dataset in intersecting: this_start, this_end = self._get_intervals_from_dataset_name(dataset) - start_time = min(start_time, this_start if this_start else start_time) - end_time = max(end_time, this_end if this_end else end_time) + start = min(start, this_start if this_start else start) + end = max(end, this_end if this_end else end) df2 = self.get(dataset) if df2 is not None: df = pd.concat([df, df2], axis=0) self.delete(dataset) key = self._key_path( tagname=tagname, - readtype=readtype, + read_type=read_type, ts=ts, stepped=stepped, - status=status, - start_time=start_time, - end_time=end_time, + get_status=get_status, + start=start, + end=end, ) self.put(key=key, value=clean_dataframe(df)) @@ -195,21 +195,21 @@ def _get_intervals_from_dataset_name( ) -> Tuple[datetime, datetime]: name_with_times = name.split("$")[-1] if not name_with_times.count("_") == 2: - return (None, None) # type: ignore[return-value] - _, start_time_epoch, end_time_epoch = name_with_times.split("_") - start_time = pd.to_datetime(int(start_time_epoch), unit="s").tz_localize("UTC") - end_time = pd.to_datetime(int(end_time_epoch), unit="s").tz_localize("UTC") - return start_time, end_time + return None, None # type: ignore[return-value] + _, start_epoch, end_epoch = name_with_times.split("_") + start = pd.to_datetime(int(start_epoch), unit="s").tz_localize("UTC") + end = pd.to_datetime(int(end_epoch), unit="s").tz_localize("UTC") + return start, end def get_intersecting_datasets( self, tagname: str, - readtype: ReaderType, + read_type: ReaderType, ts: Union[int, timedelta], stepped: bool, - status: bool, - start_time: datetime, - end_time: datetime, + get_status: bool, + start: datetime, + end: datetime, ) -> List[str]: if not len(self) > 0: return [] @@ -217,41 +217,39 @@ def get_intersecting_datasets( for dataset in self.iterkeys(): target_key = self._key_path( tagname=tagname, - readtype=readtype, - start_time=None, - end_time=None, + read_type=read_type, + start=None, + end=None, ts=ts, stepped=stepped, - status=status, + get_status=get_status, ) if 
target_key in dataset: - start_time_ds, end_time_ds = self._get_intervals_from_dataset_name( - dataset - ) - if end_time_ds >= start_time and end_time >= start_time_ds: + start_ds, end_ds = self._get_intervals_from_dataset_name(dataset) + if end_ds >= start and end >= start_ds: intersecting_datasets.append(dataset) return intersecting_datasets def get_missing_intervals( self, tagname: str, - readtype: ReaderType, + read_type: ReaderType, ts: Union[int, timedelta], stepped: bool, - status: bool, - start_time: datetime, - end_time: datetime, + get_status: bool, + start: datetime, + end: datetime, ) -> List[Tuple[datetime, datetime]]: datasets = self.get_intersecting_datasets( tagname=tagname, - readtype=readtype, + read_type=read_type, ts=ts, stepped=stepped, - status=status, - start_time=start_time, - end_time=end_time, + get_status=get_status, + start=start, + end=end, ) - missing_intervals = [(start_time, end_time)] + missing_intervals = [(start, end)] for dataset in datasets: b = self._get_intervals_from_dataset_name(dataset) for _ in range(0, len(missing_intervals)): @@ -277,12 +275,12 @@ def get_missing_intervals( def fetch( self, tagname: str, - readtype: ReaderType, + read_type: ReaderType, ts: Union[int, timedelta], stepped: bool, - status: bool, - start_time: datetime, - end_time: datetime, + get_status: bool, + start: datetime, + end: datetime, ) -> pd.DataFrame: df = pd.DataFrame() if not len(self) > 0: @@ -293,18 +291,18 @@ def fetch( datasets = self.get_intersecting_datasets( tagname=tagname, - readtype=readtype, + read_type=read_type, ts=ts, stepped=stepped, - status=status, - start_time=start_time, - end_time=end_time, + get_status=get_status, + start=start, + end=end, ) for dataset in datasets: df2 = self.get(dataset) if df2 is not None: - df = pd.concat([df, df2.loc[start_time:end_time]], axis=0) # type: ignore[call-overload, misc] + df = pd.concat([df, df2.loc[start:end]], axis=0) # type: ignore[call-overload, misc] return clean_dataframe(df) @@ -313,7 +311,7 @@ class SmartCache(BaseCache): @staticmethod def key_path( df: Union[str, pd.DataFrame], - readtype: ReaderType, + read_type: ReaderType, ts: Optional[Union[int, timedelta]] = None, ) -> str: """Return a string on the form @@ -323,7 +321,7 @@ def key_path( name = str(list(df)[0]) if isinstance(df, pd.DataFrame) else df name = safe_tagname(name) ts = int(ts.total_seconds()) if isinstance(ts, timedelta) else ts - if readtype != ReaderType.RAW: + if read_type != ReaderType.RAW: if ts is None: # Determine sample time by reading interval between first two # samples of dataframe. 
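# A minimal, illustrative sketch of the two cache-key layouts the renamed arguments
# feed into (tag name, reader type, sample time and window are made-up values, and
# safe_tagname quoting is omitted); it is a sketch, not the cache implementation itself.
from datetime import datetime, timezone

start = datetime(2020, 6, 24, 15, 0, tzinfo=timezone.utc)
end = datetime(2020, 6, 24, 16, 0, tzinfo=timezone.utc)
# BucketCache-style key: tag name, reader type, sample time and the query window in epoch seconds.
bucket_key = f"$ATCAI$INT$s60$_{int(start.timestamp())}_{int(end.timestamp())}"
# SmartCache-style key: reader type, sample time and tag name only.
smart_key = "INT$s60$ATCAI"
print(bucket_key)  # $ATCAI$INT$s60$_1593010800_1593014400
print(smart_key)   # INT$s60$ATCAI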
@@ -333,17 +331,17 @@ def key_path( raise TypeError else: interval = int(ts) - return f"{readtype.name}$s{interval}${name}" + return f"{read_type.name}$s{interval}${name}" else: - return f"{readtype.name}${name}" + return f"{read_type.name}${name}" def store( self, df: pd.DataFrame, - readtype: ReaderType, + read_type: ReaderType, ts: Optional[Union[int, timedelta]] = None, ) -> None: - key = self.key_path(df=df, readtype=readtype, ts=ts) + key = self.key_path(df=df, read_type=read_type, ts=ts) if df.empty: return # Weirdness ensues when using empty df in select statement below if key in self: @@ -361,17 +359,17 @@ def store( def fetch( self, tagname: str, - readtype: ReaderType, + read_type: ReaderType, ts: Optional[Union[int, timedelta]] = None, - start_time: Optional[datetime] = None, - stop_time: Optional[datetime] = None, + start: Optional[datetime] = None, + end: Optional[datetime] = None, ) -> pd.DataFrame: - key = self.key_path(df=tagname, readtype=readtype, ts=ts) + key = self.key_path(df=tagname, read_type=read_type, ts=ts) df = cast(Optional[pd.DataFrame], self.get(key=key)) if df is None: return pd.DataFrame() - if start_time is not None: - df = df.loc[df.index >= start_time] - if stop_time is not None: - df = df.loc[df.index <= stop_time] + if start is not None: + df = df.loc[df.index >= start] + if end is not None: + df = df.loc[df.index <= end] return df diff --git a/tagreader/clients.py b/tagreader/clients.py index aa858364..aaceeb9c 100644 --- a/tagreader/clients.py +++ b/tagreader/clients.py @@ -78,17 +78,17 @@ def list_sources( def get_missing_intervals( df: pd.DataFrame, - start_time: datetime, - stop_time: datetime, + start: datetime, + end: datetime, ts: Optional[timedelta], read_type: ReaderType, ): if ( read_type == ReaderType.RAW ): # Fixme: How to check for completeness for RAW data? - return [[start_time, stop_time]] + return [[start, end]] seconds = int(ts.total_seconds()) - tvec = pd.date_range(start=start_time, end=stop_time, freq=f"{seconds}s") + tvec = pd.date_range(start=start, end=end, freq=f"{seconds}s") if len(df) == len(tvec): # Short-circuit if dataset is complete return [] values_in_df = tvec.isin(df.index) @@ -103,29 +103,29 @@ def get_missing_intervals( ) ) # Should be unnecessary to fetch overlapping points since get_next_timeslice - # ensures start <= t <= stop + # ensures start <= t <= end # missingintervals.append((pd.Timestamp(tvec[seq[0]]), # pd.Timestamp(tvec[min(seq[-1]+1, len(tvec)-1)]))) return missing_intervals def get_next_timeslice( - start_time: datetime, - stop_time: datetime, + start: datetime, + end: datetime, ts: Optional[timedelta], max_steps: Optional[int], ) -> Tuple[datetime, datetime]: if max_steps is None: - calc_stop_time = stop_time + calc_end = end else: - calc_stop_time = start_time + ts * max_steps - calc_stop_time = min(stop_time, calc_stop_time) + calc_end = start + ts * max_steps + calc_end = min(end, calc_end) # Ensure we include the last data point. # Discrepancies between Aspen and Pi for +ts # Discrepancies between IMS and cache for e.g. ts. 
- # if calc_stop_time == stop_time: - # calc_stop_time += ts / 2 - return start_time, calc_stop_time + # if calc_end == end: + # calc_end += ts / 2 + return start, calc_end def get_server_address_aspen(datasource: str) -> Optional[Tuple[str, int]]: @@ -149,7 +149,7 @@ def get_server_address_aspen(datasource: str) -> Optional[Tuple[str, int]]: ) regkey_implemented_categories = winreg.OpenKeyEx(regkey, "Implemented Categories") - _, aspen_UUID = find_registry_key_from_name( + _, aspen_uuid = find_registry_key_from_name( regkey_implemented_categories, "Aspen SQLplus services" ) @@ -159,7 +159,7 @@ def get_server_address_aspen(datasource: str) -> Optional[Tuple[str, int]]: ) try: - reg_site_key = winreg.OpenKey(reg_adsa, datasource + "\\" + aspen_UUID) + reg_site_key = winreg.OpenKey(reg_adsa, datasource + "\\" + aspen_uuid) host = winreg.QueryValueEx(reg_site_key, "Host")[0] port = int(winreg.QueryValueEx(reg_site_key, "Port")[0]) return host, port @@ -365,8 +365,8 @@ def _get_metadata(self, tag: str): def _read_single_tag( self, tag: str, - start_time: Optional[datetime], - stop_time: Optional[datetime], + start: Optional[datetime], + end: Optional[datetime], ts: timedelta, read_type: ReaderType, get_status: bool, @@ -376,8 +376,8 @@ def _read_single_tag( metadata = self._get_metadata(tag) df = self.handler.read_tag( tag=tag, - start_time=start_time, - stop_time=stop_time, + start=start, + end=end, sample_time=ts, read_type=read_type, metadata=metadata, @@ -385,7 +385,7 @@ def _read_single_tag( ) else: stepped = False - missing_intervals = [(start_time, stop_time)] + missing_intervals = [(start, end)] df = pd.DataFrame() if ( @@ -394,19 +394,19 @@ def _read_single_tag( and not get_status ): time_slice = get_next_timeslice( - start_time=start_time, stop_time=stop_time, ts=ts, max_steps=None + start=start, end=end, ts=ts, max_steps=None ) df = cache.fetch( tagname=tag, - readtype=read_type, + read_type=read_type, ts=ts, - start_time=time_slice[0], - end_time=time_slice[1], + start=time_slice[0], + end=time_slice[1], ) missing_intervals = get_missing_intervals( df=df, - start_time=start_time, - stop_time=stop_time, + start=start, + end=end, ts=ts, read_type=read_type, ) @@ -415,33 +415,33 @@ def _read_single_tag( elif isinstance(cache, BucketCache): df = cache.fetch( tagname=tag, - readtype=read_type, + read_type=read_type, ts=ts, stepped=stepped, - status=get_status, - starttime=start_time, - endtime=stop_time, + get_status=get_status, + start=start, + end=end, ) missing_intervals = cache.get_missing_intervals( tagname=tag, - readtype=read_type, + read_type=read_type, ts=ts, stepped=stepped, - status=get_status, - start_time=start_time, - end_time=stop_time, + get_status=get_status, + start=start, + end=end, ) if not missing_intervals: return df.tz_convert(self.tz).sort_index() metadata = self._get_metadata(tag) frames = [df] - for start, stop in missing_intervals: + for start, end in missing_intervals: while True: df = self.handler.read_tag( tag=tag, - start_time=start, - stop_time=stop, + start=start, + end=end, sample_time=ts, read_type=read_type, metadata=metadata, @@ -457,16 +457,16 @@ def _read_single_tag( ] and not get_status ): - cache.store(df=df, readtype=read_type, ts=ts) + cache.store(df=df, read_type=read_type, ts=ts) frames.append(df) if len(df) < self.handler._max_rows: break start = df.index[-1] # if read_type != ReaderType.RAW: # time_slice = [start, start] - # while time_slice[1] < stop: + # while time_slice[1] < end: # time_slice = get_next_timeslice( - # time_slice[1], 
stop, ts, self.handler._max_rows + # time_slice[1], end, ts, self.handler._max_rows # ) # df = self.handler.read_tag( # tag, time_slice[0], time_slice[1], ts, read_type, metadata @@ -528,6 +528,8 @@ def read_tags( read_type: ReaderType = ReaderType.INT, get_status: bool = False, ): + start = start_time + end = stop_time logger.warn( ( "This function has been renamed to read() and is deprecated. " @@ -536,8 +538,8 @@ def read_tags( ) return self.read( tags=tags, - start_time=start_time, - end_time=stop_time, + start_time=start, + end_time=end, ts=ts, read_type=read_type, get_status=get_status, @@ -567,6 +569,8 @@ def read( Values for ReaderType.* that should work for all handlers are: INT, RAW, MIN, MAX, RNG, AVG, VAR, STD and SNAPSHOT """ + start = start_time + end = end_time if isinstance(tags, str): tags = [tags] if isinstance(read_type, str): @@ -574,7 +578,7 @@ def read( read_type = getattr(ReaderType, read_type) except AttributeError: ValueError( - "readtype needs to be of type ReaderType.* or a legal value. Please refer to the docstring." + "read_type needs to be of type ReaderType.* or a legal value. Please refer to the docstring." ) if read_type in [ReaderType.RAW, ReaderType.SNAPSHOT] and len(tags) > 1: raise RuntimeError( @@ -585,17 +589,17 @@ def read( if isinstance(tags, str): tags = [tags] - if start_time is None: - start_time = NONE_START_TIME - elif isinstance(start_time, (str, pd.Timestamp)): + if start is None: + start = NONE_START_TIME + elif isinstance(start, (str, pd.Timestamp)): try: - start_time = convert_to_pydatetime(start_time) + start = convert_to_pydatetime(start) except ValueError: - start_time = convert_to_pydatetime(start_time) - if end_time is None: - end_time = datetime.utcnow() - elif isinstance(end_time, (str, pd.Timestamp)): - end_time = convert_to_pydatetime(end_time) + start = convert_to_pydatetime(start) + if end is None: + end = datetime.utcnow() + elif isinstance(end, (str, pd.Timestamp)): + end = convert_to_pydatetime(end) if isinstance(ts, pd.Timedelta): ts = ts.to_pytimedelta() @@ -608,14 +612,14 @@ def read( ) if read_type != ReaderType.SNAPSHOT: - start_time = ensure_datetime_with_tz(start_time, tz=self.tz) - if end_time: - end_time = ensure_datetime_with_tz(end_time, tz=self.tz) + start = ensure_datetime_with_tz(start, tz=self.tz) + if end: + end = ensure_datetime_with_tz(end, tz=self.tz) - oldtags = tags + old_tags = tags tags = list(dict.fromkeys(tags)) - if len(oldtags) > len(tags): - duplicates = set([x for n, x in enumerate(oldtags) if x in oldtags[:n]]) + if len(old_tags) > len(tags): + duplicates = set([x for n, x in enumerate(old_tags) if x in old_tags[:n]]) logger.warning( f"Duplicate tags found, removed duplicates: {', '.join(duplicates)}" ) @@ -625,8 +629,8 @@ def read( results.append( self._read_single_tag( tag=tag, - start_time=start_time, - stop_time=end_time, + start=start, + end=end, ts=ts, read_type=read_type, get_status=get_status, diff --git a/tagreader/odbc_handlers.py b/tagreader/odbc_handlers.py index 0bc4feaf..9695ceca 100644 --- a/tagreader/odbc_handlers.py +++ b/tagreader/odbc_handlers.py @@ -88,8 +88,8 @@ def generate_connection_string(self) -> str: def generate_read_query( tag: str, mapdef: Optional[Dict[str, str]], - start_time: datetime, - stop_time: datetime, + start: datetime, + end: datetime, sample_time: Optional[timedelta], read_type: ReaderType, get_status: bool = False, @@ -112,8 +112,8 @@ def generate_read_query( if get_status and read_type == ReaderType.SNAPSHOT: raise NotImplementedError - if read_type == 
ReaderType.SNAPSHOT and stop_time is not None: - stop_time = None + if read_type == ReaderType.SNAPSHOT and end is not None: + end = None logger.warning( "End time is not supported for Aspen ODBC connection using 'SNAPSHOT'." "Try the web API 'piwebapi' instead." @@ -121,16 +121,16 @@ def generate_read_query( seconds = 0 if read_type != ReaderType.SNAPSHOT: - start_time = start_time.astimezone(pytz.UTC) - if stop_time: - stop_time = stop_time.astimezone(pytz.UTC) + start = start.astimezone(pytz.UTC) + if end: + end = end.astimezone(pytz.UTC) seconds = int(sample_time.total_seconds()) if read_type == ReaderType.SAMPLED: seconds = 0 else: if seconds <= 0: raise NotImplementedError - # sample_time = (stop_time-start_time).totalseconds + # sample_time = (end-start).totalseconds timecast_format_query = "%Y-%m-%dT%H:%M:%SZ" # 05-jan-18 14:00:00 @@ -154,7 +154,8 @@ def generate_read_query( ReaderType.SNAPSHOT: '"' + str(tag) + '"', }.get(read_type, "aggregates") # For RAW: historyevent? - # Ref https://help.sap.com/saphelp_pco151/helpdata/en/4c/72e34ee631469ee10000000a15822d/content.htm?no_cache=true + # Ref: + # https://help.sap.com/saphelp_pco151/helpdata/en/4c/72e34ee631469ee10000000a15822d/content.htm?no_cache=true ts = "ts" if from_column == "aggregates": @@ -187,8 +188,8 @@ def generate_read_query( query.extend([f"FROM {from_column}"]) if ReaderType.SNAPSHOT != read_type: - start = start_time.strftime(timecast_format_query) - stop = stop_time.strftime(timecast_format_query) + start = start.strftime(timecast_format_query) + end = end.strftime(timecast_format_query) query.extend([f"WHERE name = {tag!r}"]) if mapdef: query.extend([f'AND FIELD_ID = FT({mapdef["MAP_HistoryValue"]!r})']) @@ -197,7 +198,7 @@ def generate_read_query( query.extend( [ f"AND (request = {request_num})", - f"AND (ts BETWEEN {start!r} AND {stop!r})", + f"AND (ts BETWEEN {start!r} AND {end!r})", "ORDER BY ts", ] ) @@ -357,8 +358,8 @@ def _get_tag_description(self, tag: str): def read_tag( self, tag: str, - start_time: datetime, - stop_time: datetime, + start: datetime, + end: datetime, sample_time: Optional[timedelta], read_type: ReaderType, metadata: Optional[Dict[str, str]], @@ -372,8 +373,8 @@ def read_tag( query = self.generate_read_query( tag=cleantag, mapdef=mapdef, - start_time=start_time, - stop_time=stop_time, + start=start, + end=end, sample_time=sample_time, read_type=read_type, get_status=get_status, @@ -459,8 +460,8 @@ def generate_search_query(tag: Optional[str], desc: Optional[str]): def generate_read_query( self, tag: str, - start_time: datetime, - stop_time: datetime, + start: datetime, + end: datetime, sample_time: Optional[timedelta], read_type: ReaderType, metadata: Optional[Dict[str, str]], @@ -476,8 +477,8 @@ def generate_read_query( ]: raise NotImplementedError - if read_type == ReaderType.SNAPSHOT and stop_time is not None: - stop_time = None + if read_type == ReaderType.SNAPSHOT and end is not None: + end = None logger.warning( "End time is not supported for PI ODBC connection using 'SNAPSHOT'." "Try the web API 'piwebapi' instead." 
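# Both ODBC handlers convert the query window to UTC before rendering it into the
# WHERE clause; a small runnable sketch using the Aspen ISO pattern above (the PI
# handler does the same with its own "%d-%b-%y %H:%M:%S" pattern). The times and the
# Europe/Oslo zone are illustrative values, not taken from the code.
from datetime import datetime, timedelta

import pytz

start = pytz.timezone("Europe/Oslo").localize(datetime(2018, 1, 17, 16, 0))
end = start + timedelta(hours=1)
fmt = "%Y-%m-%dT%H:%M:%SZ"
start_s = start.astimezone(pytz.UTC).strftime(fmt)
end_s = end.astimezone(pytz.UTC).strftime(fmt)
print(f"AND (ts BETWEEN {start_s!r} AND {end_s!r})")
# AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z')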
@@ -485,16 +486,16 @@ def generate_read_query( seconds = 0 if read_type != ReaderType.SNAPSHOT: - start_time = start_time.astimezone(pytz.UTC) - if stop_time: - stop_time = stop_time.astimezone(pytz.UTC) + start = start.astimezone(pytz.UTC) + if end: + end = end.astimezone(pytz.UTC) seconds = int(sample_time.total_seconds()) if ReaderType.SAMPLED == read_type: seconds = 0 else: if seconds <= 0: pass # Fixme: Not implemented - # sample_time = (stop_time-start_time).totalseconds + # sample_time = (end-start).totalseconds timecast_format_query = "%d-%b-%y %H:%M:%S" # 05-jan-18 14:00:00 # timecast_format_output = "yyyy-MM-dd HH:mm:ss" @@ -527,7 +528,9 @@ def generate_read_query( else: query = ["SELECT CAST(value as FLOAT32)"] - # query.extend([f"AS value, FORMAT(time, '{timecast_format_output}') AS timestamp FROM {source} WHERE tag='{tag}'"]) + # query.extend( + # [f"AS value, FORMAT(time, '{timecast_format_output}') AS timestamp FROM {source} WHERE tag='{tag}'"] + # ) query.extend(["AS value,"]) if get_status: @@ -536,9 +539,9 @@ def generate_read_query( query.extend([f"time FROM {source} WHERE tag='{tag}'"]) # __utctime also works if ReaderType.SNAPSHOT != read_type: - start = start_time.strftime(timecast_format_query) - stop = stop_time.strftime(timecast_format_query) - query.extend([f"AND (time BETWEEN '{start}' AND '{stop}')"]) + start = start.strftime(timecast_format_query) + end = end.strftime(timecast_format_query) + query.extend([f"AND (time BETWEEN '{start}' AND '{end}')"]) if ReaderType.GOOD == read_type: query.extend(["AND questionable = FALSE"]) @@ -547,7 +550,7 @@ def generate_read_query( elif ReaderType.SHAPEPRESERVING == read_type: query.extend( [ - f"AND (intervalcount = {int((stop_time - start_time).total_seconds() / seconds)})" + f"AND (intervalcount = {int((end - start).total_seconds() / seconds)})" ] ) elif ReaderType.RAW == read_type: @@ -615,8 +618,8 @@ def _is_summary(read_type: ReaderType): def read_tag( self, tag: str, - start_time: datetime, - stop_time: datetime, + start: datetime, + end: datetime, sample_time: Optional[timedelta], read_type: ReaderType, metadata: Optional[Dict[str, str]], @@ -629,8 +632,8 @@ def read_tag( query = self.generate_read_query( tag=tag, - start_time=start_time, - stop_time=stop_time, + start=start, + end=end, sample_time=sample_time, read_type=read_type, get_status=get_status, diff --git a/tagreader/utils.py b/tagreader/utils.py index 4c4bb848..231256c3 100644 --- a/tagreader/utils.py +++ b/tagreader/utils.py @@ -91,11 +91,10 @@ def ensure_datetime_with_tz( def urljoin(*args) -> str: - """Joins components of URL. Ensures slashes are inserted or removed where + """ + Joins components of URL. Ensures slashes are inserted or removed where needed, and does not strip trailing slash of last element. 
- Arguments: - str Returns: str -- Generated URL """ diff --git a/tagreader/web_handlers.py b/tagreader/web_handlers.py index 50ec6a7e..8bdc403c 100644 --- a/tagreader/web_handlers.py +++ b/tagreader/web_handlers.py @@ -19,7 +19,7 @@ from tagreader.utils import ReaderType, is_mac, is_windows, urljoin -def get_verifySSL() -> Union[bool, str]: +def get_verify_ssl() -> Union[bool, str]: if is_windows() or is_mac(): return True return "/etc/ssl/certs/ca-bundle.trust.crt" @@ -56,7 +56,7 @@ def list_aspenone_sources( if verifySSL is False: urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) elif verifySSL is None: - verifySSL = get_verifySSL() + verifySSL = get_verify_ssl() url_ = urljoin(url, "DataSources") params = {"service": "ProcessData", "allQuotes": 1} @@ -84,7 +84,7 @@ def list_piwebapi_sources( if verifySSL is False: urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) elif verifySSL is None: - verifySSL = get_verifySSL() + verifySSL = get_verify_ssl() url_ = urljoin(url, "dataservers") res = requests.get(url_, auth=auth, verify=verifySSL) @@ -111,7 +111,7 @@ def __init__( self.session.auth = auth if auth is not None else get_auth_aspen() if verifySSL is False: urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) - self.session.verify = verifySSL if verifySSL is not None else get_verifySSL() + self.session.verify = verifySSL if verifySSL is not None else get_verify_ssl() def fetch(self, url, params: Optional[Union[str, Dict[str, str]]] = None) -> Dict: res = self.session.get(url, params=params) @@ -195,8 +195,8 @@ def generate_read_query( self, tagname: str, mapname: Optional[str], - start_time: datetime, - stop_time: datetime, + start: datetime, + end: datetime, sample_time: Optional[timedelta], read_type: ReaderType, metadata: Any, @@ -226,14 +226,14 @@ def generate_read_query( }.get(read_type, -1) if read_type == ReaderType.SNAPSHOT: - if stop_time is not None: + if end is not None: use_current = 0 - end_time = int(stop_time.timestamp()) * 1000 + end = int(end.timestamp()) * 1000 else: use_current = 1 - end_time = 0 + end = 0 - query = f'' + query = f'' else: query = '' @@ -249,8 +249,8 @@ def generate_read_query( else: query += ( "0" # History format: 0=Raw, 1=RecordAsString - f"{int(start_time.timestamp()) * 1000}" - f"{int(stop_time.timestamp()) * 1000}" + f"{int(start.timestamp()) * 1000}" + f"{int(end.timestamp()) * 1000}" f"{rt}" ) if read_type in [ReaderType.RAW, ReaderType.SHAPEPRESERVING]: @@ -443,15 +443,15 @@ def _get_tag_description(self, tag: str): data = self.fetch(url, params=query) try: desc = data["data"]["tags"][0]["attrData"][0]["samples"][0]["v"] - except Exception: + except KeyError: desc = "" return desc def read_tag( self, tag: str, - start_time: Optional[datetime], - stop_time: Optional[datetime], + start: Optional[datetime], + end: Optional[datetime], sample_time: Optional[timedelta], read_type: ReaderType, metadata: Optional[Dict[str, str]], @@ -478,20 +478,20 @@ def read_tag( # Actual and bestfit read types allow specifying maxpoints. # Aggregate reads limit to 10 000 points and issue a moredata-token. # TODO: May need to look into using this later - most likely more - # efficient than creating new query starting at previous stoptime. + # efficient than creating new query starting at previous end time. # Interpolated reads return error message if more than 100 000 points, # so we need to limit the range. Note -1 because INT normally includes # both start and end time. 
if read_type == ReaderType.INT: - stop_time = min(stop_time, start_time + sample_time * (self._max_rows - 1)) + end = min(end, start + sample_time * (self._max_rows - 1)) tagname, mapname = self.split_tagmap(tag) params = self.generate_read_query( tagname=tagname, mapname=mapname, - start_time=start_time, - stop_time=stop_time, + start=start, + end=end, sample_time=sample_time, read_type=read_type, metadata={}, @@ -669,8 +669,8 @@ def generate_search_query( def generate_read_query( self, tag: str, - start_time: datetime, - stop_time: datetime, + start: datetime, + end: datetime, sample_time: timedelta, read_type: ReaderType, metadata: Optional[Dict[str, str]], @@ -703,11 +703,11 @@ def generate_read_query( params = {} if read_type != ReaderType.SNAPSHOT: - params["startTime"] = self._time_to_UTC_string(start_time) - params["endTime"] = self._time_to_UTC_string(stop_time) + params["startTime"] = self._time_to_UTC_string(start) + params["endTime"] = self._time_to_UTC_string(end) params["timeZone"] = "UTC" - elif read_type == ReaderType.SNAPSHOT and stop_time is not None: - params["time"] = self._time_to_UTC_string(stop_time) + elif read_type == ReaderType.SNAPSHOT and end is not None: + params["time"] = self._time_to_UTC_string(end) params["timeZone"] = "UTC" summary_type = { @@ -782,7 +782,7 @@ def search( ret.append((item["Name"], description)) next_start = int(data["Links"]["Next"].split("=")[-1]) if int(data["Links"]["Last"].split("=")[-1]) >= next_start: - params["start"] = next_start + params["start"] = next_start # noqa else: done = True return ret @@ -861,8 +861,8 @@ def _is_summary(read_type: ReaderType) -> bool: def read_tag( self, tag: str, - start_time: Optional[datetime], - stop_time: Optional[datetime], + start: Optional[datetime], + end: Optional[datetime], sample_time: timedelta, read_type: ReaderType, metadata: Optional[Dict[str, str]], @@ -874,8 +874,8 @@ def read_tag( (url, params) = self.generate_read_query( tag=webid, - start_time=start_time, - stop_time=stop_time, + start=start, + end=end, sample_time=sample_time, read_type=read_type, metadata={}, @@ -949,7 +949,7 @@ def read_tag( # Correct weird bug in PI Web API where MAX timestamps end of interval while # all the other summaries stamp start of interval by shifting all timestamps # one interval down. 
- if read_type == ReaderType.MAX and df.index[0] > start_time: + if read_type == ReaderType.MAX and df.index[0] > start: df.index = df.index - sample_time if get_status: diff --git a/tests/extratests b/tests/extratests index 1e683be2..18b4bce1 160000 --- a/tests/extratests +++ b/tests/extratests @@ -1 +1 @@ -Subproject commit 1e683be273f0d463cf60939fb709b6d64a29b299 +Subproject commit 18b4bce194cc1c7c36e4187c90a5b95cd3735d52 diff --git a/tests/test_AspenHandlerODBC.py b/tests/test_AspenHandlerODBC.py index 04ce6c9b..597370b6 100644 --- a/tests/test_AspenHandlerODBC.py +++ b/tests/test_AspenHandlerODBC.py @@ -57,25 +57,25 @@ def test_generate_connection_string(aspen_handler: AspenHandlerODBC) -> None: ) def test_generate_tag_read_query(read_type_str: str) -> None: read_type = getattr(ReaderType, read_type_str) - starttime = ensure_datetime_with_tz(START_TIME) + start = ensure_datetime_with_tz(START_TIME) stoptime = ensure_datetime_with_tz(STOP_TIME) ts = timedelta(seconds=SAMPLE_TIME) if read_type == ReaderType.SNAPSHOT: res = AspenHandlerODBC.generate_read_query( - tag="thetag", + tag="the_tag", mapdef=None, - start_time=None, # type: ignore[arg-type] - stop_time=None, # type: ignore[arg-type] + start=None, # type: ignore[arg-type] + end=None, # type: ignore[arg-type] sample_time=None, read_type=read_type, ) else: res = AspenHandlerODBC.generate_read_query( - tag="thetag", + tag="the_tag", mapdef=None, - start_time=starttime, - stop_time=stoptime, + start=start, + end=stoptime, sample_time=ts, read_type=read_type, ) @@ -83,55 +83,55 @@ def test_generate_tag_read_query(read_type_str: str) -> None: expected = { "RAW": ( 'SELECT ISO8601(ts) AS "time", value AS "value" FROM history WHERE ' - "name = 'thetag' AND (request = 4) " + "name = 'the_tag' AND (request = 4) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "INT": ( 'SELECT ISO8601(ts) AS "time", value AS "value" FROM history WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 7) " + "name = 'the_tag' AND (period = 600) AND (request = 7) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "MIN": ( 'SELECT ISO8601(ts_start) AS "time", min AS "value" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "MAX": ( 'SELECT ISO8601(ts_start) AS "time", max AS "value" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "RNG": ( 'SELECT ISO8601(ts_start) AS "time", rng AS "value" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "AVG": ( 'SELECT ISO8601(ts_start) AS "time", avg AS "value" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "STD": ( 'SELECT ISO8601(ts_start) AS "time", std AS "value" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN 
'2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "VAR": ( 'SELECT ISO8601(ts_start) AS "time", var AS "value" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "SNAPSHOT": ( 'SELECT ISO8601(IP_INPUT_TIME) AS "time", IP_INPUT_VALUE AS "value" ' - 'FROM "thetag"' + 'FROM "the_tag"' ), } @@ -162,26 +162,26 @@ def test_generate_tag_read_query(read_type_str: str) -> None: ) def test_generate_tag_read_query_with_status(read_type_str: str) -> None: read_type = getattr(ReaderType, read_type_str) - starttime = ensure_datetime_with_tz(START_TIME) - endtime = ensure_datetime_with_tz(STOP_TIME) + start = ensure_datetime_with_tz(START_TIME) + end = ensure_datetime_with_tz(STOP_TIME) ts = timedelta(seconds=SAMPLE_TIME) if read_type == ReaderType.SNAPSHOT: res = AspenHandlerODBC.generate_read_query( - tag="thetag", + tag="the_tag", mapdef=None, - start_time=None, # type: ignore[arg-type] - stop_time=None, # type: ignore[arg-type] + start=None, # type: ignore[arg-type] + end=None, # type: ignore[arg-type] sample_time=None, read_type=read_type, get_status=True, ) else: res = AspenHandlerODBC.generate_read_query( - tag="thetag", + tag="the_tag", mapdef=None, - start_time=starttime, - stop_time=endtime, + start=start, + end=end, sample_time=ts, read_type=read_type, get_status=True, @@ -191,56 +191,56 @@ def test_generate_tag_read_query_with_status(read_type_str: str) -> None: "RAW": ( 'SELECT ISO8601(ts) AS "time", value AS "value" ' ', status AS "status" FROM history WHERE ' - "name = 'thetag' AND (request = 4) " + "name = 'the_tag' AND (request = 4) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "INT": ( 'SELECT ISO8601(ts) AS "time", value AS "value" ' ', status AS "status" FROM history WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 7) " + "name = 'the_tag' AND (period = 600) AND (request = 7) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "MIN": ( 'SELECT ISO8601(ts_start) AS "time", min AS "value" ' ', status AS "status" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "MAX": ( 'SELECT ISO8601(ts_start) AS "time", max AS "value" ' ', status AS "status" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "RNG": ( 'SELECT ISO8601(ts_start) AS "time", rng AS "value" ' ', status AS "status" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "AVG": ( 'SELECT ISO8601(ts_start) AS "time", avg AS "value" ' ', status AS "status" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "STD": ( 'SELECT ISO8601(ts_start) AS "time", std AS "value" ' ', status AS "status" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) 
AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), "VAR": ( 'SELECT ISO8601(ts_start) AS "time", var AS "value" ' ', status AS "status" FROM aggregates WHERE ' - "name = 'thetag' AND (period = 600) AND (request = 1) " + "name = 'the_tag' AND (period = 600) AND (request = 1) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ), @@ -249,23 +249,23 @@ def test_generate_tag_read_query_with_status(read_type_str: str) -> None: assert expected[read_type.name] == res -def test_genreadquery_long_sampletime() -> None: - starttime = ensure_datetime_with_tz(START_TIME) - stoptime = ensure_datetime_with_tz(STOP_TIME) +def test_generate_read_query_long_sample_time() -> None: + start = ensure_datetime_with_tz(START_TIME) + end = ensure_datetime_with_tz(STOP_TIME) ts = timedelta(seconds=86401) res = AspenHandlerODBC.generate_read_query( - tag="thetag", + tag="the_tag", mapdef=None, - start_time=starttime, - stop_time=stoptime, + start=start, + end=end, sample_time=ts, read_type=ReaderType.INT, ) expected = ( 'SELECT ISO8601(ts) AS "time", value AS "value" FROM history WHERE ' - "name = 'thetag' AND (period = 864010) AND (request = 7) " + "name = 'the_tag' AND (period = 864010) AND (request = 7) " "AND (ts BETWEEN '2018-01-17T15:00:00Z' AND '2018-01-17T16:00:00Z') " "ORDER BY ts" ) diff --git a/tests/test_AspenHandlerODBC_connect.py b/tests/test_AspenHandlerODBC_connect.py index 19e772bf..f72da0cb 100644 --- a/tests/test_AspenHandlerODBC_connect.py +++ b/tests/test_AspenHandlerODBC_connect.py @@ -14,9 +14,9 @@ from tagreader.odbc_handlers import list_aspen_sources -is_GITHUBACTION = "GITHUB_ACTION" in os.environ +is_GITHUB_ACTIONS = "GITHUB_ACTION" in os.environ -if is_GITHUBACTION: +if is_GITHUB_ACTIONS: pytest.skip( "All tests in module require connection to Aspen server", allow_module_level=True, @@ -29,7 +29,7 @@ @pytest.fixture # type: ignore[misc] -def Client() -> Generator[IMSClient, None, None]: +def client() -> Generator[IMSClient, None, None]: c = IMSClient(datasource=SOURCE, imstype="ip21") c.cache = None # type: ignore[assignment] c.connect() @@ -56,24 +56,13 @@ def test_list_sources_aspen() -> None: assert 3 <= len(r) <= 20 -# def test_read_unknown_tag(Client): -# with pytest.warns(UserWarning): -# df = Client.read(["sorandomitcantexist"], START_TIME, STOP_TIME) -# assert len(df.index) == 0 -# assert len(df.columns) == 0 -# with pytest.warns(UserWarning): -# df = Client.read(["ATCAI", "sorandomitcantexist"], START_TIME, STOP_TIME) -# assert len(df.index) > 0 -# assert len(df.columns) == 1 - - -def test_query_sql(Client: IMSClient) -> None: +def test_query_sql(client: IMSClient) -> None: query = "SELECT name, ip_description FROM ip_analogdef WHERE name LIKE 'ATC%'" - res = Client.query_sql(query=query, parse=True) + res = client.query_sql(query=query, parse=True) assert isinstance(res, pd.DataFrame) assert res.shape[0] >= 1 assert res.shape[1] == 2 - res = Client.query_sql(query=query, parse=False) + res = client.query_sql(query=query, parse=False) assert isinstance(res, pyodbc.Cursor) rows = res.fetchall() assert len(rows) >= 1 diff --git a/tests/test_AspenHandlerREST.py b/tests/test_AspenHandlerREST.py index 2d0e7e0c..6f11dc36 100644 --- a/tests/test_AspenHandlerREST.py +++ b/tests/test_AspenHandlerREST.py @@ -12,7 +12,7 @@ @pytest.fixture # type: ignore[misc] def aspen_handler() -> AspenHandlerWeb: # type: ignore[misc] h = AspenHandlerWeb( - 
datasource="sourcename", auth=None, options={}, url=None, verifySSL=None + datasource="source_name", auth=None, options={}, url=None, verifySSL=None ) yield h @@ -21,24 +21,24 @@ def test_generate_search_query() -> None: with pytest.raises(ValueError): AspenHandlerWeb.generate_search_query(tag="ATCAI", desc=None, datasource=None) assert AspenHandlerWeb.generate_search_query( - tag="ATCAI", datasource="sourcename", desc=None + tag="ATCAI", datasource="source_name", desc=None ) == { - "datasource": "sourcename", + "datasource": "source_name", "tag": "ATCAI", "max": 100, "getTrendable": 0, } assert AspenHandlerWeb.generate_search_query( - tag="ATC*", datasource="sourcename", desc=None + tag="ATC*", datasource="source_name", desc=None ) == { - "datasource": "sourcename", + "datasource": "source_name", "tag": "ATC*", "max": 100, "getTrendable": 0, } assert AspenHandlerWeb.generate_search_query( - tag="ATCAI", datasource="sourcename", desc=None - ) == {"datasource": "sourcename", "tag": "ATCAI", "max": 100, "getTrendable": 0} + tag="ATCAI", datasource="source_name", desc=None + ) == {"datasource": "source_name", "tag": "ATCAI", "max": 100, "getTrendable": 0} def test_split_tagmap() -> None: @@ -52,7 +52,7 @@ def test_split_tagmap() -> None: def test_generate_description_query(aspen_handler: AspenHandlerWeb) -> None: assert aspen_handler.generate_get_description_query("ATCAI") == ( '0' - "DSCR" + "DSCR" "0" ) @@ -60,7 +60,7 @@ def test_generate_description_query(aspen_handler: AspenHandlerWeb) -> None: def test_generate_unit_query(aspen_handler: AspenHandlerWeb) -> None: assert aspen_handler.generate_get_unit_query("ATCAI") == ( '0' - "Units" + "Units" "MAP_Units0" ) @@ -68,22 +68,22 @@ def test_generate_unit_query(aspen_handler: AspenHandlerWeb) -> None: def test_generate_map_query(aspen_handler: AspenHandlerWeb) -> None: assert aspen_handler.generate_get_map_query("ATCAI") == ( '0' - "" + "" ) @pytest.mark.parametrize( # type: ignore[misc] - ("read_type"), + "read_type", [ - ("RAW"), - ("SHAPEPRESERVING"), - ("INT"), - ("MIN"), - ("MAX"), - ("RNG"), - ("AVG"), - ("VAR"), - ("STD"), + "RAW", + "SHAPEPRESERVING", + "INT", + "MIN", + "MAX", + "RNG", + "AVG", + "VAR", + "STD", # pytest.param("COUNT", 0, marks=pytest.mark.skip), # pytest.param("GOOD", 0, marks=pytest.mark.skip), # pytest.param("BAD", 0, marks=pytest.mark.skip), @@ -95,14 +95,14 @@ def test_generate_map_query(aspen_handler: AspenHandlerWeb) -> None: def test_generate_tag_read_query( aspen_handler: AspenHandlerWeb, read_type: str ) -> None: - starttime = utils.ensure_datetime_with_tz("2020-06-24 17:00:00") - endtime = utils.ensure_datetime_with_tz("2020-06-24 18:00:00") + start = utils.ensure_datetime_with_tz("2020-06-24 17:00:00") + end = utils.ensure_datetime_with_tz("2020-06-24 18:00:00") ts = SAMPLE_TIME res = aspen_handler.generate_read_query( tagname="ATCAI", mapname=None, - start_time=starttime, - stop_time=endtime, + start=start, + end=end, sample_time=ts, read_type=getattr(ReaderType, read_type), metadata={}, @@ -110,96 +110,96 @@ def test_generate_tag_read_query( expected = { "RAW": ( '' - "" + "" "015930108000001593014400000" "01000000" ), "SHAPEPRESERVING": ( '' - "" + "" "015930108000001593014400000" "210000000" ), "INT": ( '' - "" + "" "015930108000001593014400000" "10

60

3
" ), "MIN": ( '' - "" + "" "015930108000001593014400000" "1400

60

30" "000
" ), "MAX": ( '' - "" + "" "015930108000001593014400000" "1300

60

30" "000
" ), "RNG": ( '' - "" + "" "015930108000001593014400000" "1500

60

30" "000
" ), "AVG": ( '' - "" + "" "015930108000001593014400000" "1200

60

30" "000
" ), "VAR": ( '' - "" + "" "015930108000001593014400000" "1800

60

30" "000
" ), "STD": ( '' - "" + "" "015930108000001593014400000" "1700

60

30" "000
" ), - "COUNT": ("whatever"), - "GOOD": ("whatever"), - "BAD": ("whatever"), - "TOTAL": ("whatever"), - "SUM": ("whatever"), + "COUNT": "whatever", + "GOOD": "whatever", + "BAD": "whatever", + "TOTAL": "whatever", + "SUM": "whatever", "SNAPSHOT": ( '' "" - "" + "" "10" ), } assert expected[read_type] == res -def test_genreadquery_long_sampletime(aspen_handler: AspenHandlerWeb) -> None: - starttime = utils.ensure_datetime_with_tz("2020-06-24 17:00:00") - endtime = utils.ensure_datetime_with_tz("2020-06-24 18:00:00") +def test_generate_read_query_long_sample_time(aspen_handler: AspenHandlerWeb) -> None: + start = utils.ensure_datetime_with_tz("2020-06-24 17:00:00") + end = utils.ensure_datetime_with_tz("2020-06-24 18:00:00") ts = timedelta(seconds=86401) res = aspen_handler.generate_read_query( tagname="ATCAI", mapname=None, - start_time=starttime, - stop_time=endtime, + start=start, + end=end, sample_time=ts, read_type=ReaderType.INT, metadata={}, ) expected = ( '' - "" + "" "015930108000001593014400000" "10

86401

3
" ) @@ -210,12 +210,13 @@ def test_genreadquery_long_sampletime(aspen_handler: AspenHandlerWeb) -> None: def test_generate_sql_query(aspen_handler: AspenHandlerWeb) -> None: res = aspen_handler.generate_sql_query( datasource=None, - connection_string="myconnstring", + connection_string="my_connection_stringing", query="myquery", max_rows=9999, ) expected = ( - '' "" + '' + "" ) assert res == expected res = aspen_handler.generate_sql_query( @@ -233,16 +234,16 @@ def test_generate_sql_query(aspen_handler: AspenHandlerWeb) -> None: assert res == expected -def test_initialize_connectionstring(aspen_handler: AspenHandlerWeb) -> None: +def test_initialize_connection_string(aspen_handler: AspenHandlerWeb) -> None: aspen_handler.initialize_connectionstring( - host="myhost", port=999, connection_string="myconnstr" + host="my_host", port=999, connection_string="my_connection_string" ) - assert aspen_handler._connection_string == "myconnstr" + assert aspen_handler._connection_string == "my_connection_string" aspen_handler.initialize_connectionstring( - host="myhost", + host="my_host", port=999, ) assert aspen_handler._connection_string == ( - "DRIVER=AspenTech SQLPlus;HOST=myhost;PORT=999;" + "DRIVER=AspenTech SQLPlus;HOST=my_host;PORT=999;" "CHARINT=N;CHARFLOAT=N;CHARTIME=N;CONVERTERRORS=N" ) diff --git a/tests/test_AspenHandlerREST_connect.py b/tests/test_AspenHandlerREST_connect.py index 4577ec39..ed7f8607 100644 --- a/tests/test_AspenHandlerREST_connect.py +++ b/tests/test_AspenHandlerREST_connect.py @@ -7,18 +7,22 @@ from tagreader.clients import IMSClient, list_sources from tagreader.utils import IMSType -from tagreader.web_handlers import AspenHandlerWeb, get_verifySSL, list_aspenone_sources +from tagreader.web_handlers import ( + AspenHandlerWeb, + get_verify_ssl, + list_aspenone_sources, +) -is_GITHUBACTION = "GITHUB_ACTION" in os.environ -is_AZUREPIPELINE = "TF_BUILD" in os.environ +is_GITHUB_ACTIONS = "GITHUB_ACTION" in os.environ +is_AZURE_PIPELINE = "TF_BUILD" in os.environ -if is_GITHUBACTION: +if is_GITHUB_ACTIONS: pytest.skip( "All tests in module require connection to Aspen server", allow_module_level=True, ) -VERIFY_SSL = False if is_AZUREPIPELINE else get_verifySSL() +VERIFY_SSL = False if is_AZURE_PIPELINE else get_verify_ssl() SOURCE = "SNA" TAG = "ATCAI" @@ -28,7 +32,7 @@ @pytest.fixture # type: ignore[misc] -def Client() -> Generator[IMSClient, None, None]: +def client() -> Generator[IMSClient, None, None]: c = IMSClient(datasource=SOURCE, imstype="aspenone", verifySSL=bool(VERIFY_SSL)) c.cache = None # type: ignore[assignment] c.connect() @@ -45,7 +49,7 @@ def aspen_handler() -> Generator[AspenHandlerWeb, None, None]: yield h -def test_list_all_aspenone_sources() -> None: +def test_list_all_aspen_one_sources() -> None: res = list_aspenone_sources(verifySSL=bool(VERIFY_SSL), auth=None, url=None) assert isinstance(res, list) assert len(res) >= 1 @@ -54,7 +58,7 @@ def test_list_all_aspenone_sources() -> None: assert 3 <= len(r) <= 20 -def test_list_sources_aspenone() -> None: +def test_list_sources_aspen_one() -> None: res = list_sources(imstype=IMSType.ASPENONE, verifySSL=bool(VERIFY_SSL)) assert isinstance(res, list) assert len(res) >= 1 @@ -65,57 +69,57 @@ def test_list_sources_aspenone() -> None: def test_verify_connection(aspen_handler: AspenHandlerWeb) -> None: assert aspen_handler.verify_connection(SOURCE) is True - assert aspen_handler.verify_connection("somerandomstuffhere") is False + assert aspen_handler.verify_connection("some_random_stuff_here") is False -def 
test_search_tag(Client: IMSClient) -> None: - res = Client.search(tag="sospecificitcannotpossiblyexist", desc=None) +def test_search_tag(client: IMSClient) -> None: + res = client.search(tag="so_specific_it_cannot_possibly_exist", desc=None) assert 0 == len(res) - res = Client.search(tag="ATCAI", desc=None) + res = client.search(tag="ATCAI", desc=None) assert res == [("ATCAI", "Sine Input")] - res = Client.search(tag="ATCM*", desc=None) + res = client.search(tag="ATCM*", desc=None) assert 5 <= len(res) [taglist, desclist] = zip(*res) assert "ATCMIXTIME1" in taglist assert desclist[taglist.index("ATCMIXTIME1")] == "MIX TANK 1 TIMER" - res = Client.search(tag="ATCM*", desc=None) + res = client.search(tag="ATCM*", desc=None) assert 5 <= len(res) - res = Client.search("AspenCalcTrigger1", desc=None) + res = client.search("AspenCalcTrigger1", desc=None) assert res == [("AspenCalcTrigger1", "")] - res = Client.search("ATC*", "Sine*") + res = client.search("ATC*", "Sine*") assert res == [("ATCAI", "Sine Input")] with pytest.raises(ValueError): - _ = Client.search(desc="Sine Input") # noqa + _ = client.search(desc="Sine Input") # noqa -def test_read_unknown_tag(Client: IMSClient) -> None: - df = Client.read( - tags=["sorandomitcantexist"], start_time=START_TIME, end_time=STOP_TIME +def test_read_unknown_tag(client: IMSClient) -> None: + df = client.read( + tags=["so_random_it_cant_exist"], start_time=START_TIME, end_time=STOP_TIME ) assert len(df.index) == 0 - df = Client.read( - tags=[TAG, "sorandomitcantexist"], start_time=START_TIME, end_time=STOP_TIME + df = client.read( + tags=[TAG, "so_random_it_cant_exist"], start_time=START_TIME, end_time=STOP_TIME ) assert len(df.index) > 0 assert len(df.columns == 1) -def test_query_sql(Client: IMSClient) -> None: +def test_query_sql(client: IMSClient) -> None: # The % causes WC_E_SYNTAX error in result. Tried "everything" but no go. # Leaving it for now. 
# query = "SELECT name, ip_description FROM ip_analogdef WHERE name LIKE 'ATC%'" query = "Select name, ip_description from ip_analogdef where name = 'atcai'" - res = Client.query_sql(query=query, parse=False) + res = client.query_sql(query=query, parse=False) print(res) assert isinstance(res, str) with raises(NotImplementedError): - res = Client.query_sql(query=query, parse=True) + res = client.query_sql(query=query, parse=True) assert isinstance(res, str) - Client.handler.initialize_connectionstring(host="SNA-IMS.statoil.net") + client.handler.initialize_connectionstring(host="SNA-IMS.statoil.net") query = "Select name, ip_description from ip_analogdef where name = 'atcai'" - res = Client.query_sql(query=query, parse=False) + res = client.query_sql(query=query, parse=False) print(res) assert isinstance(res, str) with raises(NotImplementedError): - res = Client.query_sql(query=query, parse=True) + res = client.query_sql(query=query, parse=True) assert isinstance(res, str) diff --git a/tests/test_PIHandlerODBC.py b/tests/test_PIHandlerODBC.py index 10730ee4..377de39c 100644 --- a/tests/test_PIHandlerODBC.py +++ b/tests/test_PIHandlerODBC.py @@ -23,7 +23,7 @@ @pytest.fixture(scope="module") # type: ignore[misc] -def PIHandler() -> Generator[PIHandlerODBC, None, None]: +def pi_handler() -> Generator[PIHandlerODBC, None, None]: yield PIHandlerODBC( host="thehostname.statoil.net", port=1234, @@ -31,8 +31,8 @@ def PIHandler() -> Generator[PIHandlerODBC, None, None]: ) -def test_generate_connection_string(PIHandler: PIHandlerODBC) -> None: - res = PIHandler.generate_connection_string() +def test_generate_connection_string(pi_handler: PIHandlerODBC) -> None: + res = pi_handler.generate_connection_string() expected = ( "DRIVER={PI ODBC Driver};Server=the_das_server;Trusted_Connection=Yes;" "Command Timeout=1800;Provider Type=PIOLEDB;" @@ -64,26 +64,26 @@ def test_generate_connection_string(PIHandler: PIHandlerODBC) -> None: "SNAPSHOT", ], ) -def test_generate_tag_read_query(PIHandler: PIHandlerODBC, read_type_str: str) -> None: +def test_generate_tag_read_query(pi_handler: PIHandlerODBC, read_type_str: str) -> None: read_type = getattr(ReaderType, read_type_str) - starttime = utils.ensure_datetime_with_tz(START_TIME) + start = utils.ensure_datetime_with_tz(START_TIME) stoptime = utils.ensure_datetime_with_tz(STOP_TIME) ts = SAMPLE_TIME if read_type == ReaderType.SNAPSHOT: - res = PIHandler.generate_read_query( + res = pi_handler.generate_read_query( tag="thetag", - start_time=None, # type: ignore[arg-type] - stop_time=None, # type: ignore[arg-type] + start=None, # type: ignore[arg-type] + end=None, # type: ignore[arg-type] sample_time=None, read_type=read_type, metadata={}, ) else: - res = PIHandler.generate_read_query( + res = pi_handler.generate_read_query( tag="thetag", - start_time=starttime, - stop_time=stoptime, + start=start, + end=stoptime, sample_time=ts, read_type=read_type, metadata={}, @@ -169,28 +169,28 @@ def test_generate_tag_read_query(PIHandler: PIHandlerODBC, read_type_str: str) - ], ) def test_generate_tag_read_query_with_status( - PIHandler: PIHandlerODBC, read_type_str: str + pi_handler: PIHandlerODBC, read_type_str: str ) -> None: read_type = getattr(ReaderType, read_type_str) - starttime = utils.ensure_datetime_with_tz(START_TIME) + start = utils.ensure_datetime_with_tz(START_TIME) stoptime = utils.ensure_datetime_with_tz(STOP_TIME) ts = SAMPLE_TIME if read_type == read_type.SNAPSHOT: - res = PIHandler.generate_read_query( + res = pi_handler.generate_read_query( tag="thetag", 
- start_time=None, # type: ignore[arg-type] - stop_time=None, # type: ignore[arg-type] + start=None, # type: ignore[arg-type] + end=None, # type: ignore[arg-type] sample_time=None, read_type=read_type, get_status=True, metadata={}, ) else: - res = PIHandler.generate_read_query( + res = pi_handler.generate_read_query( tag="thetag", - start_time=starttime, - stop_time=stoptime, + start=start, + end=stoptime, sample_time=ts, read_type=read_type, get_status=True, @@ -263,15 +263,15 @@ def test_generate_tag_read_query_with_status( assert expected[read_type.name] == res -def test_genreadquery_long_sampletime(PIHandler: PIHandlerODBC) -> None: - starttime = utils.ensure_datetime_with_tz(START_TIME) +def test_genreadquery_long_sampletime(pi_handler: PIHandlerODBC) -> None: + start = utils.ensure_datetime_with_tz(START_TIME) stoptime = utils.ensure_datetime_with_tz(STOP_TIME) ts = timedelta(seconds=86401) - res = PIHandler.generate_read_query( + res = pi_handler.generate_read_query( tag="thetag", - start_time=starttime, - stop_time=stoptime, + start=start, + end=stoptime, sample_time=ts, read_type=ReaderType.INT, metadata={}, diff --git a/tests/test_PIHandlerODBC_connect.py b/tests/test_PIHandlerODBC_connect.py index 3f5dd5f5..d47feb47 100644 --- a/tests/test_PIHandlerODBC_connect.py +++ b/tests/test_PIHandlerODBC_connect.py @@ -36,7 +36,7 @@ @pytest.fixture # type: ignore[misc] -def Client() -> Generator[IMSClient, None, None]: +def client() -> Generator[IMSClient, None, None]: c = IMSClient(datasource=SOURCE, imstype="pi") c.cache = None # type: ignore[assignment] c.connect() @@ -63,17 +63,17 @@ def test_list_sources_pi() -> None: assert 3 <= len(r) -def test_search_tag(Client: IMSClient) -> None: - res = Client.search(tag="SINUSOID") +def test_search_tag(client: IMSClient) -> None: + res = client.search(tag="SINUSOID") assert 1 == len(res) - res = Client.search(tag="SIN*") + res = client.search(tag="SIN*") assert 3 <= len(res) [taglist, desclist] = zip(*res) assert "SINUSOIDU" in taglist assert desclist[taglist.index("SINUSOID")] == "12 Hour Sine Wave" - res = Client.search(tag=None, desc="12 Hour Sine Wave") + res = client.search(tag=None, desc="12 Hour Sine Wave") assert 1 <= len(res) - res = Client.search("SINUSOID", desc="*Sine*") + res = client.search("SINUSOID", desc="*Sine*") assert 1 <= len(res) @@ -99,16 +99,16 @@ def test_search_tag(Client: IMSClient) -> None: ("SNAPSHOT", 1), ], ) -def test_read(Client: IMSClient, read_type: str, size: int) -> None: +def test_read(client: IMSClient, read_type: str, size: int) -> None: if read_type == "SNAPSHOT": - df = Client.read( + df = client.read( tags=TAGS["Float32"], read_type=getattr(ReaderType, read_type), start_time=None, end_time=None, ) else: - df = Client.read( + df = client.read( tags=TAGS["Float32"], start_time=START_TIME, end_time=STOP_TIME, @@ -124,8 +124,8 @@ def test_read(Client: IMSClient, read_type: str, size: int) -> None: assert df.index[-1] <= ensure_datetime_with_tz(STOP_TIME) # type: ignore[operator] -def test_read_with_status(Client: IMSClient) -> None: - df = Client.read( +def test_read_with_status(client: IMSClient) -> None: + df = client.read( tags=TAGS["Float32"], start_time=START_TIME, end_time=STOP_TIME, @@ -137,9 +137,9 @@ def test_read_with_status(Client: IMSClient) -> None: assert df[TAGS["Float32"] + "::status"].iloc[0] == 0 -def test_digitalread_yields_integers(Client: IMSClient) -> None: +def test_digitalread_yields_integers(client: IMSClient) -> None: tag = TAGS["Digital"] - df = Client.read( + df = client.read( 
tags=tag, start_time=START_TIME, end_time=STOP_TIME, @@ -149,26 +149,26 @@ def test_digitalread_yields_integers(Client: IMSClient) -> None: assert all(x.is_integer() for x in df[tag]) -def test_get_unit(Client: IMSClient) -> None: - res = Client.get_units(list(TAGS.values())) +def test_get_unit(client: IMSClient) -> None: + res = client.get_units(list(TAGS.values())) assert res[TAGS["Float32"]] == "DEG. C" assert res[TAGS["Digital"]] == "STATE" assert res[TAGS["Int32"]] == "" -def test_get_description(Client: IMSClient) -> None: - res = Client.get_descriptions(list(TAGS.values())) +def test_get_description(client: IMSClient) -> None: + res = client.get_descriptions(list(TAGS.values())) assert res[TAGS["Float32"]] == "Atmospheric Tower OH Vapor" assert res[TAGS["Digital"]] == "Light Naphtha End Point Control" assert res[TAGS["Int32"]] == "Light Naphtha End Point" -def test_from_DST_folds_time(Client: IMSClient) -> None: +def test_from_dst_folds_time(client: IMSClient) -> None: if os.path.exists(SOURCE + ".h5"): os.remove(SOURCE + ".h5") tag = TAGS["Float32"] interval = ["2017-10-29 00:30:00", "2017-10-29 04:30:00"] - df = Client.read(tags=[tag], start_time=interval[0], end_time=interval[1], ts=600) + df = client.read(tags=[tag], start_time=interval[0], end_time=interval[1], ts=600) assert len(df) == (4 + 1) * 6 + 1 # Time exists inside fold: assert ( @@ -180,20 +180,20 @@ def test_from_DST_folds_time(Client: IMSClient) -> None: ) -def test_to_DST_skips_time(Client: IMSClient) -> None: +def test_to_dst_skips_time(client: IMSClient) -> None: if os.path.exists(SOURCE + ".h5"): os.remove(SOURCE + ".h5") tag = TAGS["Float32"] interval = ["2018-03-25 00:30:00", "2018-03-25 03:30:00"] - df = Client.read(tags=[tag], start_time=interval[0], end_time=interval[1], ts=600) + df = client.read(tags=[tag], start_time=interval[0], end_time=interval[1], ts=600) # Lose one hour: assert ( df.loc["2018-03-25 01:50:00":"2018-03-25 03:10:00"].size == (2 + 1 * 6 + 1) - 6 # type: ignore[misc] ) -def test_tags_with_no_data_included_in_results(Client: IMSClient) -> None: - df = Client.read( +def test_tags_with_no_data_included_in_results(client: IMSClient) -> None: + df = client.read( tags=[TAGS["Float32"]], start_time="2099-01-01 00:00:00", end_time="2099-01-02 00:00:00", @@ -201,14 +201,14 @@ def test_tags_with_no_data_included_in_results(Client: IMSClient) -> None: assert len(df.columns) == 1 -def test_query_sql(Client: IMSClient) -> None: +def test_query_sql(client: IMSClient) -> None: tag = TAGS["Float32"] query = f"SELECT descriptor, engunits FROM pipoint.pipoint2 WHERE tag='{tag}'" - res = Client.query_sql(query=query, parse=True) + res = client.query_sql(query=query, parse=True) assert isinstance(res, pd.DataFrame) assert res.shape[0] >= 1 assert res.shape[1] == 2 - res = Client.query_sql(query=query, parse=False) + res = client.query_sql(query=query, parse=False) assert isinstance(res, pyodbc.Cursor) rows = res.fetchall() assert len(rows) >= 1 diff --git a/tests/test_PIHandlerREST.py b/tests/test_PIHandlerREST.py index 7bcfc0f8..070a36fe 100644 --- a/tests/test_PIHandlerREST.py +++ b/tests/test_PIHandlerREST.py @@ -12,7 +12,7 @@ @pytest.fixture # type: ignore[misc] -def PIHandler() -> Generator[PIHandlerWeb, None, None]: +def pi_handler() -> Generator[PIHandlerWeb, None, None]: h = PIHandlerWeb( datasource="sourcename", auth=None, options={}, url=None, verifySSL=True ) @@ -51,19 +51,19 @@ def test_generate_search_query() -> None: ) == {"q": r"name:BA\:*.1 AND description:Concentration\ Reactor\ 1"} -def 
test_is_summary(PIHandler: PIHandlerWeb) -> None: - assert PIHandler._is_summary(ReaderType.AVG) - assert PIHandler._is_summary(ReaderType.MIN) - assert PIHandler._is_summary(ReaderType.MAX) - assert PIHandler._is_summary(ReaderType.RNG) - assert PIHandler._is_summary(ReaderType.STD) - assert PIHandler._is_summary(ReaderType.VAR) - assert not PIHandler._is_summary(ReaderType.RAW) - assert not PIHandler._is_summary(ReaderType.SHAPEPRESERVING) - assert not PIHandler._is_summary(ReaderType.INT) - assert not PIHandler._is_summary(ReaderType.GOOD) - assert not PIHandler._is_summary(ReaderType.BAD) - assert not PIHandler._is_summary(ReaderType.SNAPSHOT) +def test_is_summary(pi_handler: PIHandlerWeb) -> None: + assert pi_handler._is_summary(ReaderType.AVG) + assert pi_handler._is_summary(ReaderType.MIN) + assert pi_handler._is_summary(ReaderType.MAX) + assert pi_handler._is_summary(ReaderType.RNG) + assert pi_handler._is_summary(ReaderType.STD) + assert pi_handler._is_summary(ReaderType.VAR) + assert not pi_handler._is_summary(ReaderType.RAW) + assert not pi_handler._is_summary(ReaderType.SHAPEPRESERVING) + assert not pi_handler._is_summary(ReaderType.INT) + assert not pi_handler._is_summary(ReaderType.GOOD) + assert not pi_handler._is_summary(ReaderType.BAD) + assert not pi_handler._is_summary(ReaderType.SNAPSHOT) @pytest.mark.parametrize( # type: ignore[misc] @@ -88,15 +88,15 @@ def test_is_summary(PIHandler: PIHandlerWeb) -> None: "SNAPSHOT", ], ) -def test_generate_read_query(PIHandler: PIHandlerWeb, read_type: str) -> None: - starttime = ensure_datetime_with_tz(START_TIME) +def test_generate_read_query(pi_handler: PIHandlerWeb, read_type: str) -> None: + start = ensure_datetime_with_tz(START_TIME) stoptime = ensure_datetime_with_tz(STOP_TIME) ts = timedelta(seconds=SAMPLE_TIME) - (url, params) = PIHandler.generate_read_query( - tag=PIHandler.tag_to_webid(tag="alreadyknowntag"), # type: ignore[arg-type] - start_time=starttime, - stop_time=stoptime, + (url, params) = pi_handler.generate_read_query( + tag=pi_handler.tag_to_webid(tag="alreadyknowntag"), # type: ignore[arg-type] + start=start, + end=stoptime, sample_time=ts, read_type=getattr(ReaderType, read_type), metadata=None, @@ -107,11 +107,11 @@ def test_generate_read_query(PIHandler: PIHandlerWeb, read_type: str) -> None: assert params["timeZone"] == "UTC" if read_type == "INT": - assert url == f"streams/{PIHandler.webidcache['alreadyknowntag']}/interpolated" + assert url == f"streams/{pi_handler.webidcache['alreadyknowntag']}/interpolated" assert params["selectedFields"] == "Links;Items.Timestamp;Items.Value" assert params["interval"] == f"{SAMPLE_TIME}s" elif read_type in ["AVG", "MIN", "MAX", "RNG", "STD", "VAR"]: - assert url == f"streams/{PIHandler.webidcache['alreadyknowntag']}/summary" + assert url == f"streams/{pi_handler.webidcache['alreadyknowntag']}/summary" assert ( params["selectedFields"] == "Links;Items.Value.Timestamp;Items.Value.Value" ) @@ -125,11 +125,11 @@ def test_generate_read_query(PIHandler: PIHandlerWeb, read_type: str) -> None: }.get(read_type) == params["summaryType"] assert params["summaryDuration"] == f"{SAMPLE_TIME}s" elif read_type == "SNAPSHOT": - assert url == f"streams/{PIHandler.webidcache['alreadyknowntag']}/value" + assert url == f"streams/{pi_handler.webidcache['alreadyknowntag']}/value" assert params["selectedFields"] == "Timestamp;Value" assert len(params) == 3 elif read_type == "RAW": - assert url == f"streams/{PIHandler.webidcache['alreadyknowntag']}/recorded" + assert url == 
f"streams/{pi_handler.webidcache['alreadyknowntag']}/recorded" assert params["selectedFields"] == "Links;Items.Timestamp;Items.Value" assert params["maxCount"] == 10000 # type: ignore[comparison-overlap] @@ -157,16 +157,16 @@ def test_generate_read_query(PIHandler: PIHandlerWeb, read_type: str) -> None: ], ) def test_generate_read_query_with_status( - PIHandler: PIHandlerWeb, read_type: str + pi_handler: PIHandlerWeb, read_type: str ) -> None: - starttime = ensure_datetime_with_tz(START_TIME) + start = ensure_datetime_with_tz(START_TIME) stoptime = ensure_datetime_with_tz(STOP_TIME) ts = timedelta(seconds=SAMPLE_TIME) - (url, params) = PIHandler.generate_read_query( - tag=PIHandler.tag_to_webid("alreadyknowntag"), # type: ignore[arg-type] - start_time=starttime, - stop_time=stoptime, + (url, params) = pi_handler.generate_read_query( + tag=pi_handler.tag_to_webid("alreadyknowntag"), # type: ignore[arg-type] + start=start, + end=stoptime, sample_time=ts, read_type=getattr(ReaderType, read_type), get_status=True, @@ -178,14 +178,14 @@ def test_generate_read_query_with_status( assert params["timeZone"] == "UTC" if read_type == "INT": - assert url == f"streams/{PIHandler.webidcache['alreadyknowntag']}/interpolated" + assert url == f"streams/{pi_handler.webidcache['alreadyknowntag']}/interpolated" assert params["selectedFields"] == ( "Links;Items.Timestamp;Items.Value;" "Items.Good;Items.Questionable;Items.Substituted" ) assert params["interval"] == f"{SAMPLE_TIME}s" elif read_type in ["AVG", "MIN", "MAX", "RNG", "STD", "VAR"]: - assert url == f"streams/{PIHandler.webidcache['alreadyknowntag']}/summary" + assert url == f"streams/{pi_handler.webidcache['alreadyknowntag']}/summary" assert params["selectedFields"] == ( "Links;Items.Value.Timestamp;Items.Value.Value;" "Items.Value.Good;Items.Value.Questionable;Items.Value.Substituted" @@ -200,13 +200,13 @@ def test_generate_read_query_with_status( }.get(read_type) == params["summaryType"] assert params["summaryDuration"] == f"{SAMPLE_TIME}s" elif read_type == "SNAPSHOT": - assert url == f"streams/{PIHandler.webidcache['alreadyknowntag']}/value" + assert url == f"streams/{pi_handler.webidcache['alreadyknowntag']}/value" assert ( params["selectedFields"] == "Timestamp;Value;Good;Questionable;Substituted" ) assert len(params) == 3 elif read_type == "RAW": - assert url == f"streams/{PIHandler.webidcache['alreadyknowntag']}/recorded" + assert url == f"streams/{pi_handler.webidcache['alreadyknowntag']}/recorded" assert params["selectedFields"] == ( "Links;Items.Timestamp;Items.Value;" "Items.Good;Items.Questionable;Items.Substituted" @@ -214,15 +214,15 @@ def test_generate_read_query_with_status( assert params["maxCount"] == 10000 # type: ignore[comparison-overlap] -def test_genreadquery_long_sampletime(PIHandler: PIHandlerWeb) -> None: - starttime = ensure_datetime_with_tz(START_TIME) +def test_genreadquery_long_sampletime(pi_handler: PIHandlerWeb) -> None: + start = ensure_datetime_with_tz(START_TIME) stoptime = ensure_datetime_with_tz(STOP_TIME) ts = timedelta(seconds=86410) - (url, params) = PIHandler.generate_read_query( - tag=PIHandler.tag_to_webid("alreadyknowntag"), # type: ignore[arg-type] - start_time=starttime, - stop_time=stoptime, + (url, params) = pi_handler.generate_read_query( + tag=pi_handler.tag_to_webid("alreadyknowntag"), # type: ignore[arg-type] + start=start, + end=stoptime, sample_time=ts, read_type=ReaderType.INT, metadata=None, diff --git a/tests/test_PIHandlerREST_connect.py b/tests/test_PIHandlerREST_connect.py index 
63349546..2423baa8 100644 --- a/tests/test_PIHandlerREST_connect.py +++ b/tests/test_PIHandlerREST_connect.py @@ -6,7 +6,7 @@ from tagreader.clients import IMSClient, list_sources from tagreader.utils import ReaderType, ensure_datetime_with_tz -from tagreader.web_handlers import PIHandlerWeb, get_verifySSL, list_piwebapi_sources +from tagreader.web_handlers import PIHandlerWeb, get_verify_ssl, list_piwebapi_sources is_GITHUBACTION = "GITHUB_ACTION" in os.environ is_AZUREPIPELINE = "TF_BUILD" in os.environ @@ -16,7 +16,7 @@ "All tests in module require connection to PI server", allow_module_level=True ) -verifySSL = False if is_AZUREPIPELINE else get_verifySSL() +verifySSL = False if is_AZUREPIPELINE else get_verify_ssl() SOURCE = "PIMAM" TAGS = { @@ -42,7 +42,7 @@ def client() -> Generator[IMSClient, None, None]: @pytest.fixture # type: ignore[misc] -def PIHandler() -> Generator[PIHandlerWeb, None, None]: +def pi_handler() -> Generator[PIHandlerWeb, None, None]: h = PIHandlerWeb( datasource=SOURCE, verifySSL=bool(verifySSL), auth=None, options={}, url=None ) @@ -68,9 +68,9 @@ def test_list_sources_piwebapi() -> None: assert 3 <= len(r) -def test_verify_connection(PIHandler: IMSClient) -> None: - assert PIHandler.verify_connection("PIMAM") is True # type: ignore[attr-defined] - assert PIHandler.verify_connection("somerandomstuffhere") is False # type: ignore[attr-defined] +def test_verify_connection(pi_handler: IMSClient) -> None: + assert pi_handler.verify_connection("PIMAM") is True # type: ignore[attr-defined] + assert pi_handler.verify_connection("somerandomstuffhere") is False # type: ignore[attr-defined] def test_search_tag(client: IMSClient) -> None: @@ -87,13 +87,14 @@ def test_search_tag(client: IMSClient) -> None: assert 1 <= len(res) -def test_tag_to_webid(PIHandler: PIHandlerWeb) -> None: - res = PIHandler.tag_to_webid("SINUSOID") +def test_tag_to_web_id(pi_handler: PIHandlerWeb) -> None: + res = pi_handler.tag_to_webid("SINUSOID") assert isinstance(res, str) assert len(res) >= 20 with pytest.raises(AssertionError): - res = PIHandler.tag_to_webid("SINUSOID*") - res = PIHandler.tag_to_webid("somerandomgarbage") + _ = pi_handler.tag_to_webid("SINUSOID*") + res = pi_handler.tag_to_webid("somerandomgarbage") + assert not res @pytest.mark.parametrize( # type: ignore[misc] @@ -224,7 +225,7 @@ def test_get_description(client: IMSClient) -> None: assert res[TAGS["Int32"]] == "Light Naphtha End Point" -def test_from_DST_folds_time(client: IMSClient) -> None: +def test_from_dst_folds_time(client: IMSClient) -> None: if os.path.exists(SOURCE + ".h5"): os.remove(SOURCE + ".h5") tag = TAGS["Float32"] @@ -241,7 +242,7 @@ def test_from_DST_folds_time(client: IMSClient) -> None: ) -def test_to_DST_skips_time(client: IMSClient) -> None: +def test_to_dst_skips_time(client: IMSClient) -> None: if os.path.exists(SOURCE + ".h5"): os.remove(SOURCE + ".h5") tag = TAGS["Float32"] diff --git a/tests/test_bucketcache.py b/tests/test_bucketcache.py index fb474813..13370555 100644 --- a/tests/test_bucketcache.py +++ b/tests/test_bucketcache.py @@ -8,40 +8,44 @@ from tagreader.utils import ReaderType TAGNAME = "tag1" -READERTYPE = ReaderType.INT +READ_TYPE = ReaderType.INT TZ = "UTC" TS = pd.Timedelta(seconds=300) FREQ = f"{int(TS.total_seconds())}s" -STARTTIME_1 = pd.to_datetime("2020-01-01 12:00:00", utc=True) -ENDTIME_1 = pd.to_datetime("2020-01-01 13:00:00", utc=True) -idx = pd.date_range(start=STARTTIME_1, end=ENDTIME_1, freq=FREQ, name="time") -DF1 = pd.DataFrame({TAGNAME: range(0, len(idx))}, 
index=idx) +START_TIME_1 = pd.to_datetime("2020-01-01 12:00:00", utc=True) +END_TIME_1 = pd.to_datetime("2020-01-01 13:00:00", utc=True) +index = pd.date_range(start=START_TIME_1, end=END_TIME_1, freq=FREQ, name="time") +DF1 = pd.DataFrame({TAGNAME: range(0, len(index))}, index=index) -STARTTIME_1_EPOCH = ( - STARTTIME_1 - pd.to_datetime("1970-01-01", utc=True) +START_TIME_1_EPOCH = ( + START_TIME_1 - pd.to_datetime("1970-01-01", utc=True) ) // pd.Timedelta( "1s" ) # 1577880000 -ENDTIME_1_EPOCH = (ENDTIME_1 - pd.to_datetime("1970-01-01", utc=True)) // pd.Timedelta( +END_TIME_1_EPOCH = ( + END_TIME_1 - pd.to_datetime("1970-01-01", utc=True) +) // pd.Timedelta( "1s" ) # 1577883600 -STARTTIME_2 = pd.to_datetime("2020-01-01 13:30:00", utc=True) -ENDTIME_2 = pd.to_datetime("2020-01-01 14:00:00", utc=True) -idx = pd.date_range(start=STARTTIME_2, end=ENDTIME_2, freq=FREQ, name="time") -DF2 = pd.DataFrame({TAGNAME: range(0, len(idx))}, index=idx) +START_TIME_2 = pd.to_datetime("2020-01-01 13:30:00", utc=True) +END_TIME_2 = pd.to_datetime("2020-01-01 14:00:00", utc=True) +index = pd.date_range(start=START_TIME_2, end=END_TIME_2, freq=FREQ, name="time") +DF2 = pd.DataFrame({TAGNAME: range(0, len(index))}, index=index) -ENDTIME_2_EPOCH = (ENDTIME_2 - pd.to_datetime("1970-01-01", utc=True)) // pd.Timedelta( +END_TIME_2_EPOCH = ( + END_TIME_2 - pd.to_datetime("1970-01-01", utc=True) +) // pd.Timedelta( "1s" ) # 1577887200 -STARTTIME_3 = pd.to_datetime("2020-01-01 12:40:00", utc=True) -ENDTIME_3 = pd.to_datetime("2020-01-01 13:40:00", utc=True) -idx = pd.date_range(start=STARTTIME_3, end=ENDTIME_3, freq=FREQ, name="time") -DF3 = pd.DataFrame({TAGNAME: range(0, len(idx))}, index=idx) +START_TIME_3 = pd.to_datetime("2020-01-01 12:40:00", utc=True) +END_TIME_3 = pd.to_datetime("2020-01-01 13:40:00", utc=True) +index = pd.date_range(start=START_TIME_3, end=END_TIME_3, freq=FREQ, name="time") +DF3 = pd.DataFrame({TAGNAME: range(0, len(index))}, index=index) @pytest.fixture(autouse=True) # type: ignore[misc] @@ -66,28 +70,28 @@ def test_safe_tagname() -> None: def test_get_intervals_from_dataset_name(cache: BucketCache) -> None: - badtag = f"/tag1/INT/{STARTTIME_1_EPOCH}_{ENDTIME_1_EPOCH}" - goodtag = f"/tag1/INT/_{STARTTIME_1_EPOCH}_{ENDTIME_1_EPOCH}" - starttime, endtime = cache._get_intervals_from_dataset_name(badtag) - assert starttime is None - assert endtime is None # type: ignore[unreachable] - starttime, endtime = cache._get_intervals_from_dataset_name(goodtag) - assert starttime == STARTTIME_1 - assert endtime == ENDTIME_1 + bad_tag = f"/tag1/INT/{START_TIME_1_EPOCH}_{END_TIME_1_EPOCH}" + good_tag = f"/tag1/INT/_{START_TIME_1_EPOCH}_{END_TIME_1_EPOCH}" + start, end = cache._get_intervals_from_dataset_name(bad_tag) + assert start is None + assert end is None # type: ignore[unreachable] + start, end = cache._get_intervals_from_dataset_name(good_tag) + assert start == START_TIME_1 + assert end == END_TIME_1 def test_key_path_with_time(cache: BucketCache) -> None: assert ( cache._key_path( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=60, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) - == f"$tag1$INT$s60$_{STARTTIME_1_EPOCH}_{ENDTIME_1_EPOCH}" + == f"$tag1$INT$s60$_{START_TIME_1_EPOCH}_{END_TIME_1_EPOCH}" ) @@ -95,14 +99,14 @@ def test_key_path_stepped(cache: BucketCache) -> None: assert ( cache._key_path( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=60, stepped=True, 
- status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) - == f"$tag1$INT$s60$stepped$_{STARTTIME_1_EPOCH}_{ENDTIME_1_EPOCH}" + == f"$tag1$INT$s60$stepped$_{START_TIME_1_EPOCH}_{END_TIME_1_EPOCH}" ) @@ -110,23 +114,23 @@ def test_key_path_with_status(cache: BucketCache) -> None: assert ( cache._key_path( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=60, stepped=False, - status=True, + get_status=True, ) - == "$tag1$INT$s60$status" + == "$tag1$INT$s60$get_status" ) -def test_key_path_RAW(cache: BucketCache) -> None: +def test_key_path_raw(cache: BucketCache) -> None: assert ( cache._key_path( tagname=TAGNAME, - readtype=ReaderType.RAW, + read_type=ReaderType.RAW, ts=60, stepped=False, - status=False, + get_status=False, ) == "$tag1$RAW" ) @@ -136,34 +140,34 @@ def test_get_missing_intervals(cache: BucketCache) -> None: cache.store( df=DF1, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) cache.store( df=DF2, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_2, - end_time=ENDTIME_2, + get_status=False, + start=START_TIME_2, + end=END_TIME_2, ) # Perfect coverage, no missing intervals missing_intervals = cache.get_missing_intervals( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) assert len(missing_intervals) == 0 @@ -171,12 +175,12 @@ def test_get_missing_intervals(cache: BucketCache) -> None: # Request subsection, no missing intervals missing_intervals = cache.get_missing_intervals( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1 + pd.Timedelta("5m"), - end_time=ENDTIME_1 - pd.Timedelta("5m"), + get_status=False, + start=START_TIME_1 + pd.Timedelta("5m"), + end=END_TIME_1 - pd.Timedelta("5m"), ) assert len(missing_intervals) == 0 @@ -184,78 +188,68 @@ def test_get_missing_intervals(cache: BucketCache) -> None: # Request data from before to after, two missing intervals missing_intervals = cache.get_missing_intervals( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1 - pd.Timedelta("15m"), - end_time=ENDTIME_1 + pd.Timedelta("15m"), + get_status=False, + start=START_TIME_1 - pd.Timedelta("15m"), + end=END_TIME_1 + pd.Timedelta("15m"), ) assert len(missing_intervals) == 2 - assert missing_intervals[0] == (STARTTIME_1 - pd.Timedelta("15m"), STARTTIME_1) - assert missing_intervals[1] == (ENDTIME_1, ENDTIME_1 + pd.Timedelta("15m")) + assert missing_intervals[0] == (START_TIME_1 - pd.Timedelta("15m"), START_TIME_1) + assert missing_intervals[1] == (END_TIME_1, END_TIME_1 + pd.Timedelta("15m")) # Request data stretching from before first bucket, including # space between buckets, to after second bucket. Three missing intervals. 
missing_intervals = cache.get_missing_intervals( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1 - pd.Timedelta("15m"), - end_time=ENDTIME_2 + pd.Timedelta("15m"), + get_status=False, + start=START_TIME_1 - pd.Timedelta("15m"), + end=END_TIME_2 + pd.Timedelta("15m"), ) assert len(missing_intervals) == 3 - assert missing_intervals[0] == (STARTTIME_1 - pd.Timedelta("15m"), STARTTIME_1) - assert missing_intervals[1] == (ENDTIME_1, STARTTIME_2) - assert missing_intervals[2] == (ENDTIME_2, ENDTIME_2 + pd.Timedelta("15m")) + assert missing_intervals[0] == (START_TIME_1 - pd.Timedelta("15m"), START_TIME_1) + assert missing_intervals[1] == (END_TIME_1, START_TIME_2) + assert missing_intervals[2] == (END_TIME_2, END_TIME_2 + pd.Timedelta("15m")) def test_get_intersecting_datasets(cache: BucketCache) -> None: cache.store( df=DF1, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) cache.store( df=DF2, tagname=TAGNAME, - readtype=READERTYPE, - ts=TS, - stepped=False, - status=False, - start_time=STARTTIME_2, - end_time=ENDTIME_2, - ) - - intersecting_datasets = cache.get_intersecting_datasets( - tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_2, + end=END_TIME_2, ) # Perfect coverage intersecting_datasets = cache.get_intersecting_datasets( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) assert len(intersecting_datasets) == 1 @@ -263,12 +257,12 @@ def test_get_intersecting_datasets(cache: BucketCache) -> None: # Request subsection intersecting_datasets = cache.get_intersecting_datasets( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1 + pd.Timedelta("5m"), - end_time=ENDTIME_1 - pd.Timedelta("5m"), + get_status=False, + start=START_TIME_1 + pd.Timedelta("5m"), + end=END_TIME_1 - pd.Timedelta("5m"), ) assert len(intersecting_datasets) == 1 @@ -276,12 +270,12 @@ def test_get_intersecting_datasets(cache: BucketCache) -> None: # Request data from before to after intersecting_datasets = cache.get_intersecting_datasets( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1 - pd.Timedelta("15m"), - end_time=ENDTIME_1 + pd.Timedelta("15m"), + get_status=False, + start=START_TIME_1 - pd.Timedelta("15m"), + end=END_TIME_1 + pd.Timedelta("15m"), ) assert len(intersecting_datasets) == 1 @@ -290,12 +284,12 @@ def test_get_intersecting_datasets(cache: BucketCache) -> None: # space between buckets, to after second bucket. intersecting_datasets = cache.get_intersecting_datasets( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1 - pd.Timedelta("15m"), - end_time=ENDTIME_2 + pd.Timedelta("15m"), + get_status=False, + start=START_TIME_1 - pd.Timedelta("15m"), + end=END_TIME_2 + pd.Timedelta("15m"), ) assert len(intersecting_datasets) == 2 @@ -304,12 +298,12 @@ def test_get_intersecting_datasets(cache: BucketCache) -> None: # inside second bucket. 
intersecting_datasets = cache.get_intersecting_datasets( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1 - pd.Timedelta("15m"), - end_time=ENDTIME_2 - pd.Timedelta("15m"), + get_status=False, + start=START_TIME_1 - pd.Timedelta("15m"), + end=END_TIME_2 - pd.Timedelta("15m"), ) assert len(intersecting_datasets) == 2 @@ -318,12 +312,12 @@ def test_get_intersecting_datasets(cache: BucketCache) -> None: # inside second bucket. intersecting_datasets = cache.get_intersecting_datasets( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1 + pd.Timedelta("15m"), - end_time=ENDTIME_2 - pd.Timedelta("15m"), + get_status=False, + start=START_TIME_1 + pd.Timedelta("15m"), + end=END_TIME_2 - pd.Timedelta("15m"), ) assert len(intersecting_datasets) == 2 @@ -348,63 +342,63 @@ def test_store_empty_df(cache: BucketCache) -> None: cache.store( df=df, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) # Specify ts to ensure correct key /if/ stored df_read = cache.fetch( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) pd.testing.assert_frame_equal(df_read, pd.DataFrame()) cache.store( df=DF1, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) df_read = cache.fetch( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) pd.testing.assert_frame_equal(DF1, df_read, check_freq=False) cache.store( df=df, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) # Specify ts to ensure correct key /if/ stored df_read = cache.fetch( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) pd.testing.assert_frame_equal(DF1, df_read, check_freq=False) @@ -413,21 +407,21 @@ def test_store_single_df(cache: BucketCache) -> None: cache.store( df=DF1, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) df_read = cache.fetch( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) pd.testing.assert_frame_equal(DF1, df_read, check_freq=False) @@ -436,58 +430,58 @@ def test_fetch(cache: BucketCache) -> None: cache.store( df=DF1, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, 
) cache.store( df=DF2, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_2, - end_time=ENDTIME_2, + get_status=False, + start=START_TIME_2, + end=END_TIME_2, ) df_read = cache.fetch( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1 - pd.Timedelta("15m"), + get_status=False, + start=START_TIME_1, + end=END_TIME_1 - pd.Timedelta("15m"), ) pd.testing.assert_frame_equal( - DF1.loc[STARTTIME_1 : ENDTIME_1 - pd.Timedelta("15m")], # type: ignore[misc] + DF1.loc[START_TIME_1 : END_TIME_1 - pd.Timedelta("15m")], # type: ignore[misc] df_read, check_freq=False, ) df_read = cache.fetch( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1 - pd.Timedelta("15m"), - end_time=ENDTIME_1 + pd.Timedelta("15m"), + get_status=False, + start=START_TIME_1 - pd.Timedelta("15m"), + end=END_TIME_1 + pd.Timedelta("15m"), ) pd.testing.assert_frame_equal(DF1, df_read, check_freq=False) df_read = cache.fetch( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1 - pd.Timedelta("15m"), - end_time=ENDTIME_2 + pd.Timedelta("15m"), + get_status=False, + start=START_TIME_1 - pd.Timedelta("15m"), + end=END_TIME_2 + pd.Timedelta("15m"), ) pd.testing.assert_frame_equal(pd.concat([DF1, DF2]), df_read, check_freq=False) @@ -496,54 +490,54 @@ def test_store_overlapping_df(cache: BucketCache) -> None: cache.store( df=DF1, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_1, + get_status=False, + start=START_TIME_1, + end=END_TIME_1, ) cache.store( df=DF2, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_2, - end_time=ENDTIME_2, + get_status=False, + start=START_TIME_2, + end=END_TIME_2, ) cache.store( df=DF3, tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_3, - end_time=ENDTIME_3, + get_status=False, + start=START_TIME_3, + end=END_TIME_3, ) leaves = None for key in cache.iterkeys(): if len(key) > 0: leaves = key - _, starttime, endtime = leaves.split("_") # type: ignore[union-attr] - assert int(starttime) == STARTTIME_1_EPOCH - assert int(endtime) == ENDTIME_2_EPOCH + _, start, end = leaves.split("_") # type: ignore[union-attr] + assert int(start) == START_TIME_1_EPOCH + assert int(end) == END_TIME_2_EPOCH df_read = cache.fetch( tagname=TAGNAME, - readtype=READERTYPE, + read_type=READ_TYPE, ts=TS, stepped=False, - status=False, - start_time=STARTTIME_1, - end_time=ENDTIME_2, + get_status=False, + start=START_TIME_1, + end=END_TIME_2, ) df_expected = pd.concat( [ - DF1[STARTTIME_1 : STARTTIME_3 - pd.Timedelta(TS, unit="s")], # type: ignore[misc] - DF3[STARTTIME_3:ENDTIME_3], # type: ignore[misc] - DF2[ENDTIME_3 + pd.Timedelta(TS, unit="s") : ENDTIME_2], # type: ignore[misc] + DF1[START_TIME_1 : START_TIME_3 - pd.Timedelta(TS, unit="s")], # type: ignore[misc] + DF3[START_TIME_3:END_TIME_3], # type: ignore[misc] + DF2[END_TIME_3 + pd.Timedelta(TS, unit="s") : END_TIME_2], # type: ignore[misc] ] ) diff --git a/tests/test_cache.py b/tests/test_cache.py index 07744635..a12d409f 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -53,8 +53,8 @@ def 
test_key_path(cache: SmartCache) -> None: def test_cache_single_store_and_fetch(cache: SmartCache, data: pd.DataFrame) -> None: - cache.store(df=data, readtype=ReaderType.INT) - df_read = cache.fetch(tagname="tag1", readtype=ReaderType.INT, ts=60) + cache.store(df=data, read_type=ReaderType.INT) + df_read = cache.fetch(tagname="tag1", read_type=ReaderType.INT, ts=60) pd.testing.assert_frame_equal(data, df_read) @@ -63,53 +63,47 @@ def test_cache_multiple_store_single_fetch( ) -> None: df1 = data[0:3] df2 = data[2:10] - cache.store(df=df1, readtype=ReaderType.INT) - cache.store(df=df2, readtype=ReaderType.INT) - df_read = cache.fetch(tagname="tag1", readtype=ReaderType.INT, ts=60) + cache.store(df=df1, read_type=ReaderType.INT) + cache.store(df=df2, read_type=ReaderType.INT) + df_read = cache.fetch(tagname="tag1", read_type=ReaderType.INT, ts=60) pd.testing.assert_frame_equal(df_read, data) def test_interval_reads(cache: SmartCache, data: pd.DataFrame) -> None: - cache.store(df=data, readtype=ReaderType.INT) - start_time_oob = pd.to_datetime("2018-01-18 04:55:00") - start_time = pd.to_datetime("2018-01-18 05:05:00") - stop_time = pd.to_datetime("2018-01-18 05:08:00") - stop_time_oob = pd.to_datetime("2018-01-18 06:00:00") - - df_read = cache.fetch( - tagname="tag1", readtype=ReaderType.INT, ts=60, start_time=start_time - ) - pd.testing.assert_frame_equal(data[start_time:], df_read) # type: ignore[misc] + cache.store(df=data, read_type=ReaderType.INT) + start_oob = pd.to_datetime("2018-01-18 04:55:00") + start = pd.to_datetime("2018-01-18 05:05:00") + end = pd.to_datetime("2018-01-18 05:08:00") + end_oob = pd.to_datetime("2018-01-18 06:00:00") + + df_read = cache.fetch(tagname="tag1", read_type=ReaderType.INT, ts=60, start=start) + pd.testing.assert_frame_equal(data[start:], df_read) # type: ignore[misc] + df_read = cache.fetch(tagname="tag1", read_type=ReaderType.INT, ts=60, end=end) + pd.testing.assert_frame_equal(data[:end], df_read) # type: ignore[misc] df_read = cache.fetch( - tagname="tag1", readtype=ReaderType.INT, ts=60, stop_time=stop_time - ) - pd.testing.assert_frame_equal(data[:stop_time], df_read) # type: ignore[misc] - df_read = cache.fetch( - tagname="tag1", readtype=ReaderType.INT, ts=60, start_time=start_time_oob + tagname="tag1", read_type=ReaderType.INT, ts=60, start=start_oob ) pd.testing.assert_frame_equal(data, df_read) - df_read = cache.fetch( - tagname="tag1", readtype=ReaderType.INT, ts=60, stop_time=stop_time_oob - ) + df_read = cache.fetch(tagname="tag1", read_type=ReaderType.INT, ts=60, end=end_oob) pd.testing.assert_frame_equal(data, df_read) df_read = cache.fetch( tagname="tag1", - readtype=ReaderType.INT, + read_type=ReaderType.INT, ts=60, - start_time=start_time, - stop_time=stop_time, + start=start, + end=end, ) - pd.testing.assert_frame_equal(data[start_time:stop_time], df_read) # type: ignore[misc] + pd.testing.assert_frame_equal(data[start:end], df_read) # type: ignore[misc] def test_store_empty_df(cache: SmartCache, data: pd.DataFrame) -> None: # Empty dataframes should not be stored (note: df full of NaN is not empty!) 
- cache.store(df=data, readtype=ReaderType.INT) + cache.store(df=data, read_type=ReaderType.INT) df = pd.DataFrame({"tag1": []}) cache.store( - df=df, readtype=ReaderType.INT, ts=60 + df=df, read_type=ReaderType.INT, ts=60 ) # Specify ts to ensure correct key /if/ stored - df_read = cache.fetch(tagname="tag1", readtype=ReaderType.INT, ts=60) + df_read = cache.fetch(tagname="tag1", read_type=ReaderType.INT, ts=60) pd.testing.assert_frame_equal(data, df_read) @@ -126,7 +120,7 @@ def test_store_metadata(cache: SmartCache) -> None: assert "noworky" not in r -def test_to_DST_skips_time(cache: SmartCache) -> None: +def test_to_dst_skips_time(cache: SmartCache) -> None: index = pd.date_range( start="2018-03-25 01:50:00", end="2018-03-25 03:30:00", @@ -139,12 +133,12 @@ def test_to_DST_skips_time(cache: SmartCache) -> None: assert ( df.loc["2018-03-25 01:50:00":"2018-03-25 03:10:00"].size == (2 + 1 * 6 + 1) - 6 # type: ignore[misc] ) - cache.store(df=df, readtype=ReaderType.INT) - df_read = cache.fetch(tagname="tag1", readtype=ReaderType.INT, ts=600) + cache.store(df=df, read_type=ReaderType.INT) + df_read = cache.fetch(tagname="tag1", read_type=ReaderType.INT, ts=600) pd.testing.assert_frame_equal(df_read, df) -def test_from_DST_folds_time(cache: SmartCache) -> None: +def test_from_dst_folds_time(cache: SmartCache) -> None: index = pd.date_range( start="2017-10-29 00:30:00", end="2017-10-29 04:30:00", @@ -164,6 +158,6 @@ def test_from_DST_folds_time(cache: SmartCache) -> None: assert ( df.loc["2017-10-29 01:50:00":"2017-10-29 03:10:00"].size == 2 + (1 + 1) * 6 + 1 # type: ignore[misc] ) - cache.store(df=df, readtype=ReaderType.INT) - df_read = cache.fetch(tagname="tag1", readtype=ReaderType.INT, ts=600) + cache.store(df=df, read_type=ReaderType.INT) + df_read = cache.fetch(tagname="tag1", read_type=ReaderType.INT, ts=600) pd.testing.assert_frame_equal(df_read, df) diff --git a/tests/test_clients.py b/tests/test_clients.py index e37476a9..46d5c69b 100644 --- a/tests/test_clients.py +++ b/tests/test_clients.py @@ -16,18 +16,14 @@ def test_get_next_timeslice() -> None: - starttime = pd.to_datetime("2018-01-02 14:00:00") - endtime = pd.to_datetime("2018-01-02 14:15:00") + start = pd.to_datetime("2018-01-02 14:00:00") + end = pd.to_datetime("2018-01-02 14:15:00") # taglist = ['tag1', 'tag2', 'tag3'] ts = timedelta(seconds=60) - res = get_next_timeslice( - start_time=starttime, stop_time=endtime, ts=ts, max_steps=20 - ) - assert starttime, starttime + timedelta(seconds=6) == res - res = get_next_timeslice( - start_time=starttime, stop_time=endtime, ts=ts, max_steps=100000 - ) - assert starttime, endtime == res + res = get_next_timeslice(start=start, end=end, ts=ts, max_steps=20) + assert start, start + timedelta(seconds=6) == res + res = get_next_timeslice(start=start, end=end, ts=ts, max_steps=100000) + assert start, end == res def test_get_missing_intervals() -> None: @@ -41,8 +37,8 @@ def test_get_missing_intervals() -> None: df = pd.concat([df_total.iloc[0:2], df_total.iloc[3:4], df_total.iloc[8:]]) missing = get_missing_intervals( df=df, - start_time=datetime(2018, 1, 18, 5, 0, 0), - stop_time=datetime(2018, 1, 18, 6, 0, 0), + start=datetime(2018, 1, 18, 5, 0, 0), + end=datetime(2018, 1, 18, 6, 0, 0), ts=timedelta(seconds=ts), read_type=ReaderType.INT, ) @@ -59,7 +55,7 @@ def test_get_missing_intervals() -> None: reason="ODBC drivers require Windows and are unavailable in GitHub Actions", ) class TestODBC: - def test_PI_init_odbc_client_with_host_port(self) -> None: + def 
test_pi_init_odbc_client_with_host_port(self) -> None: host = "thehostname" port = 999 c = IMSClient(datasource="whatever", imstype="pi", host=host) @@ -69,7 +65,7 @@ def test_PI_init_odbc_client_with_host_port(self) -> None: assert c.handler.host == host assert c.handler.port == port - def test_IP21_init_odbc_client_with_host_port(self) -> None: + def test_ip21_init_odbc_client_with_host_port(self) -> None: host = "thehostname" port = 999 c = IMSClient(datasource="whatever", imstype="ip21", host=host) @@ -79,7 +75,7 @@ def test_IP21_init_odbc_client_with_host_port(self) -> None: assert c.handler.host == host assert c.handler.port == port - def test_PI_connection_string_override(self) -> None: + def test_pi_connection_string_override(self) -> None: connstr = "someuserspecifiedconnectionstring" c = IMSClient( datasource="whatever", @@ -89,7 +85,7 @@ def test_PI_connection_string_override(self) -> None: ) assert c.handler.generate_connection_string() == connstr - def test_IP21_connection_string_override(self) -> None: + def test_ip21_connection_string_override(self) -> None: connstr = "someuserspecifiedconnectionstring" c = IMSClient( datasource="whatever", @@ -101,15 +97,15 @@ def test_IP21_connection_string_override(self) -> None: def test_init_odbc_clients(self) -> None: with pytest.raises(ValueError): - c = IMSClient(datasource="xyz") + _ = IMSClient(datasource="xyz") with pytest.raises(ValueError): - c = IMSClient(datasource="sNa", imstype="pi") + _ = IMSClient(datasource="sNa", imstype="pi") with pytest.raises(ValueError): - c = IMSClient(datasource="Ono-imS", imstype="aspen") + _ = IMSClient(datasource="Ono-imS", imstype="aspen") with pytest.raises(ValueError): - c = IMSClient(datasource="ono-ims", imstype="aspen") + _ = IMSClient(datasource="ono-ims", imstype="aspen") with pytest.raises(ValueError): - c = IMSClient(datasource="sna", imstype="pi") + _ = IMSClient(datasource="sna", imstype="pi") c = IMSClient(datasource="onO-iMs", imstype="pi") assert isinstance(c.handler, PIHandlerODBC) c = IMSClient(datasource="snA", imstype="aspen") diff --git a/tests/test_data_integrity.py b/tests/test_data_integrity.py index 6e426d65..5ff854f1 100644 --- a/tests/test_data_integrity.py +++ b/tests/test_data_integrity.py @@ -39,7 +39,7 @@ @pytest.fixture # type: ignore[misc] -def PIClientOdbc() -> Generator[IMSClient, None, None]: +def pi_client_odbc() -> Generator[IMSClient, None, None]: c = IMSClient(datasource=PI_DS, imstype="pi") if os.path.exists(PI_DS + ".h5"): os.remove(PI_DS + ".h5") @@ -51,7 +51,7 @@ def PIClientOdbc() -> Generator[IMSClient, None, None]: @pytest.fixture # type: ignore[misc] -def PIClientWeb() -> Generator[IMSClient, None, None]: +def pi_client_web() -> Generator[IMSClient, None, None]: c = IMSClient(datasource=PI_DS, imstype="piwebapi", verifySSL=verifySSL) if os.path.exists(PI_DS + ".h5"): os.remove(PI_DS + ".h5") @@ -63,7 +63,7 @@ def PIClientWeb() -> Generator[IMSClient, None, None]: @pytest.fixture # type: ignore[misc] -def AspenClientOdbc() -> Generator[IMSClient, None, None]: +def aspen_client_odbc() -> Generator[IMSClient, None, None]: c = IMSClient(datasource=ASPEN_DS, imstype="ip21") if os.path.exists(ASPEN_DS + ".h5"): os.remove(ASPEN_DS + ".h5") @@ -75,7 +75,7 @@ def AspenClientOdbc() -> Generator[IMSClient, None, None]: @pytest.fixture # type: ignore[misc] -def AspenClientWeb() -> Generator[IMSClient, None, None]: +def aspen_client_web() -> Generator[IMSClient, None, None]: c = IMSClient(datasource=ASPEN_DS, imstype="aspenone", verifySSL=bool(verifySSL)) if 
os.path.exists(ASPEN_DS + ".h5"): os.remove(ASPEN_DS + ".h5") @@ -87,16 +87,16 @@ def AspenClientWeb() -> Generator[IMSClient, None, None]: def test_pi_odbc_web_same_values_int( - PIClientOdbc: IMSClient, PIClientWeb: IMSClient + pi_client_odbc: IMSClient, pi_client_web: IMSClient ) -> None: - df_odbc = PIClientOdbc.read( + df_odbc = pi_client_odbc.read( tags=PI_TAG, start_time=PI_START_TIME, end_time=PI_END_TIME, ts=TS, read_type=ReaderType.INT, ) - df_web = PIClientWeb.read( + df_web = pi_client_web.read( tags=PI_TAG, start_time=PI_START_TIME, end_time=PI_END_TIME, @@ -110,16 +110,16 @@ def test_pi_odbc_web_same_values_int( def test_pi_odbc_web_same_values_aggregated( - PIClientOdbc: IMSClient, PIClientWeb: IMSClient + pi_client_odbc: IMSClient, pi_client_web: IMSClient ) -> None: - df_odbc = PIClientOdbc.read( + df_odbc = pi_client_odbc.read( tags=PI_TAG, start_time=PI_START_TIME, end_time=PI_END_TIME, ts=TS, read_type=ReaderType.AVG, ) - df_web = PIClientWeb.read( + df_web = pi_client_web.read( tags=PI_TAG, start_time=PI_START_TIME, end_time=PI_END_TIME, @@ -133,15 +133,15 @@ def test_pi_odbc_web_same_values_aggregated( def test_aspen_odbc_web_same_values_raw( - AspenClientOdbc: IMSClient, AspenClientWeb: IMSClient + aspen_client_odbc: IMSClient, aspen_client_web: IMSClient ) -> None: - df_odbc = AspenClientOdbc.read( + df_odbc = aspen_client_odbc.read( tags=ASPEN_TAG, start_time=ASPEN_START_TIME, end_time=ASPEN_END_TIME, read_type=ReaderType.RAW, ) - df_web = AspenClientWeb.read( + df_web = aspen_client_web.read( tags=ASPEN_TAG, start_time=ASPEN_START_TIME, end_time=ASPEN_END_TIME, @@ -152,16 +152,16 @@ def test_aspen_odbc_web_same_values_raw( def test_aspen_odbc_web_same_values_int( - AspenClientOdbc: IMSClient, AspenClientWeb: IMSClient + aspen_client_odbc: IMSClient, aspen_client_web: IMSClient ) -> None: - df_odbc = AspenClientOdbc.read( + df_odbc = aspen_client_odbc.read( tags=ASPEN_TAG, start_time=ASPEN_START_TIME, end_time=ASPEN_END_TIME, ts=TS, read_type=ReaderType.INT, ) - df_web = AspenClientWeb.read( + df_web = aspen_client_web.read( tags=ASPEN_TAG, start_time=ASPEN_START_TIME, end_time=ASPEN_END_TIME, @@ -173,16 +173,16 @@ def test_aspen_odbc_web_same_values_int( def test_aspen_odbc_web_same_values_aggregated( - AspenClientOdbc: IMSClient, AspenClientWeb: IMSClient + aspen_client_odbc: IMSClient, aspen_client_web: IMSClient ) -> None: - df_odbc = AspenClientOdbc.read( + df_odbc = aspen_client_odbc.read( tags=ASPEN_TAG, start_time=ASPEN_START_TIME, end_time=ASPEN_END_TIME, ts=TS, read_type=ReaderType.AVG, ) - df_web = AspenClientWeb.read( + df_web = aspen_client_web.read( tags=ASPEN_TAG, start_time=ASPEN_START_TIME, end_time=ASPEN_END_TIME, @@ -193,9 +193,9 @@ def test_aspen_odbc_web_same_values_aggregated( pd.testing.assert_frame_equal(df_odbc, df_web) -def test_concat_proper_fill_up(PIClientWeb: IMSClient) -> None: - max_rows_backup = PIClientWeb.handler._max_rows - df_int = PIClientWeb.read( +def test_concat_proper_fill_up(pi_client_web: IMSClient) -> None: + max_rows_backup = pi_client_web.handler._max_rows + df_int = pi_client_web.read( tags=PI_TAG, start_time=PI_START_TIME, end_time=PI_END_TIME, @@ -203,7 +203,7 @@ def test_concat_proper_fill_up(PIClientWeb: IMSClient) -> None: read_type=ReaderType.INT, ) assert len(df_int) == 16 - df_avg = PIClientWeb.read( + df_avg = pi_client_web.read( tags=PI_TAG, start_time=PI_START_TIME, end_time=PI_END_TIME, @@ -212,8 +212,8 @@ def test_concat_proper_fill_up(PIClientWeb: IMSClient) -> None: ) assert len(df_avg) == 15 - 
PIClientWeb.handler._max_rows = 5 - df_int_concat = PIClientWeb.read( + pi_client_web.handler._max_rows = 5 + df_int_concat = pi_client_web.read( tags=PI_TAG, start_time=PI_START_TIME, end_time=PI_END_TIME, @@ -221,7 +221,7 @@ def test_concat_proper_fill_up(PIClientWeb: IMSClient) -> None: read_type=ReaderType.INT, ) assert len(df_int_concat) == 16 - df_avg_concat = PIClientWeb.read( + df_avg_concat = pi_client_web.read( tags=PI_TAG, start_time=PI_START_TIME, end_time=PI_END_TIME, @@ -231,19 +231,19 @@ def test_concat_proper_fill_up(PIClientWeb: IMSClient) -> None: assert len(df_avg_concat) == 15 pd.testing.assert_frame_equal(df_int, df_int_concat) pd.testing.assert_frame_equal(df_avg, df_avg_concat) - PIClientWeb.handler._max_rows = max_rows_backup + pi_client_web.handler._max_rows = max_rows_backup -def test_cache_proper_fill_up(PIClientWeb: IMSClient, tmp_path: Path) -> None: - PIClientWeb.cache = SmartCache(directory=tmp_path) - df_int_1 = PIClientWeb.read( +def test_cache_proper_fill_up(pi_client_web: IMSClient, tmp_path: Path) -> None: + pi_client_web.cache = SmartCache(directory=tmp_path) + df_int_1 = pi_client_web.read( tags=PI_TAG, start_time=PI_START_TIME, end_time=PI_END_TIME, ts=TS, read_type=ReaderType.INT, ) - df_int_2 = PIClientWeb.read( + df_int_2 = pi_client_web.read( tags=PI_TAG, start_time=PI_START_TIME_2, end_time=PI_END_TIME_2, @@ -252,11 +252,11 @@ def test_cache_proper_fill_up(PIClientWeb: IMSClient, tmp_path: Path) -> None: ) assert len(df_int_1) == 16 assert len(df_int_2) == 16 - df_cached = PIClientWeb.cache.fetch( # type: ignore[call-arg] + df_cached = pi_client_web.cache.fetch( # type: ignore[call-arg] tagname=PI_TAG, - readtype=ReaderType.INT, + read_type=ReaderType.INT, ts=TS, - start_time=ensure_datetime_with_tz(PI_START_TIME), - stop_time=ensure_datetime_with_tz(PI_END_TIME_2), + start=ensure_datetime_with_tz(PI_START_TIME), + end=ensure_datetime_with_tz(PI_END_TIME_2), ) assert len(df_cached) == 32