From 1bb6728eb274b881f778daf682a655d928f3879b Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Thu, 20 Jun 2024 18:03:06 +0200 Subject: [PATCH 01/21] support for .sur .pro export, bugfixes --- .../supported_formats/digitalsurf.rst | 32 +- rsciio/digitalsurf/__init__.py | 3 +- rsciio/digitalsurf/_api.py | 1051 +++++++++++++++-- rsciio/digitalsurf/specifications.yaml | 4 +- rsciio/emd/_emd_velox.py | 14 +- rsciio/tests/test_digitalsurf.py | 153 +++ rsciio/tests/test_emd_velox.py | 1 + upcoming_changes/274.bugfix.rst | 1 + 8 files changed, 1163 insertions(+), 96 deletions(-) create mode 100644 upcoming_changes/274.bugfix.rst diff --git a/doc/user_guide/supported_formats/digitalsurf.rst b/doc/user_guide/supported_formats/digitalsurf.rst index 0f6610ccd..48608a28d 100644 --- a/doc/user_guide/supported_formats/digitalsurf.rst +++ b/doc/user_guide/supported_formats/digitalsurf.rst @@ -3,16 +3,30 @@ DigitalSurf format (SUR & PRO) ------------------------------ -The ``.sur`` and ``.pro`` files are a format developed by the digitalsurf company to handle various types of -scientific measurements data such as profilometer, SEM, AFM, RGB(A) images, multilayer -surfaces and profiles. Even though it is essentially a surfaces format, 1D signals -are supported for spectra and spectral maps. Specifically, this file format is used -by Attolight SA for its scanning electron microscope cathodoluminescence -(SEM-CL) hyperspectral maps. Metadata parsing is supported, including user-specific -metadata, as well as the loading of files containing multiple objects packed together. +The ``.sur`` and ``.pro`` files are a format developed by the digitalsurf company to handle +various types of scientific data with their MountainsMap software, such as profilometer, SEM, +AFM, RGB(A) images, multilayer surfaces and profiles. Even though it is essentially a surfaces +format, 1D signals are supported for spectra and spectral maps. 
Specifically, this file format +is used by Attolight SA for its scanning electron microscope cathodoluminescence (SEM-CL) +hyperspectral maps. The plugin was developed based on the MountainsMap software documentation, +which contains a description of the binary format. -The plugin was developed based on the MountainsMap software documentation, which -contains a description of the binary format. +Support for ``.sur`` and ``.pro`` datasets loading is complete, including parsing of user/customer +-specific metadata, and opening of files containing multiple objects. Some rare specific objects +(e.g. force curves) are not supported, due to no example data being available. Those can be added +upon request and providing of example datasets. Heterogeneous data can be represented in ``.sur`` +and ``.pro`` objects, for instance floating-point/topography and rgb data can coexist along the same +navigation dimension. Those are casted to a homogeneous floating-point representation upon loading. + +Support for data saving is partial as ``.sur`` and ``.pro`` can be fundamentally incompatible with +hyperspy signals. First, they have limited dimensionality. Up to 3d data arrays with +either 1d (series of images) or 2d (hyperspectral studiable) navigation space can be saved. Also, +``.sur`` and ``.pro`` do not support non-uniform axes and saving of models. Finally, ``.sur`` / ``.pro`` +linearize intensities along a uniform axis to enforce an integer-representation of the data (with scaling and +offset). This means that export from float-type hyperspy signals is inherently lossy. + +Within these limitations, all features from the fileformat are supported at export, notably data +compression and setting of custom metadata. 
API functions ^^^^^^^^^^^^^ diff --git a/rsciio/digitalsurf/__init__.py b/rsciio/digitalsurf/__init__.py index 40459e88b..7db9455d9 100644 --- a/rsciio/digitalsurf/__init__.py +++ b/rsciio/digitalsurf/__init__.py @@ -1,7 +1,8 @@ -from ._api import file_reader +from ._api import file_reader, file_writer __all__ = [ "file_reader", + "file_writer" ] diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index e81695cb4..cbf999ff1 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -23,17 +23,19 @@ # comments can be systematically parsed into metadata and write a support for # original_metadata or other +import datetime +from copy import deepcopy import logging import os import struct import sys +import re import warnings import zlib +import ast # Commented for now because I don't know what purpose it serves # import traits.api as t -from copy import deepcopy - # Dateutil allows to parse date but I don't think it's useful here # import dateutil.parser import numpy as np @@ -45,12 +47,13 @@ # import rsciio.utils.tools # DictionaryTreeBrowser class handles the fancy metadata dictionnaries # from hyperspy.misc.utils import DictionaryTreeBrowser -from rsciio._docstrings import FILENAME_DOC, LAZY_UNSUPPORTED_DOC, RETURNS_DOC +from rsciio._docstrings import FILENAME_DOC, SIGNAL_DOC from rsciio.utils.exceptions import MountainsMapFileError +from rsciio.utils.rgb_tools import is_rgb, is_rgba +from rsciio.utils.date_time_tools import get_date_time_from_metadata _logger = logging.getLogger(__name__) - class DigitalSurfHandler(object): """Class to read Digital Surf MountainsMap files. 
@@ -81,26 +84,28 @@ class DigitalSurfHandler(object): 6: "_MERIDIANDISC", 7: "_MULTILAYERPROFILE", 8: "_MULTILAYERSURFACE", - 9: "_PARALLELDISC", + 9: "_PARALLELDISC", #not implemented 10: "_INTENSITYIMAGE", 11: "_INTENSITYSURFACE", 12: "_RGBIMAGE", - 13: "_RGBSURFACE", - 14: "_FORCECURVE", - 15: "_SERIEOFFORCECURVE", - 16: "_RGBINTENSITYSURFACE", + 13: "_RGBSURFACE", #Deprecated + 14: "_FORCECURVE", #Deprecated + 15: "_SERIEOFFORCECURVE", #Deprecated + 16: "_RGBINTENSITYSURFACE", #Surface + Image + 17: "_CONTOURPROFILE", + 18: "_SERIESOFRGBIMAGES", 20: "_SPECTRUM", 21: "_HYPCARD", } - def __init__(self, filename=None): + def __init__(self, filename : str|None = None): # We do not need to check for file existence here because # io module implements it in the load function self.filename = filename # The signal_dict dictionnary has to be returned by the - # file_reader function. Apparently original_metadata needs - # to be set + # file_reader function. By default, we return the minimal + # mandatory fields self.signal_dict = { "data": np.empty((0, 0, 0)), "axes": [], @@ -115,12 +120,12 @@ def __init__(self, filename=None): # _work_dict['Field']['b_pack_fn'](f,v): pack value v in file f self._work_dict = { "_01_Signature": { - "value": "DSCOMPRESSED", + "value": "DSCOMPRESSED", #Uncompressed key is DIGITAL SURF "b_unpack_fn": lambda f: self._get_str(f, 12, "DSCOMPRESSED"), "b_pack_fn": lambda f, v: self._set_str(f, v, 12), }, "_02_Format": { - "value": 0, + "value": 1, "b_unpack_fn": self._get_int16, "b_pack_fn": self._set_int16, }, @@ -145,7 +150,7 @@ def __init__(self, filename=None): "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, "_07_Operator_Name": { - "value": "", + "value": "ROSETTA", "b_unpack_fn": lambda f: self._get_str(f, 30, ""), "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, @@ -200,17 +205,17 @@ def __init__(self, filename=None): "b_pack_fn": self._set_int32, }, "_18_Number_of_Points": { - "value": 0, + "value": 1, "b_unpack_fn": 
self._get_int32, "b_pack_fn": self._set_int32, }, "_19_Number_of_Lines": { - "value": 0, + "value": 1, "b_unpack_fn": self._get_int32, "b_pack_fn": self._set_int32, }, "_20_Total_Nb_of_Pts": { - "value": 0, + "value": 1, "b_unpack_fn": self._get_int32, "b_pack_fn": self._set_int32, }, @@ -305,7 +310,7 @@ def __init__(self, filename=None): "b_pack_fn": self._set_int16, }, "_39_Obsolete": { - "value": 0, + "value": b'0', "b_unpack_fn": lambda f: self._get_bytes(f, 12), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 12), }, @@ -355,7 +360,7 @@ def __init__(self, filename=None): "b_pack_fn": self._set_uint32, }, "_49_Obsolete": { - "value": 0, + "value": b'0', "b_unpack_fn": lambda f: self._get_bytes(f, 6), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 6), }, @@ -370,7 +375,7 @@ def __init__(self, filename=None): "b_pack_fn": self._set_int16, }, "_52_Client_zone": { - "value": 0, + "value": b'0', "b_unpack_fn": lambda f: self._get_bytes(f, 128), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 128), }, @@ -422,7 +427,7 @@ def __init__(self, filename=None): "_62_points": { "value": 0, "b_unpack_fn": self._unpack_data, - "b_pack_fn": lambda f, v: 0, # Not implemented + "b_pack_fn": self._pack_data, }, } @@ -442,6 +447,732 @@ def __init__(self, filename=None): self._N_data_object = 1 self._N_data_channels = 1 + # Attributes useful for save and export + + # Number of nav / sig axes + self._n_ax_nav: int = 0 + self._n_ax_sig: int = 0 + + # All as a rsciio-convention axis dict or empty + self.Xaxis: dict = {} + self.Yaxis: dict = {} + self.Zaxis: dict = {} + self.Taxis: dict = {} + + # These must be set in the split functions + self.data_split = [] + self.objtype_split = [] + # Packaging methods for writing files + + def _build_sur_file_contents(self, + set_comments:str='auto', + is_special:bool=False, + compressed:bool=True, + comments: dict = {}, + operator_name: str = '', + private_zone: bytes = b'', + client_zone: bytes = b'' + ): + + self._list_sur_file_content = [] + 
+ #Compute number of navigation / signal axes + self._n_ax_nav, self._n_ax_sig = DigitalSurfHandler._get_n_axes(self.signal_dict) + + # Choose object type based on number of navigation and signal axes + # Populate self.Xaxis, self.Yaxis, self.Taxis (if not empty) + # Populate self.data_split and self.objtype_split (always) + self._split_signal_dict() + + # This initialize the Comment string saved with the studiable. + comment_dict = self._get_comment_dict(self.signal_dict['original_metadata'], + method=set_comments, + custom=comments) + comment_str = self._stringify_dict(comment_dict) + + #Now we build a workdict for every data object + for data,objtype in zip(self.data_split,self.objtype_split): + self._build_workdict(data, + objtype, + self.signal_dict['metadata'], + comment=comment_str, + is_special=is_special, + compressed=compressed, + operator_name=operator_name, + private_zone=private_zone, + client_zone=client_zone) + # if more than one object, we erase comment after first object. + if comment_str: + comment_str = '' + + # Finally we push it all to the content list. + self._append_work_dict_to_content() + + def _write_sur_file(self): + """Write self._list_sur_file_content to a """ + + with open(self.filename, "wb") as f: + for dic in self._list_sur_file_content: + # Extremely important! self._work_dict must access + # other fields to properly encode and decode data, + # comments etc. etc. + self._move_values_to_workdict(dic) + # Then inner consistency is trivial + for key in self._work_dict: + self._work_dict[key]['b_pack_fn'](f,self._work_dict[key]['value']) + + @staticmethod + def _get_n_axes(sig_dict: dict) -> tuple[int,int]: + """Return number of navigation and signal axes in the signal dict (in that order). + + Args: + sig_dict (dict): signal dictionary. Contains keys 'data', 'axes', 'metadata', 'original_metadata' + + Returns: + Tuple[int,int]: nax_nav,nax_sig. 
Number of navigation and signal axes + """ + nax_nav = 0 + nax_sig = 0 + for ax in sig_dict['axes']: + if ax['navigate']: + nax_nav += 1 + else: + nax_sig += 1 + return nax_nav, nax_sig + + @staticmethod + def _get_nobjects(omd: dict) -> int: + maxobj = 0 + for k in omd: + objnum = k.split('_')[1] + objnum = int(objnum) + if objnum > maxobj: + maxobj = objnum + return maxobj + + def _is_spectrum(self) -> bool: + """Determine if a signal is a spectrum based on axes naming""" + + spectrumlike_axnames = ['Wavelength', 'Energy', 'Energy Loss', 'E'] + is_spec = False + + for ax in self.signal_dict['axes']: + if ax['name'] in spectrumlike_axnames: + is_spec = True + + return is_spec + + def _is_surface(self) -> bool: + """Determine if a 2d-data-like signal_dict should be of surface type, ie the dataset + is a 2d surface of the 3d plane. """ + is_surface = False + surfacelike_quantnames = ['Height', 'Altitude', 'Elevation', 'Depth', 'Z'] + quant: str = self.signal_dict['metadata']['Signal']['quantity'] + for name in surfacelike_quantnames: + if quant.startswith(name): + is_surface = True + + return is_surface + + def _is_binary(self) -> bool: + return self.signal_dict['data'].dtype == bool + + def _get_num_chans(self) -> int: + """Get number of channels (aka point size) + + Args: + obj_type (int): Object type numeric code + + Returns: + int: Number of channels (point size). 
+ """ + obj_type = self._get_object_type() + + if obj_type == 11: + return 2 #Intensity + surface (deprecated type) + elif obj_type in [12,18]: + return 3 #RGB types + elif obj_type == 13: + return 4 #RGB surface + elif obj_type in [14, 15, 35, 36]: + return 2 #Force curves + elif obj_type in [16]: + return 5 #Surface, Intensity, R, G, B (but hardly applicable to hyperspy) + else: + return 1 + + def _get_wsize(self, nax_sig: int) -> int: + if nax_sig != 1: + raise MountainsMapFileError(f"Attempted parsing W-axis size from signal with navigation dimension {nax_sig}!= 1.") + for ax in self.signal_dict['axes']: + if not ax['navigate']: + return ax['size'] + + def _get_num_objs(self,) -> int: + """Get number of objects based on object type and number of navigation axes in the signal. + + Raises: + ValueError: Several digital surf save formats will need a navigation dimension of 1 + + Returns: + int: _description_ + """ + obj_type = self._get_object_type() + nax_nav, _ = self._get_n_axes() + + if obj_type in [1,2,3,6,9,10,11,12,13,14,15,16,17,20,21,35,36,37]: + return 1 + elif obj_type in [4,5,7,8,18]: + if nax_nav != 1: + raise MountainsMapFileError(f"Attempted to save signal with number type {obj_type} and navigation dimension {nax_nav}.") + for ax in enumerate(self.signal_dict['axes']): + if ax['navigate']: + return ax['size'] + + def _get_object_type(self) -> int: + """Select the suitable _mountains_object_types """ + + nax_nav, nax_sig = self._get_n_axes(self.signal_dict) + + obj_type = None + if nax_nav == 0: + if nax_sig == 0: + raise MountainsMapFileError(msg=f"Object with empty navigation and signal axes not supported for .sur export") + elif nax_sig == 1: + if self._is_spectrum(): + obj_type = 20 # '_SPECTRUM' + else: + obj_type = 1 # '_PROFILE' + elif nax_sig == 2: + if self._is_binary(): + obj_type = 3 # "_BINARYIMAGE" + elif is_rgb(self.signal_dict['data']): + obj_type = 12 #"_RGBIMAGE" + elif is_rgba(self.signal_dict['data']): + warnings.warn(f"Alpha 
channel discarded upon saving RGBA signal in .sur format") + obj_type = 12 #"_RGBIMAGE" + elif self._is_surface(): + obj_type = 2 #'_SURFACE' + else: + obj_type = 10 #_INTENSITYSURFACE + else: + raise MountainsMapFileError(msg=f"Object with signal dimension {nax_sig} > 2 not supported for .sur export") + elif nax_nav == 1: + if nax_sig == 0: + warnings.warn(f"Exporting surface signal dimension {nax_sig} and navigation dimension {nax_nav} falls back on surface type but is not good practice.") + obj_type = 1 # '_PROFILE' + elif nax_sig == 1: + if self._is_spectrum(): + obj_type = 20 # '_SPECTRUM' + else: + obj_type = 1 # '_PROFILE' + elif nax_sig ==2: + #Also warn + if is_rgb(self.signal_dict['data']): + obj_type = 18 #"_SERIESOFRGBIMAGE" + elif is_rgba(self.signal_dict['data']): + warnings.warn(f"Alpha channel discarded upon saving RGBA signal in .sur format") + obj_type = 18 #"_SERIESOFRGBIMAGE" + else: + obj_type = 5 #"_SURFACESERIE" + else: + raise MountainsMapFileError(msg=f"Object with signal dimension {nax_sig} > 2 not supported for .sur export") + elif nax_nav == 2: + if nax_sig == 0: + warnings.warn(f"Signal dimension {nax_sig} and navigation dimension {nax_nav} exported as surface type. Consider transposing signal object before exporting if this is intentional.") + if self._is_surface(): + obj_type = 2 #'_SURFACE' + else: + obj_type = 10 #_INTENSITYSURFACE + elif nax_sig == 1: + obj_type = 21 #'_HYPCARD' + else: + raise MountainsMapFileError(msg=f"Object with signal dimension {nax_sig} and navigation dimension {nax_nav} not supported for .sur export") + else: + #Also raise + raise MountainsMapFileError(msg=f"Object with navigation dimension {nax_nav} > 2 not supported for .sur export") + + return obj_type + + def _split_spectrum(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + #When splitting spectrum, remember that instead of the series axis (T/W), + #X axis is the spectral dimension and Y the series dimension (if series). 
+ # Xaxis = {} + # Yaxis = {} + nax_nav = self._n_ax_nav + nax_sig = self._n_ax_sig + + if (nax_nav,nax_sig)==(0,1) or (nax_nav,nax_sig)==(1,0): + self.Xaxis = self.signal_dict['axes'][0] + elif (nax_nav,nax_sig)==(1,1): + self.Xaxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.Yaxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + else: + raise MountainsMapFileError(f"Dimensions ({nax_nav})|{nax_sig}) invalid for export as spectrum type") + + self.data_split = [self.signal_dict['data']] + self.objtype_split = [20] + self._N_data_object = 1 + self._N_data_channels = 1 + + def _split_profile(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + + if (self._n_ax_nav,self._n_ax_sig) in [(0,1),(1,0)]: + self.Xaxis = self.signal_dict['axes'][0] + else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for a profile type") + + self.data_split = [self.signal_dict['data']] + self.objtype_split = [1] + self._N_data_object = 1 + self._N_data_channels = 1 + + def _split_profileserie(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + obj_type = 4 # '_PROFILESERIE' + + if (self._n_ax_nav,self._n_ax_sig)==(1,1): + self.Xaxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + + self.data_split = self._split_data_alongaxis(self.Taxis) + self.objtype_split = [obj_type] + [1]*(len(self.data_split)-1) + self._N_data_object = len(self.objtype_split) + self._N_data_channels = 1 + + def _split_binary_img(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + obj_type = 3 + if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] 
+ else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + + self.data_split = [self.signal_dict['data']] + self.objtype_split = [obj_type] + self._N_data_object = 1 + self._N_data_channels = 1 + + def _split_rgb(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + obj_type = 12 + if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] + else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + + self.data_split = [np.int32(self.signal_dict['data']['R']), + np.int32(self.signal_dict['data']['G']), + np.int32(self.signal_dict['data']['B']) + ] + self.objtype_split = [obj_type] + [10,10] + self._N_data_object = 1 + self._N_data_channels = 3 + + def _split_surface(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + obj_type = 2 + if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] + else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + self.data_split = [self.signal_dict['data']] + self.objtype_split = [obj_type] + self._N_data_object = 1 + self._N_data_channels = 1 + + def _split_intensitysurface(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + obj_type = 10 + if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] + else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + self.data_split = [self.signal_dict['data']] + self.objtype_split = [obj_type] + self._N_data_object = 1 + self._N_data_channels = 1 + + def 
_split_rgbserie(self): + obj_type = 18 #"_SERIESOFRGBIMAGE" + + sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.Yaxis = next(sigaxes_iter) + self.Xaxis = next(sigaxes_iter) + self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + tmp_data_split = self._split_data_alongaxis(self.Taxis) + + self.data_split = [] + self.objtype_split = [] + for d in tmp_data_split: + self.data_split += [d['R'].astype(np.int32), d['G'].astype(np.int32), d['B'].astype(np.int32)] + self.objtype_split += [12,10,10] + self.objtype_split[0] = obj_type + + self._N_data_object = self.Taxis['size'] + self._N_data_channels = 3 + + def _split_surfaceserie(self): + obj_type = 5 + sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.Yaxis = next(sigaxes_iter) + self.Xaxis = next(sigaxes_iter) + self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + self.data_split = self._split_data_alongaxis(self.Taxis) + self.objtype_split = [2]*len(self.data_split) + self.objtype_split[0] = obj_type + self._N_data_object = len(self.data_split) + self._N_data_channels = 1 + + def _split_hyperspectral(self): + obj_type = 21 + sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if ax['navigate']) + self.Yaxis = next(sigaxes_iter) + self.Xaxis = next(sigaxes_iter) + self.Taxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.data_split = [self.signal_dict['data']] + self.objtype_split = [obj_type] + self._N_data_object = 1 + self._N_data_channels = 1 + + def _split_data_alongaxis(self, axis: dict) -> list[np.ndarray]: + idx = self.signal_dict['axes'].index(axis) + # return idx + datasplit = [] + for dslice in np.rollaxis(self.signal_dict['data'],idx): + datasplit.append(dslice) + return datasplit + + def _split_signal_dict(self): + """Select the suitable _mountains_object_types """ + + n_nav = self._n_ax_nav + n_sig = self._n_ax_sig + + #Here, I manually unfold the 
nested conditions for legibility. + #Since there are a fixed number of dimensions supported by + # digitalsurf .sur/.pro files, I think this is the best way to + # proceed. + if (n_nav,n_sig) == (0,1): + if self._is_spectrum(): + self._split_spectrum() + else: + self._split_profile() + elif (n_nav,n_sig) == (0,2): + if self._is_binary(): + self._split_binary_img() + elif is_rgb(self.signal_dict['data']): #"_RGBIMAGE" + self._split_rgb() + elif is_rgba(self.signal_dict['data']): + warnings.warn(f"A channel discarded upon saving \ + RGBA signal in .sur format") + self._split_rgb() + elif self._is_surface(): #'_SURFACE' + self._split_surface() + else: # _INTENSITYSURFACE + self._split_intensitysurface() + elif (n_nav,n_sig) == (1,0): + warnings.warn(f"Exporting surface signal dimension {n_sig} and navigation dimension \ + {n_nav} falls back on profile type but is not good practice. Consider \ + transposing before saving to avoid unexpected behaviour.") + self._split_profile() + elif (n_nav,n_sig) == (1,1): + if self._is_spectrum(): + self._split_spectrum() + else: + self._split_profileserie() + elif (n_nav,n_sig) == (1,2): + if is_rgb(self.signal_dict['data']): + self._split_rgbserie() + if is_rgba(self.signal_dict['data']): + warnings.warn(f"Alpha channel discarded upon saving RGBA signal in .sur format") + obj_type = 18 #"_SERIESOFRGBIMAGE" + self._split_rgbserie() + else: + self._split_surfaceserie() + elif (n_nav,n_sig) == (2,0): + warnings.warn(f"Signal dimension {n_sig} and navigation dimension {n_nav} exported as surface type. 
Consider transposing signal object before exporting if this is intentional.") + if self._is_binary(): + self._split_binary_img() + elif is_rgb(self.signal_dict['data']): #"_RGBIMAGE" + self._split_rgb() + elif is_rgba(self.signal_dict['data']): + warnings.warn(f"A channel discarded upon saving \ + RGBA signal in .sur format") + self._split_rgb() + if self._is_surface(): + self._split_surface() + else: + self._split_intensitysurface() + elif (n_nav,n_sig) == (2,1): + self._split_hyperspectral() + else: + raise MountainsMapFileError(msg=f"Object with signal dimension {n_sig} and navigation dimension {n_nav} not supported for .sur export") + + def _norm_data(self, data: np.ndarray, is_special: bool, apply_sat_lo: bool = False, apply_sat_hi: bool = False): + """Normalize input data to 16-bits or 32-bits ints and initialize an axis on which the data is normalized. + + Args: + data (np.ndarray): dataset + is_special (bool): whether NaNs get sent to N.M points in the sur format. + apply_sat_lo (bool, optional): Signal low-value saturation in output datafile. Defaults to False. + apply_sat_hi (bool, optional): Signal high-value saturation in output datafile. Defaults to False. + + Raises: + MountainsMapFileError: raised if input is of complex type + MountainsMapFileError: raised if input is of unsigned int type + MountainsMapFileError: raised if input is of int > 32 bits type + + Returns: + tuple[int,int,int,float,float,np.ndarray[int]]: pointsize, Zmin, Zmax, Zscale, Zoffset, data_int + """ + data_type = data.dtype + + if np.issubdtype(data_type,np.complexfloating): + raise MountainsMapFileError(f"digitalsurf file formats do not support export of complex data. 
Convert data to real-value representations before before export") + elif data_type==np.uint8: + warnings.warn("np.uint8 datatype exported as 16bits") + pointsize = 16 #Pointsize has to be 16 or 32 in surf format + Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data.astype(np.int16), pointsize, is_special) + data_int = data.astype(np.int16) + elif data_type==np.uint16: + warnings.warn("np.uint16 datatype exported as 32bits") + pointsize = 32 #Pointsize has to be 16 or 32 in surf format + Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data.astype(np.int32), pointsize, is_special) + data_int = data.astype(np.int32) + elif np.issubdtype(data_type,np.unsignedinteger): + raise MountainsMapFileError(f"digitalsurf file formats do not support unsigned data >16bits. Convert data to signed integers before export.") + elif data_type==np.int8: + pointsize = 16 #Pointsize has to be 16 or 32 in surf format + Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, 8, is_special) + data_int = data + elif data_type==np.int16: + pointsize = 16 + Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, pointsize, is_special) + data_int = data + elif data_type==np.int32: + pointsize = 32 + data_int = data + Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, pointsize, is_special) + elif np.issubdtype(data_type,np.integer): + raise MountainsMapFileError(f"digitalsurf file formats do not support export integers larger than 32 bits. Convert data to 32-bit representation before exporting") + elif np.issubdtype(data_type,np.floating): + if self.signal_dict['data'].itemsize*8 > 32: + warnings.warn(f"Lossy conversion of {data_type} to 32-bits-ints representation will occur.") + pointsize = 32 + Zmin, Zmax, Zscale, Zoffset, data_int = self._norm_float(data, is_special) + + return pointsize, Zmin, Zmax, Zscale, Zoffset, data_int + + def _norm_signed_int(self, data:np.ndarray, intsize: int, is_special: bool = False): + # There are no NaN values for integers. 
Special points means considering high/low saturation of integer scale. + + data_int_min = - 2**(intsize-1) + data_int_max = 2**(intsize -1) + + is_satlo = (data==data_int_min).sum() >= 1 and is_special + is_sathi = (data==data_int_max).sum() >= 1 and is_special + + Zmin = data_int_min + 1 if is_satlo else data.min() + Zmax = data_int_max - 1 if is_sathi else data.max() + Zscale = 1.0 + Zoffset = 0.0 + + return Zmin, Zmax, Zscale, Zoffset + + def _norm_float(self, data : np.ndarray, is_special: bool = False,): + """Normalize float data on a 32 bits int scale.""" + + Zoffset_f = np.nanmin(data) + Zmax_f = np.nanmax(data) + is_nan = np.any(np.isnan(data)) + + if is_special and is_nan: + Zmin = - 2**(32-1) + 2 + Zmax = 2**32 + Zmin - 3 + else: + Zmin = - 2**(32-1) + Zmax = 2**32 + Zmin - 1 + + Zscale = (Zmax_f - Zoffset_f)/(Zmax - Zmin) + data_int = (data - Zoffset_f)/Zscale + Zmin + + if is_special and is_nan: + data_int[np.isnan(data)] = Zmin - 2 + + data_int = data_int.astype(np.int32) + + return Zmin, Zmax, Zscale, Zoffset_f, data_int + + def _get_Zname_Zunit(self, metadata: dict) -> tuple[str,str]: + """Attempt reading Z-axis name and Unit from metadata.Signal.Quantity field. + Return empty str if do not exist. 
+ + Returns: + tuple[str,str]: Zname,Zunit + """ + quantitystr: str = metadata.get('Signal',{}).get('quantity','') + quantitystr = quantitystr.strip() + quantity = quantitystr.split(' ') + if len(quantity)>1: + Zunit = quantity.pop() + Zunit = Zunit.strip('()') + Zname = ' '.join(quantity) + elif len(quantity)==1: + Zname = quantity.pop() + Zunit = '' + else: + Zname = '' + Zunit = '' + + return Zname,Zunit + + def _get_datetime_info(self,) -> tuple[int,int,int,int,int,int]: + date = self.signal_dict['metadata']['General'].get('date','') + time = self.signal_dict['metadata']['General'].get('time','') + + try: + [yyyy,mm,dd] = date.strip().split('-') + except ValueError: + [yyyy,mm,dd] = [0,0,0] + + try: + [hh,minmin,ss] = time.strip().strip('Z').slit(':') + except ValueError: + [hh,minmin,ss] = [0,0,0] + + return yyyy,mm,dd,hh,minmin,ss + + def _build_workdict(self, + data: np.ndarray, + obj_type: int, + metadata: dict = {}, + comment: str = "", + is_special: bool = True, + compressed: bool = True, + operator_name: str = '', + private_zone: bytes = b'', + client_zone: bytes = b'' + ): + + if not compressed: + self._work_dict['_01_Signature']['value'] = 'DIGITAL SURF' # DSCOMPRESSED by default + else: + self._work_dict['_01_Signature']['value'] = 'DSCOMPRESSED' # DSCOMPRESSED by default + + # self._work_dict['_02_Format']['value'] = 0 # Dft. other possible value is 257 for MacintoshII computers with Motorola CPUs. Obv not supported... + self._work_dict['_03_Number_of_Objects']['value'] = self._N_data_object + # self._work_dict['_04_Version']['value'] = 1 # Version number. Always default. 
+ self._work_dict['_05_Object_Type']['value'] = obj_type + # self._work_dict['_06_Object_Name']['value'] = '' Obsolete, DOS-version only (Not supported) + self._work_dict['_07_Operator_Name']['value'] = operator_name #Should be settable from kwargs + self._work_dict['_08_P_Size']['value'] = self._N_data_channels + + # self._work_dict['_09_Acquisition_Type']['value'] = 0 # AFM data only, could be inferred + # self._work_dict['_10_Range_Type']['value'] = 0 #Only 1 for high-range (z-stage scanning), AFM data only, could be inferred + + self._work_dict['_11_Special_Points']['value'] = int(is_special) + + # self._work_dict['_12_Absolute']['value'] = 0 #Probably irrelevant in most cases. Absolute vs rel heights (for profilometers), can be inferred + # self._work_dict['_13_Gauge_Resolution']['value'] = 0.0 #Probably irrelevant. Only for profilometers (maybe AFM), can be inferred + + # T-axis acts as W-axis for spectrum / hyperspectrum surfaces. + if obj_type in [21]: + ws = self.Taxis.get('size',0) + else: + ws = 0 + self._work_dict['_14_W_Size']['value'] = ws + + bsize, Zmin, Zmax, Zscale, Zoffset, data_int = self._norm_data(data,is_special,apply_sat_lo=True,apply_sat_hi=True) + Zname, Zunit = self._get_Zname_Zunit(metadata) + + #Axes element set regardless of object size + self._work_dict['_15_Size_of_Points']['value'] = bsize + self._work_dict['_16_Zmin']['value'] = Zmin + self._work_dict['_17_Zmax']['value'] = Zmax + self._work_dict['_18_Number_of_Points']['value']= self.Xaxis.get('size',1) + self._work_dict['_19_Number_of_Lines']['value'] = self.Yaxis.get('size',1) + self._work_dict['_20_Total_Nb_of_Pts']['value'] = data.size + self._work_dict['_21_X_Spacing']['value'] = self.Xaxis.get('scale',0.0) + self._work_dict['_22_Y_Spacing']['value'] = self.Yaxis.get('scale',0.0) + self._work_dict['_23_Z_Spacing']['value'] = Zscale + self._work_dict['_24_Name_of_X_Axis']['value'] = self.Xaxis.get('name','') + self._work_dict['_25_Name_of_Y_Axis']['value'] = 
self.Yaxis.get('name','') + self._work_dict['_26_Name_of_Z_Axis']['value'] = Zname + self._work_dict['_27_X_Step_Unit']['value'] = self.Xaxis.get('units','') + self._work_dict['_28_Y_Step_Unit']['value'] = self.Yaxis.get('units','') + self._work_dict['_29_Z_Step_Unit']['value'] = Zunit + self._work_dict['_30_X_Length_Unit']['value'] = self.Xaxis.get('units','') + self._work_dict['_31_Y_Length_Unit']['value'] = self.Yaxis.get('units','') + self._work_dict['_32_Z_Length_Unit']['value'] = Zunit + self._work_dict['_33_X_Unit_Ratio']['value'] = 1 + self._work_dict['_34_Y_Unit_Ratio']['value'] = 1 + self._work_dict['_35_Z_Unit_Ratio']['value'] = 1 + + # _36_Imprint -> Obsolete + # _37_Inverted -> Always No + # _38_Levelled -> Always No + # _39_Obsolete -> Obsolete + + dt: datetime.datetime = get_date_time_from_metadata(metadata,formatting='datetime') + if dt is not None: + self._work_dict['_40_Seconds']['value'] = dt.second + self._work_dict['_41_Minutes']['value'] = dt.minute + self._work_dict['_42_Hours']['value'] = dt.hour + self._work_dict['_43_Day']['value'] = dt.day + self._work_dict['_44_Month']['value'] = dt.month + self._work_dict['_45_Year']['value'] = dt.year + self._work_dict['_46_Day_of_week']['value'] = dt.weekday() + + # _47_Measurement_duration -> Nonsaved and non-metadata, but float in seconds + + if compressed: + data_bin = self._compress_data(data_int,nstreams=1) #nstreams hard-set to 1. 
Could be unlocked in the future + else: + # NOTE(review): this span was garbled in the patch; reconstructed uncompressed branch — TODO confirm against original + fmt = "<h" if bsize == 16 else "<i" + data_bin = data_int.ravel().astype(fmt).tobytes() + + self._work_dict['_48_Compressed_data_size']['value'] = len(data_bin) + + comment_len = len(comment.encode('latin-1')) + if comment_len > 2**15: + warnings.warn("Comment exceeding max length of 32.0 kB and will be cropped") + comment_len = 2**15 - 1 # int16 field: max is 32767, np.int16(2**15) would overflow + + self._work_dict['_50_Comment_size']['value'] = comment_len + + privatesize = len(private_zone) + if privatesize > 2**15: + warnings.warn("Private size exceeding max length of 32.0 kB and will be cropped") + privatesize = 2**15 - 1 # int16 field: max is 32767, np.int16(2**15) would overflow + + self._work_dict['_51_Private_size']['value'] = privatesize + + self._work_dict['_52_Client_zone']['value'] = client_zone + + self._work_dict['_53_X_Offset']['value'] = self.Xaxis.get('offset',0.0) + self._work_dict['_54_Y_Offset']['value'] = self.Yaxis.get('offset',0.0) + self._work_dict['_55_Z_Offset']['value'] = Zoffset + self._work_dict['_56_T_Spacing']['value'] = self.Taxis.get('scale',0.0) + self._work_dict['_57_T_Offset']['value'] = self.Taxis.get('offset',0.0) + self._work_dict['_58_T_Axis_Name']['value'] = self.Taxis.get('name','') + self._work_dict['_59_T_Step_Unit']['value'] = self.Taxis.get('units','') + + self._work_dict['_60_Comment']['value'] = comment + + self._work_dict['_61_Private_zone']['value'] = private_zone + self._work_dict['_62_points']['value'] = data_bin + # Read methods @@ -485,12 +1216,17 @@ def _read_sur_file(self): def _read_single_sur_object(self, file): for key, val in self._work_dict.items(): self._work_dict[key]["value"] = val["b_unpack_fn"](file) + # print(f"{key}: {self._work_dict[key]['value']}") def _append_work_dict_to_content(self): """Save the values stored in the work dict in the surface file list""" datadict = deepcopy({key: val["value"] for key, val in self._work_dict.items()}) self._list_sur_file_content.append(datadict) + def _move_values_to_workdict(self,dic:dict): + for key in self._work_dict: + self._work_dict[key]['value'] = deepcopy(dic[key]) + def _get_work_dict_key_value(self, key): + return
self._work_dict[key]["value"] @@ -499,9 +1235,7 @@ def _build_sur_dict(self): """Create a signal dict with an unpacked object""" # If the signal is of the type spectrum or hypercard - if self._Object_type in [ - "_HYPCARD", - ]: + if self._Object_type in ["_HYPCARD"]: self._build_hyperspectral_map() elif self._Object_type in ["_SPECTRUM"]: self._build_spectrum() @@ -509,7 +1243,7 @@ def _build_sur_dict(self): self._build_general_1D_data() elif self._Object_type in ["_PROFILESERIE"]: self._build_1D_series() - elif self._Object_type in ["_SURFACE"]: + elif self._Object_type in ["_SURFACE","_INTENSITYIMAGE","_BINARYIMAGE"]: self._build_surface() elif self._Object_type in ["_SURFACESERIE"]: self._build_surface_series() @@ -521,12 +1255,12 @@ def _build_sur_dict(self): self._build_RGB_image() elif self._Object_type in ["_RGBINTENSITYSURFACE"]: self._build_RGB_surface() - elif self._Object_type in ["_BINARYIMAGE"]: - self._build_surface() + # elif self._Object_type in ["_BINARYIMAGE"]: + # self._build_surface() else: raise MountainsMapFileError( - self._Object_type + "is not a supported mountain object." - ) + f"{self._Object_type} is not a supported mountain object." + ) return self.signal_dict @@ -700,9 +1434,7 @@ def _build_1D_series( self.signal_dict["data"] = np.stack(data) - def _build_surface( - self, - ): + def _build_surface(self,): """Build a surface""" # Check that the object contained only one object. @@ -723,9 +1455,7 @@ def _build_surface( self._set_metadata_and_original_metadata(hypdic) - def _build_surface_series( - self, - ): + def _build_surface_series(self,): """Build a series of surfaces. The T axis is navigation and set from the first object""" @@ -784,9 +1514,7 @@ def _build_RGB_surface( # Pushing data into the dictionary self.signal_dict["data"] = np.stack(data) - def _build_RGB_image( - self, - ): + def _build_RGB_image(self,): """Build an RGB image. 
The T axis is navigation and set from P Size""" @@ -893,16 +1621,14 @@ def _build_generic_metadata(self, unpacked_dict): return metadict - def _build_original_metadata( - self, - ): + def _build_original_metadata(self,): """Builds a metadata dictionary from the header""" original_metadata_dict = {} # Iteration over Number of data objects for i in range(self._N_data_object): # Iteration over the Number of Data channels - for j in range(self._N_data_channels): + for j in range(max(self._N_data_channels,1)): # Creating a dictionary key for each object k = (i + 1) * (j + 1) key = "Object_{:d}_Channel_{:d}".format(i, j) @@ -930,9 +1656,7 @@ def _build_original_metadata( return original_metadata_dict - def _build_signal_specific_metadata( - self, - ) -> dict: + def _build_signal_specific_metadata(self,) -> dict: """Build additional metadata specific to signal type. return a dictionary for update in the metadata.""" if self.signal_dict["metadata"]["Signal"]["signal_type"] == "CL": @@ -1161,31 +1885,126 @@ def _MS_parse(str_ms, prefix, delimiter): li_value = str_value.split(" ") try: if key == "Grating": - dict_ms[key_main][key] = li_value[ - 0 - ] # we don't want to eval this one + dict_ms[key_main][key] = li_value[0] # we don't want to eval this one else: - dict_ms[key_main][key] = eval(li_value[0]) + dict_ms[key_main][key] = ast.literal_eval(li_value[0]) except Exception: dict_ms[key_main][key] = li_value[0] if len(li_value) > 1: dict_ms[key_main][key + "_units"] = li_value[1] return dict_ms + @staticmethod + def _get_comment_dict(original_metadata: dict, method: str = 'auto', custom: dict = {}) -> dict: + """Return the dictionary used to set the dataset comments (akA custom parameters) while exporting a file. + + By default (method='auto'), tries to identify if the object was originally imported by rosettasciio + from a digitalsurf .sur/.pro file with a comment field parsed as original_metadata (i.e. + Object_0_Channel_0.Parsed). 
In that case, digitalsurf ignores non-parsed original metadata + (ie .sur/.pro file headers). If the original metadata contains multiple objects with + non-empty parsed content (Object_0_Channel_0.Parsed, Object_0_Channel_1.Parsed etc...), only + the first non-empty X.Parsed sub-dictionary is returned. This falls back on returning the + raw 'original_metadata' + + Optionally the raw 'original_metadata' dictionary can be exported (method='raw'), + a custom dictionary provided by the user (method='custom'), or no comment at all (method='off') + + Args: + method (str, optional): method to export. Defaults to 'auto'. + custom (dict, optional): custom dictionary. Ignored unless method is set to 'custom', Defaults to {}. + + Raises: + MountainsMapFileError: if an invalid key is entered + + Returns: + dict: dictionary to be exported as a .sur object + """ + if method == 'raw': + return original_metadata + elif method == 'custom': + return custom + elif method == 'off': + return {} + elif method == 'auto': + pattern = re.compile(r"Object_\d*_Channel_\d*") + omd = original_metadata + #filter original metadata content of dict type and matching pattern.
+ validfields = [omd[key] for key in omd if pattern.match(key) and isinstance(omd[key],dict)] + #In case none match, give up filtering and return raw + if not validfields: + return omd + #In case some match, return first non-empty "Parsed" sub-dict + for field in validfields: + #Return none for non-existing "Parsed" key + candidate = field.get('Parsed') + #For non-none, non-empty dict-type candidate + if candidate and isinstance(candidate,dict): + return candidate + #dict casting for non-none but non-dict candidate + elif candidate is not None: + return {'Parsed': candidate} + #else none candidate, or empty dict -> do nothing + #Finally, if valid fields are present but no candidate + #did a non-empty return, it is safe to return empty + return {} + else: + raise MountainsMapFileError(f"Non-valid method for setting mountainsmap file comment. Choose one of: 'auto','raw','custom','off' ") + + @staticmethod + def _stringify_dict(omd: dict): + """Pack nested dictionary metadata into a string. Pack dictionary-type elements + into digitalsurf "Section title" metadata type ('$_ preceding section title). Pack + other elements into equal-sign separated key-value pairs. + + Supports the key-units logic {'key': value, 'key_units': 'un'} used in hyperspy. 
+ """ + + #Separate dict into list of keys and list of values to authorize index-based pop/insert + keys_queue = list(omd.keys()) + vals_queue = list(omd.values()) + #commentstring to be returned + cmtstr: str = "" + #Loop until queues are empty + while keys_queue: + #pop first object + k = keys_queue.pop(0) + v = vals_queue.pop(0) + #if object is header + if isinstance(v,dict): + cmtstr += f"$_{k}\n" + keys_queue = list(v.keys()) + keys_queue + vals_queue = list(v.values()) + vals_queue + else: + try: + ku_idx = keys_queue.index(k + '_units') + has_units = True + except ValueError: + ku_idx = None + has_units = False + + if has_units: + _ = keys_queue.pop(ku_idx) + vu = vals_queue.pop(ku_idx) + cmtstr += f"${k} = {v.__repr__()} {vu}\n" + else: + cmtstr += f"${k} = {v.__repr__()}\n" + + return cmtstr + # Post processing @staticmethod def post_process_RGB(signal): signal = signal.transpose() - max_data = np.nanmax(signal.data) - if max_data <= 256: + max_data = np.max(signal.data) + if max_data <= 255: signal.change_dtype("uint8") signal.change_dtype("rgb8") elif max_data <= 65536: - signal.change_dtype("uint8") - signal.change_dtype("rgb8") + signal.change_dtype("uint16") + signal.change_dtype("rgb16") else: warnings.warn( - """RGB-announced data could not be converted to + """RGB-announced data could not be converted to uint8 or uint16 datatype""" ) @@ -1224,7 +2043,7 @@ def _set_str(file, val, size, encoding="latin-1"): file.write( struct.pack( "<{:d}s".format(size), - "{{:<{:d}s}}".format(size).format(val).encode(encoding), + f"{val}".ljust(size).encode(encoding), ) ) @@ -1299,12 +2118,21 @@ def _pack_private(self, file, val, encoding="latin-1"): privatesize = self._get_work_dict_key_value("_51_Private_size") self._set_str(file, val, privatesize) + def _is_data_int(self,): + if self._Object_type in ['_BINARYIMAGE', + '_RGBIMAGE', + '_RGBSURFACE', + '_SERIESOFRGBIMAGES']: + return True + else: + return False + def _unpack_data(self, file, encoding="latin-1"): - 
"""This needs to be special because it reads until the end of - file. This causes an error in the series of data""" # Size of datapoints in bytes. Always int16 (==2) or 32 (==4) psize = int(self._get_work_dict_key_value("_15_Size_of_Points") / 8) + Zmin = self._get_work_dict_key_value("_16_Zmin") + dtype = np.int16 if psize == 2 else np.int32 if self._get_work_dict_key_value("_01_Signature") != "DSCOMPRESSED": @@ -1322,12 +2150,9 @@ def _unpack_data(self, file, encoding="latin-1"): readsize = Npts_tot * psize if Wsize != 0: readsize *= Wsize - # if Npts_channel is not 0: - # readsize*=Npts_channel # Read the exact size of the data _points = np.frombuffer(file.read(readsize), dtype=dtype) - # _points = np.fromstring(file.read(readsize),dtype=dtype) else: # If the points are compressed do the uncompress magic. There @@ -1352,36 +2177,74 @@ def _unpack_data(self, file, encoding="latin-1"): # Finally numpy converts it to a numeric object _points = np.frombuffer(rawData, dtype=dtype) - # _points = np.fromstring(rawData, dtype=dtype) # rescale data # We set non measured points to nan according to .sur ways nm = [] if self._get_work_dict_key_value("_11_Special_Points") == 1: - # has unmeasured points + # has non-measured points nm = _points == self._get_work_dict_key_value("_16_Zmin") - 2 - # We set the point in the numeric scale - _points = _points.astype(float) * self._get_work_dict_key_value( - "_23_Z_Spacing" - ) * self._get_work_dict_key_value( - "_35_Z_Unit_Ratio" - ) + self._get_work_dict_key_value("_55_Z_Offset") + _points = (_points.astype(float) - Zmin) * self._get_work_dict_key_value("_23_Z_Spacing") * self._get_work_dict_key_value("_35_Z_Unit_Ratio") + self._get_work_dict_key_value("_55_Z_Offset") - _points[nm] = np.nan + # We set the point in the numeric scale + if self._is_data_int(): + _points = np.round(_points).astype(int) + else: + _points[nm] = np.nan + # Return the points, rescaled return _points def _pack_data(self, file, val, encoding="latin-1"): - 
"""This needs to be special because it writes until the end of - file.""" - datasize = self._get_work_dict_key_value("_62_points") - self._set_str(file, val, datasize) + """This needs to be special because it writes until the end of file.""" + #Also valid for uncompressed + datasize = self._get_work_dict_key_value('_48_Compressed_data_size') + self._set_bytes(file,val,datasize) + + @staticmethod + def _compress_data(data_int, nstreams: int = 1) -> bytes: + """Pack the input data using the digitalsurf zip approach and return the result as a + binary string ready to be written onto a file. """ + if nstreams <= 0 or nstreams >8 : + raise MountainsMapFileError(f"Number of compression streams must be >= 1, <= 8") + + bstr = b'' + bstr += struct.pack("= 11: + key = "SharedProperties.EDSSpectrumQuantificationSettings" + else: + key = "Operations.ImageQuantificationOperation" + mapping[key] = ("Sample.elements", self._convert_element_list) return mapping diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index a49c5dc9f..9121d90f8 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -496,3 +496,156 @@ def test_metadata_mapping(): ] == 7000 ) + + +def test_get_n_obj_chn(): + + omd = {"Object_0_Channel_0":{}, + "Object_1_Channel_0":{}, + "Object_2_Channel_0":{}, + "Object_2_Channel_1":{}, + "Object_2_Channel_2":{}, + "Object_3_Channel_0":{},} + + assert DigitalSurfHandler._get_nobjects(omd)==3 + + +def test_compressdata(): + + testdat = np.arange(120, dtype=np.int32) + + #Refuse too many / neg streams + with pytest.raises(MountainsMapFileError): + DigitalSurfHandler._compress_data(testdat,nstreams=9) + with pytest.raises(MountainsMapFileError): + DigitalSurfHandler._compress_data(testdat,nstreams=-1) + + # Accept 1 (dft) or several streams + bcomp = DigitalSurfHandler._compress_data(testdat) + assert bcomp.startswith(b'\x01\x00\x00\x00\xe0\x01\x00\x00') + bcomp = 
DigitalSurfHandler._compress_data(testdat,nstreams=2) + assert bcomp.startswith(b'\x02\x00\x00\x00\xf0\x00\x00\x00_\x00\x00\x00') + + # Accept 16-bits int as well as 32 + testdat = np.arange(120, dtype=np.int16) + bcomp = DigitalSurfHandler._compress_data(testdat) + assert bcomp.startswith(b'\x01\x00\x00\x00\xf0\x00\x00\x00') + + + # Also streams non-perfectly divided data + testdat = np.arange(120, dtype=np.int16) + bcomp = DigitalSurfHandler._compress_data(testdat) + assert bcomp.startswith(b'\x01\x00\x00\x00\xf0\x00\x00\x00') + + testdat = np.arange(127, dtype=np.int16) + bcomp = DigitalSurfHandler._compress_data(testdat,nstreams=3) + assert bcomp.startswith(b'\x03\x00\x00\x00V\x00\x00\x00C\x00\x00\x00'+ + b'V\x00\x00\x00F\x00\x00\x00'+ + b'R\x00\x00\x00B\x00\x00\x00') + + +def test_get_comment_dict(): + tdh = DigitalSurfHandler() + tdh.signal_dict={'original_metadata':{ + 'Object_0_Channel_0':{ + 'Parsed':{ + 'key_1': 1, + 'key_2':'2' + } + } + }} + + assert tdh._get_comment_dict('auto')=={'key_1': 1,'key_2':'2'} + assert tdh._get_comment_dict('off')=={} + assert tdh._get_comment_dict('raw')=={'Object_0_Channel_0':{'Parsed':{'key_1': 1,'key_2':'2'}}} + assert tdh._get_comment_dict('custom',custom={'a':0}) == {'a':0} + + #Goes to second dict if only this one's valid + tdh.signal_dict={'original_metadata':{ + 'Object_0_Channel_0':{'Header':{}}, + 'Object_0_Channel_1':{'Header':'ObjHead','Parsed':{'key_1': '0'}}, + }} + assert tdh._get_comment_dict('auto') == {'key_1': '0'} + + #Return empty if none valid + tdh.signal_dict={'original_metadata':{ + 'Object_0_Channel_0':{'Header':{}}, + 'Object_0_Channel_1':{'Header':'ObjHead'}, + }} + assert tdh._get_comment_dict('auto') == {} + + #Return dict-cast if a single field is named 'Parsed' (weird case) + tdh.signal_dict={'original_metadata':{ + 'Object_0_Channel_0':{'Header':{}}, + 'Object_0_Channel_1':{'Header':'ObjHead','Parsed':'SomeContent'}, + }} + assert tdh._get_comment_dict('auto') == {'Parsed':'SomeContent'} + 
+@pytest.mark.parametrize("test_object", ["test_profile.pro", "test_spectra.pro", "test_spectral_map.sur", "test_spectral_map_compressed.sur", "test_spectrum.pro", "test_spectrum_compressed.pro", "test_surface.sur"]) +def test_writetestobjects(tmp_path,test_object): + """Test data integrity of load/save functions. Starting from externally-generated data (i.e. not from hyperspy)""" + + df = TEST_DATA_PATH.joinpath(test_object) + + d = hs.load(df) + fn = tmp_path.joinpath(test_object) + d.save(fn,is_special=False) + d2 = hs.load(fn) + d2.save(fn,is_special=False) + d3 = hs.load(fn) + + assert np.allclose(d2.data,d.data) + assert np.allclose(d2.data,d3.data) + + a = d.axes_manager.navigation_axes + b = d2.axes_manager.navigation_axes + c = d3.axes_manager.navigation_axes + + for ax,ax2,ax3 in zip(a,b,c): + assert np.allclose(ax.axis,ax2.axis) + assert np.allclose(ax.axis,ax3.axis) + + a = d.axes_manager.signal_axes + b = d2.axes_manager.signal_axes + c = d3.axes_manager.signal_axes + + for ax,ax2,ax3 in zip(a,b,c): + assert np.allclose(ax.axis,ax2.axis) + assert np.allclose(ax.axis,ax3.axis) + +def test_writeRGB(tmp_path): + + df = TEST_DATA_PATH.joinpath("test_RGB.sur") + d = hs.load(df) + fn = tmp_path.joinpath("test_RGB.sur") + d.save(fn,is_special=False) + d2 = hs.load(fn) + d2.save(fn,is_special=False) + d3 = hs.load(fn) + + for k in ['R','G','B']: + assert np.allclose(d2.data[k],d.data[k]) + assert np.allclose(d3.data[k],d.data[k]) + + a = d.axes_manager.navigation_axes + b = d2.axes_manager.navigation_axes + c = d3.axes_manager.navigation_axes + + for ax,ax2,ax3 in zip(a,b,c): + assert np.allclose(ax.axis,ax2.axis) + assert np.allclose(ax.axis,ax3.axis) + + a = d.axes_manager.signal_axes + b = d2.axes_manager.signal_axes + c = d3.axes_manager.signal_axes + + for ax,ax2,ax3 in zip(a,b,c): + assert np.allclose(ax.axis,ax2.axis) + assert np.allclose(ax.axis,ax3.axis) + +@pytest.mark.parametrize("dtype", [np.int16, np.int32, np.float64, np.uint8, np.uint16]) +def 
test_writegeneric_validtypes(tmp_path,dtype): + + gen = hs.signals.Signal1D(np.arange(24,dtype=dtype))+25 + fgen = tmp_path.joinpath('test.pro') + gen.save(fgen,overwrite=True) \ No newline at end of file diff --git a/rsciio/tests/test_emd_velox.py b/rsciio/tests/test_emd_velox.py index ec51ca602..68ad2950c 100644 --- a/rsciio/tests/test_emd_velox.py +++ b/rsciio/tests/test_emd_velox.py @@ -524,6 +524,7 @@ def teardown_class(cls): @pytest.mark.parametrize("lazy", (True, False)) def test_spectrum_images(self, lazy): s = hs.load(self.fei_files_path / "Test SI 16x16 215 kx.emd", lazy=lazy) + assert s[-1].metadata.Sample.elements == ["C", "O", "Ca", "Cu"] assert len(s) == 10 for i, v in enumerate(["C", "Ca", "O", "Cu", "HAADF", "EDS"]): assert s[i + 4].metadata.General.title == v diff --git a/upcoming_changes/274.bugfix.rst b/upcoming_changes/274.bugfix.rst new file mode 100644 index 000000000..ac0d389ff --- /dev/null +++ b/upcoming_changes/274.bugfix.rst @@ -0,0 +1 @@ +:ref:`emd_fei-format`: Fix parsing elements from EDS data from velox emd file v11. 
\ No newline at end of file From 72aa59c995707fc55bd2709f7b1b5f13a308fde2 Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Thu, 20 Jun 2024 18:03:06 +0200 Subject: [PATCH 02/21] support for .sur .pro export, bugfixes --- .../supported_formats/digitalsurf.rst | 32 +- rsciio/digitalsurf/__init__.py | 3 +- rsciio/digitalsurf/_api.py | 1051 +++++++++++++++-- rsciio/digitalsurf/specifications.yaml | 4 +- rsciio/tests/test_digitalsurf.py | 153 +++ 5 files changed, 1155 insertions(+), 88 deletions(-) diff --git a/doc/user_guide/supported_formats/digitalsurf.rst b/doc/user_guide/supported_formats/digitalsurf.rst index 0f6610ccd..48608a28d 100644 --- a/doc/user_guide/supported_formats/digitalsurf.rst +++ b/doc/user_guide/supported_formats/digitalsurf.rst @@ -3,16 +3,30 @@ DigitalSurf format (SUR & PRO) ------------------------------ -The ``.sur`` and ``.pro`` files are a format developed by the digitalsurf company to handle various types of -scientific measurements data such as profilometer, SEM, AFM, RGB(A) images, multilayer -surfaces and profiles. Even though it is essentially a surfaces format, 1D signals -are supported for spectra and spectral maps. Specifically, this file format is used -by Attolight SA for its scanning electron microscope cathodoluminescence -(SEM-CL) hyperspectral maps. Metadata parsing is supported, including user-specific -metadata, as well as the loading of files containing multiple objects packed together. +The ``.sur`` and ``.pro`` files are a format developed by the digitalsurf company to handle +various types of scientific data with their MountainsMap software, such as profilometer, SEM, +AFM, RGB(A) images, multilayer surfaces and profiles. Even though it is essentially a surfaces +format, 1D signals are supported for spectra and spectral maps. Specifically, this file format +is used by Attolight SA for its scanning electron microscope cathodoluminescence (SEM-CL) +hyperspectral maps. 
The plugin was developed based on the MountainsMap software documentation, +which contains a description of the binary format. -The plugin was developed based on the MountainsMap software documentation, which -contains a description of the binary format. +Support for ``.sur`` and ``.pro`` datasets loading is complete, including parsing of user/customer +-specific metadata, and opening of files containing multiple objects. Some rare specific objects +(e.g. force curves) are not supported, due to no example data being available. Those can be added +upon request and providing of example datasets. Heterogeneous data can be represented in ``.sur`` +and ``.pro`` objects, for instance floating-point/topography and rgb data can coexist along the same +navigation dimension. Those are casted to a homogeneous floating-point representation upon loading. + +Support for data saving is partial as ``.sur`` and ``.pro`` can be fundamentally incompatible with +hyperspy signals. First, they have limited dimensionality. Up to 3d data arrays with +either 1d (series of images) or 2d (hyperspectral studiable) navigation space can be saved. Also, +``.sur`` and ``.pro`` do not support non-uniform axes and saving of models. Finally, ``.sur`` / ``.pro`` +linearize intensities along a uniform axis to enforce an integer-representation of the data (with scaling and +offset). This means that export from float-type hyperspy signals is inherently lossy. + +Within these limitations, all features from the fileformat are supported at export, notably data +compression and setting of custom metadata. 
API functions ^^^^^^^^^^^^^ diff --git a/rsciio/digitalsurf/__init__.py b/rsciio/digitalsurf/__init__.py index 40459e88b..7db9455d9 100644 --- a/rsciio/digitalsurf/__init__.py +++ b/rsciio/digitalsurf/__init__.py @@ -1,7 +1,8 @@ -from ._api import file_reader +from ._api import file_reader, file_writer __all__ = [ "file_reader", + "file_writer" ] diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index e81695cb4..cbf999ff1 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -23,17 +23,19 @@ # comments can be systematically parsed into metadata and write a support for # original_metadata or other +import datetime +from copy import deepcopy import logging import os import struct import sys +import re import warnings import zlib +import ast # Commented for now because I don't know what purpose it serves # import traits.api as t -from copy import deepcopy - # Dateutil allows to parse date but I don't think it's useful here # import dateutil.parser import numpy as np @@ -45,12 +47,13 @@ # import rsciio.utils.tools # DictionaryTreeBrowser class handles the fancy metadata dictionnaries # from hyperspy.misc.utils import DictionaryTreeBrowser -from rsciio._docstrings import FILENAME_DOC, LAZY_UNSUPPORTED_DOC, RETURNS_DOC +from rsciio._docstrings import FILENAME_DOC, SIGNAL_DOC from rsciio.utils.exceptions import MountainsMapFileError +from rsciio.utils.rgb_tools import is_rgb, is_rgba +from rsciio.utils.date_time_tools import get_date_time_from_metadata _logger = logging.getLogger(__name__) - class DigitalSurfHandler(object): """Class to read Digital Surf MountainsMap files. 
@@ -81,26 +84,28 @@ class DigitalSurfHandler(object): 6: "_MERIDIANDISC", 7: "_MULTILAYERPROFILE", 8: "_MULTILAYERSURFACE", - 9: "_PARALLELDISC", + 9: "_PARALLELDISC", #not implemented 10: "_INTENSITYIMAGE", 11: "_INTENSITYSURFACE", 12: "_RGBIMAGE", - 13: "_RGBSURFACE", - 14: "_FORCECURVE", - 15: "_SERIEOFFORCECURVE", - 16: "_RGBINTENSITYSURFACE", + 13: "_RGBSURFACE", #Deprecated + 14: "_FORCECURVE", #Deprecated + 15: "_SERIEOFFORCECURVE", #Deprecated + 16: "_RGBINTENSITYSURFACE", #Surface + Image + 17: "_CONTOURPROFILE", + 18: "_SERIESOFRGBIMAGES", 20: "_SPECTRUM", 21: "_HYPCARD", } - def __init__(self, filename=None): + def __init__(self, filename : str|None = None): # We do not need to check for file existence here because # io module implements it in the load function self.filename = filename # The signal_dict dictionnary has to be returned by the - # file_reader function. Apparently original_metadata needs - # to be set + # file_reader function. By default, we return the minimal + # mandatory fields self.signal_dict = { "data": np.empty((0, 0, 0)), "axes": [], @@ -115,12 +120,12 @@ def __init__(self, filename=None): # _work_dict['Field']['b_pack_fn'](f,v): pack value v in file f self._work_dict = { "_01_Signature": { - "value": "DSCOMPRESSED", + "value": "DSCOMPRESSED", #Uncompressed key is DIGITAL SURF "b_unpack_fn": lambda f: self._get_str(f, 12, "DSCOMPRESSED"), "b_pack_fn": lambda f, v: self._set_str(f, v, 12), }, "_02_Format": { - "value": 0, + "value": 1, "b_unpack_fn": self._get_int16, "b_pack_fn": self._set_int16, }, @@ -145,7 +150,7 @@ def __init__(self, filename=None): "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, "_07_Operator_Name": { - "value": "", + "value": "ROSETTA", "b_unpack_fn": lambda f: self._get_str(f, 30, ""), "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, @@ -200,17 +205,17 @@ def __init__(self, filename=None): "b_pack_fn": self._set_int32, }, "_18_Number_of_Points": { - "value": 0, + "value": 1, "b_unpack_fn": 
self._get_int32, "b_pack_fn": self._set_int32, }, "_19_Number_of_Lines": { - "value": 0, + "value": 1, "b_unpack_fn": self._get_int32, "b_pack_fn": self._set_int32, }, "_20_Total_Nb_of_Pts": { - "value": 0, + "value": 1, "b_unpack_fn": self._get_int32, "b_pack_fn": self._set_int32, }, @@ -305,7 +310,7 @@ def __init__(self, filename=None): "b_pack_fn": self._set_int16, }, "_39_Obsolete": { - "value": 0, + "value": b'0', "b_unpack_fn": lambda f: self._get_bytes(f, 12), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 12), }, @@ -355,7 +360,7 @@ def __init__(self, filename=None): "b_pack_fn": self._set_uint32, }, "_49_Obsolete": { - "value": 0, + "value": b'0', "b_unpack_fn": lambda f: self._get_bytes(f, 6), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 6), }, @@ -370,7 +375,7 @@ def __init__(self, filename=None): "b_pack_fn": self._set_int16, }, "_52_Client_zone": { - "value": 0, + "value": b'0', "b_unpack_fn": lambda f: self._get_bytes(f, 128), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 128), }, @@ -422,7 +427,7 @@ def __init__(self, filename=None): "_62_points": { "value": 0, "b_unpack_fn": self._unpack_data, - "b_pack_fn": lambda f, v: 0, # Not implemented + "b_pack_fn": self._pack_data, }, } @@ -442,6 +447,732 @@ def __init__(self, filename=None): self._N_data_object = 1 self._N_data_channels = 1 + # Attributes useful for save and export + + # Number of nav / sig axes + self._n_ax_nav: int = 0 + self._n_ax_sig: int = 0 + + # All as a rsciio-convention axis dict or empty + self.Xaxis: dict = {} + self.Yaxis: dict = {} + self.Zaxis: dict = {} + self.Taxis: dict = {} + + # These must be set in the split functions + self.data_split = [] + self.objtype_split = [] + # Packaging methods for writing files + + def _build_sur_file_contents(self, + set_comments:str='auto', + is_special:bool=False, + compressed:bool=True, + comments: dict = {}, + operator_name: str = '', + private_zone: bytes = b'', + client_zone: bytes = b'' + ): + + self._list_sur_file_content = [] + 
+ #Compute number of navigation / signal axes + self._n_ax_nav, self._n_ax_sig = DigitalSurfHandler._get_n_axes(self.signal_dict) + + # Choose object type based on number of navigation and signal axes + # Populate self.Xaxis, self.Yaxis, self.Taxis (if not empty) + # Populate self.data_split and self.objtype_split (always) + self._split_signal_dict() + + # This initialize the Comment string saved with the studiable. + comment_dict = self._get_comment_dict(self.signal_dict['original_metadata'], + method=set_comments, + custom=comments) + comment_str = self._stringify_dict(comment_dict) + + #Now we build a workdict for every data object + for data,objtype in zip(self.data_split,self.objtype_split): + self._build_workdict(data, + objtype, + self.signal_dict['metadata'], + comment=comment_str, + is_special=is_special, + compressed=compressed, + operator_name=operator_name, + private_zone=private_zone, + client_zone=client_zone) + # if more than one object, we erase comment after first object. + if comment_str: + comment_str = '' + + # Finally we push it all to the content list. + self._append_work_dict_to_content() + + def _write_sur_file(self): + """Write self._list_sur_file_content to a """ + + with open(self.filename, "wb") as f: + for dic in self._list_sur_file_content: + # Extremely important! self._work_dict must access + # other fields to properly encode and decode data, + # comments etc. etc. + self._move_values_to_workdict(dic) + # Then inner consistency is trivial + for key in self._work_dict: + self._work_dict[key]['b_pack_fn'](f,self._work_dict[key]['value']) + + @staticmethod + def _get_n_axes(sig_dict: dict) -> tuple[int,int]: + """Return number of navigation and signal axes in the signal dict (in that order). + + Args: + sig_dict (dict): signal dictionary. Contains keys 'data', 'axes', 'metadata', 'original_metadata' + + Returns: + Tuple[int,int]: nax_nav,nax_sig. 
Number of navigation and signal axes + """ + nax_nav = 0 + nax_sig = 0 + for ax in sig_dict['axes']: + if ax['navigate']: + nax_nav += 1 + else: + nax_sig += 1 + return nax_nav, nax_sig + + @staticmethod + def _get_nobjects(omd: dict) -> int: + maxobj = 0 + for k in omd: + objnum = k.split('_')[1] + objnum = int(objnum) + if objnum > maxobj: + maxobj = objnum + return maxobj + + def _is_spectrum(self) -> bool: + """Determine if a signal is a spectrum based on axes naming""" + + spectrumlike_axnames = ['Wavelength', 'Energy', 'Energy Loss', 'E'] + is_spec = False + + for ax in self.signal_dict['axes']: + if ax['name'] in spectrumlike_axnames: + is_spec = True + + return is_spec + + def _is_surface(self) -> bool: + """Determine if a 2d-data-like signal_dict should be of surface type, ie the dataset + is a 2d surface of the 3d plane. """ + is_surface = False + surfacelike_quantnames = ['Height', 'Altitude', 'Elevation', 'Depth', 'Z'] + quant: str = self.signal_dict['metadata']['Signal']['quantity'] + for name in surfacelike_quantnames: + if quant.startswith(name): + is_surface = True + + return is_surface + + def _is_binary(self) -> bool: + return self.signal_dict['data'].dtype == bool + + def _get_num_chans(self) -> int: + """Get number of channels (aka point size) + + Args: + obj_type (int): Object type numeric code + + Returns: + int: Number of channels (point size). 
+ """ + obj_type = self._get_object_type() + + if obj_type == 11: + return 2 #Intensity + surface (deprecated type) + elif obj_type in [12,18]: + return 3 #RGB types + elif obj_type == 13: + return 4 #RGB surface + elif obj_type in [14, 15, 35, 36]: + return 2 #Force curves + elif obj_type in [16]: + return 5 #Surface, Intensity, R, G, B (but hardly applicable to hyperspy) + else: + return 1 + + def _get_wsize(self, nax_sig: int) -> int: + if nax_sig != 1: + raise MountainsMapFileError(f"Attempted parsing W-axis size from signal with navigation dimension {nax_sig}!= 1.") + for ax in self.signal_dict['axes']: + if not ax['navigate']: + return ax['size'] + + def _get_num_objs(self,) -> int: + """Get number of objects based on object type and number of navigation axes in the signal. + + Raises: + ValueError: Several digital surf save formats will need a navigation dimension of 1 + + Returns: + int: _description_ + """ + obj_type = self._get_object_type() + nax_nav, _ = self._get_n_axes() + + if obj_type in [1,2,3,6,9,10,11,12,13,14,15,16,17,20,21,35,36,37]: + return 1 + elif obj_type in [4,5,7,8,18]: + if nax_nav != 1: + raise MountainsMapFileError(f"Attempted to save signal with number type {obj_type} and navigation dimension {nax_nav}.") + for ax in enumerate(self.signal_dict['axes']): + if ax['navigate']: + return ax['size'] + + def _get_object_type(self) -> int: + """Select the suitable _mountains_object_types """ + + nax_nav, nax_sig = self._get_n_axes(self.signal_dict) + + obj_type = None + if nax_nav == 0: + if nax_sig == 0: + raise MountainsMapFileError(msg=f"Object with empty navigation and signal axes not supported for .sur export") + elif nax_sig == 1: + if self._is_spectrum(): + obj_type = 20 # '_SPECTRUM' + else: + obj_type = 1 # '_PROFILE' + elif nax_sig == 2: + if self._is_binary(): + obj_type = 3 # "_BINARYIMAGE" + elif is_rgb(self.signal_dict['data']): + obj_type = 12 #"_RGBIMAGE" + elif is_rgba(self.signal_dict['data']): + warnings.warn(f"Alpha 
channel discarded upon saving RGBA signal in .sur format") + obj_type = 12 #"_RGBIMAGE" + elif self._is_surface(): + obj_type = 2 #'_SURFACE' + else: + obj_type = 10 #_INTENSITYSURFACE + else: + raise MountainsMapFileError(msg=f"Object with signal dimension {nax_sig} > 2 not supported for .sur export") + elif nax_nav == 1: + if nax_sig == 0: + warnings.warn(f"Exporting surface signal dimension {nax_sig} and navigation dimension {nax_nav} falls back on surface type but is not good practice.") + obj_type = 1 # '_PROFILE' + elif nax_sig == 1: + if self._is_spectrum(): + obj_type = 20 # '_SPECTRUM' + else: + obj_type = 1 # '_PROFILE' + elif nax_sig ==2: + #Also warn + if is_rgb(self.signal_dict['data']): + obj_type = 18 #"_SERIESOFRGBIMAGE" + elif is_rgba(self.signal_dict['data']): + warnings.warn(f"Alpha channel discarded upon saving RGBA signal in .sur format") + obj_type = 18 #"_SERIESOFRGBIMAGE" + else: + obj_type = 5 #"_SURFACESERIE" + else: + raise MountainsMapFileError(msg=f"Object with signal dimension {nax_sig} > 2 not supported for .sur export") + elif nax_nav == 2: + if nax_sig == 0: + warnings.warn(f"Signal dimension {nax_sig} and navigation dimension {nax_nav} exported as surface type. Consider transposing signal object before exporting if this is intentional.") + if self._is_surface(): + obj_type = 2 #'_SURFACE' + else: + obj_type = 10 #_INTENSITYSURFACE + elif nax_sig == 1: + obj_type = 21 #'_HYPCARD' + else: + raise MountainsMapFileError(msg=f"Object with signal dimension {nax_sig} and navigation dimension {nax_nav} not supported for .sur export") + else: + #Also raise + raise MountainsMapFileError(msg=f"Object with navigation dimension {nax_nav} > 2 not supported for .sur export") + + return obj_type + + def _split_spectrum(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + #When splitting spectrum, remember that instead of the series axis (T/W), + #X axis is the spectral dimension and Y the series dimension (if series). 
+ # Xaxis = {} + # Yaxis = {} + nax_nav = self._n_ax_nav + nax_sig = self._n_ax_sig + + if (nax_nav,nax_sig)==(0,1) or (nax_nav,nax_sig)==(1,0): + self.Xaxis = self.signal_dict['axes'][0] + elif (nax_nav,nax_sig)==(1,1): + self.Xaxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.Yaxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + else: + raise MountainsMapFileError(f"Dimensions ({nax_nav})|{nax_sig}) invalid for export as spectrum type") + + self.data_split = [self.signal_dict['data']] + self.objtype_split = [20] + self._N_data_object = 1 + self._N_data_channels = 1 + + def _split_profile(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + + if (self._n_ax_nav,self._n_ax_sig) in [(0,1),(1,0)]: + self.Xaxis = self.signal_dict['axes'][0] + else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for a profile type") + + self.data_split = [self.signal_dict['data']] + self.objtype_split = [1] + self._N_data_object = 1 + self._N_data_channels = 1 + + def _split_profileserie(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + obj_type = 4 # '_PROFILESERIE' + + if (self._n_ax_nav,self._n_ax_sig)==(1,1): + self.Xaxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + + self.data_split = self._split_data_alongaxis(self.Taxis) + self.objtype_split = [obj_type] + [1]*(len(self.data_split)-1) + self._N_data_object = len(self.objtype_split) + self._N_data_channels = 1 + + def _split_binary_img(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + obj_type = 3 + if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] 
+ else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + + self.data_split = [self.signal_dict['data']] + self.objtype_split = [obj_type] + self._N_data_object = 1 + self._N_data_channels = 1 + + def _split_rgb(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + obj_type = 12 + if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] + else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + + self.data_split = [np.int32(self.signal_dict['data']['R']), + np.int32(self.signal_dict['data']['G']), + np.int32(self.signal_dict['data']['B']) + ] + self.objtype_split = [obj_type] + [10,10] + self._N_data_object = 1 + self._N_data_channels = 3 + + def _split_surface(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + obj_type = 2 + if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] + else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + self.data_split = [self.signal_dict['data']] + self.objtype_split = [obj_type] + self._N_data_object = 1 + self._N_data_channels = 1 + + def _split_intensitysurface(self,): + """Must set axes except Z, data_split & objtype_split attributes""" + obj_type = 10 + if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] + else: + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + self.data_split = [self.signal_dict['data']] + self.objtype_split = [obj_type] + self._N_data_object = 1 + self._N_data_channels = 1 + + def 
_split_rgbserie(self): + obj_type = 18 #"_SERIESOFRGBIMAGE" + + sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.Yaxis = next(sigaxes_iter) + self.Xaxis = next(sigaxes_iter) + self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + tmp_data_split = self._split_data_alongaxis(self.Taxis) + + self.data_split = [] + self.objtype_split = [] + for d in tmp_data_split: + self.data_split += [d['R'].astype(np.int32), d['G'].astype(np.int32), d['B'].astype(np.int32)] + self.objtype_split += [12,10,10] + self.objtype_split[0] = obj_type + + self._N_data_object = self.Taxis['size'] + self._N_data_channels = 3 + + def _split_surfaceserie(self): + obj_type = 5 + sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.Yaxis = next(sigaxes_iter) + self.Xaxis = next(sigaxes_iter) + self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + self.data_split = self._split_data_alongaxis(self.Taxis) + self.objtype_split = [2]*len(self.data_split) + self.objtype_split[0] = obj_type + self._N_data_object = len(self.data_split) + self._N_data_channels = 1 + + def _split_hyperspectral(self): + obj_type = 21 + sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if ax['navigate']) + self.Yaxis = next(sigaxes_iter) + self.Xaxis = next(sigaxes_iter) + self.Taxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.data_split = [self.signal_dict['data']] + self.objtype_split = [obj_type] + self._N_data_object = 1 + self._N_data_channels = 1 + + def _split_data_alongaxis(self, axis: dict) -> list[np.ndarray]: + idx = self.signal_dict['axes'].index(axis) + # return idx + datasplit = [] + for dslice in np.rollaxis(self.signal_dict['data'],idx): + datasplit.append(dslice) + return datasplit + + def _split_signal_dict(self): + """Select the suitable _mountains_object_types """ + + n_nav = self._n_ax_nav + n_sig = self._n_ax_sig + + #Here, I manually unfold the 
nested conditions for legibility.
+        #Since there are a fixed number of dimensions supported by
+        # digitalsurf .sur/.pro files, I think this is the best way to
+        # proceed.
+        if (n_nav,n_sig) == (0,1):
+            if self._is_spectrum():
+                self._split_spectrum()
+            else:
+                self._split_profile()
+        elif (n_nav,n_sig) == (0,2):
+            if self._is_binary():
+                self._split_binary_img()
+            elif is_rgb(self.signal_dict['data']): #"_RGBIMAGE"
+                self._split_rgb()
+            elif is_rgba(self.signal_dict['data']):
+                warnings.warn(f"A channel discarded upon saving \
+                              RGBA signal in .sur format")
+                self._split_rgb()
+            elif self._is_surface(): #'_SURFACE'
+                self._split_surface()
+            else: # _INTENSITYSURFACE
+                self._split_intensitysurface()
+        elif (n_nav,n_sig) == (1,0):
+            warnings.warn(f"Exporting surface signal dimension {n_sig} and navigation dimension \
+                          {n_nav} falls back on profile type but is not good practice. Consider \
+                          transposing before saving to avoid unexpected behaviour.")
+            self._split_profile()
+        elif (n_nav,n_sig) == (1,1):
+            if self._is_spectrum():
+                self._split_spectrum()
+            else:
+                self._split_profileserie()
+        elif (n_nav,n_sig) == (1,2):
+            if is_rgb(self.signal_dict['data']):
+                self._split_rgbserie()
+            elif is_rgba(self.signal_dict['data']):
+                warnings.warn(f"Alpha channel discarded upon saving RGBA signal in .sur format")
+                obj_type = 18 #"_SERIESOFRGBIMAGE"
+                self._split_rgbserie()
+            else:
+                self._split_surfaceserie()
+        elif (n_nav,n_sig) == (2,0):
+            warnings.warn(f"Signal dimension {n_sig} and navigation dimension {n_nav} exported as surface type. 
Consider transposing signal object before exporting if this is intentional.")
+            if self._is_binary():
+                self._split_binary_img()
+            elif is_rgb(self.signal_dict['data']): #"_RGBIMAGE"
+                self._split_rgb()
+            elif is_rgba(self.signal_dict['data']):
+                warnings.warn(f"A channel discarded upon saving \
+                              RGBA signal in .sur format")
+                self._split_rgb()
+            elif self._is_surface():
+                self._split_surface()
+            else:
+                self._split_intensitysurface()
+        elif (n_nav,n_sig) == (2,1):
+            self._split_hyperspectral()
+        else:
+            raise MountainsMapFileError(msg=f"Object with signal dimension {n_sig} and navigation dimension {n_nav} not supported for .sur export")
+
+    def _norm_data(self, data: np.ndarray, is_special: bool, apply_sat_lo: bool = False, apply_sat_hi: bool = False):
+        """Normalize input data to 16-bits or 32-bits ints and initialize an axis on which the data is normalized.
+
+        Args:
+            data (np.ndarray): dataset
+            is_special (bool): whether NaNs get sent to N.M points in the sur format.
+            apply_sat_lo (bool, optional): Signal low-value saturation in output datafile. Defaults to False.
+            apply_sat_hi (bool, optional): Signal high-value saturation in output datafile. Defaults to False.
+
+        Raises:
+            MountainsMapFileError: raised if input is of complex type
+            MountainsMapFileError: raised if input is of unsigned int type
+            MountainsMapFileError: raised if input is of int > 32 bits type
+
+        Returns:
+            tuple[int,int,int,float,float,np.ndarray[int]]: pointsize, Zmin, Zmax, Zscale, Zoffset, data_int
+        """
+        data_type = data.dtype
+
+        if np.issubdtype(data_type,np.complexfloating):
+            raise MountainsMapFileError(f"digitalsurf file formats do not support export of complex data. 
Convert data to real-value representations before export")
+        elif data_type==np.uint8:
+            warnings.warn("np.uint8 datatype exported as 16bits")
+            pointsize = 16 #Pointsize has to be 16 or 32 in surf format
+            Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data.astype(np.int16), pointsize, is_special)
+            data_int = data.astype(np.int16)
+        elif data_type==np.uint16:
+            warnings.warn("np.uint16 datatype exported as 32bits")
+            pointsize = 32 #Pointsize has to be 16 or 32 in surf format
+            Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data.astype(np.int32), pointsize, is_special)
+            data_int = data.astype(np.int32)
+        elif np.issubdtype(data_type,np.unsignedinteger):
+            raise MountainsMapFileError(f"digitalsurf file formats do not support unsigned data >16bits. Convert data to signed integers before export.")
+        elif data_type==np.int8:
+            pointsize = 16 #Pointsize has to be 16 or 32 in surf format
+            Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, 8, is_special)
+            data_int = data
+        elif data_type==np.int16:
+            pointsize = 16
+            Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, pointsize, is_special)
+            data_int = data
+        elif data_type==np.int32:
+            pointsize = 32
+            data_int = data
+            Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, pointsize, is_special)
+        elif np.issubdtype(data_type,np.integer):
+            raise MountainsMapFileError(f"digitalsurf file formats do not support export integers larger than 32 bits. Convert data to 32-bit representation before exporting")
+        elif np.issubdtype(data_type,np.floating):
+            if self.signal_dict['data'].itemsize*8 > 32:
+                warnings.warn(f"Lossy conversion of {data_type} to 32-bits-ints representation will occur.")
+            pointsize = 32
+            Zmin, Zmax, Zscale, Zoffset, data_int = self._norm_float(data, is_special)
+
+        return pointsize, Zmin, Zmax, Zscale, Zoffset, data_int
+
+    def _norm_signed_int(self, data:np.ndarray, intsize: int, is_special: bool = False):
+        # There are no NaN values for integers. 
Special points means considering high/low saturation of integer scale. + + data_int_min = - 2**(intsize-1) + data_int_max = 2**(intsize -1) + + is_satlo = (data==data_int_min).sum() >= 1 and is_special + is_sathi = (data==data_int_max).sum() >= 1 and is_special + + Zmin = data_int_min + 1 if is_satlo else data.min() + Zmax = data_int_max - 1 if is_sathi else data.max() + Zscale = 1.0 + Zoffset = 0.0 + + return Zmin, Zmax, Zscale, Zoffset + + def _norm_float(self, data : np.ndarray, is_special: bool = False,): + """Normalize float data on a 32 bits int scale.""" + + Zoffset_f = np.nanmin(data) + Zmax_f = np.nanmax(data) + is_nan = np.any(np.isnan(data)) + + if is_special and is_nan: + Zmin = - 2**(32-1) + 2 + Zmax = 2**32 + Zmin - 3 + else: + Zmin = - 2**(32-1) + Zmax = 2**32 + Zmin - 1 + + Zscale = (Zmax_f - Zoffset_f)/(Zmax - Zmin) + data_int = (data - Zoffset_f)/Zscale + Zmin + + if is_special and is_nan: + data_int[np.isnan(data)] = Zmin - 2 + + data_int = data_int.astype(np.int32) + + return Zmin, Zmax, Zscale, Zoffset_f, data_int + + def _get_Zname_Zunit(self, metadata: dict) -> tuple[str,str]: + """Attempt reading Z-axis name and Unit from metadata.Signal.Quantity field. + Return empty str if do not exist. 
+
+        Returns:
+            tuple[str,str]: Zname,Zunit
+        """
+        quantitystr: str = metadata.get('Signal',{}).get('quantity','')
+        quantitystr = quantitystr.strip()
+        quantity = quantitystr.split(' ')
+        if len(quantity)>1:
+            Zunit = quantity.pop()
+            Zunit = Zunit.strip('()')
+            Zname = ' '.join(quantity)
+        elif len(quantity)==1:
+            Zname = quantity.pop()
+            Zunit = ''
+        else:
+            Zname = ''
+            Zunit = ''
+
+        return Zname,Zunit
+
+    def _get_datetime_info(self,) -> tuple[int,int,int,int,int,int]:
+        date = self.signal_dict['metadata']['General'].get('date','')
+        time = self.signal_dict['metadata']['General'].get('time','')
+
+        try:
+            [yyyy,mm,dd] = date.strip().split('-')
+        except ValueError:
+            [yyyy,mm,dd] = [0,0,0]
+
+        try:
+            [hh,minmin,ss] = time.strip().strip('Z').split(':')
+        except ValueError:
+            [hh,minmin,ss] = [0,0,0]
+
+        return yyyy,mm,dd,hh,minmin,ss
+
+    def _build_workdict(self,
+                        data: np.ndarray,
+                        obj_type: int,
+                        metadata: dict = {},
+                        comment: str = "",
+                        is_special: bool = True,
+                        compressed: bool = True,
+                        operator_name: str = '',
+                        private_zone: bytes = b'',
+                        client_zone: bytes = b''
+                        ):
+
+        if not compressed:
+            self._work_dict['_01_Signature']['value'] = 'DIGITAL SURF' # DSCOMPRESSED by default
+        else:
+            self._work_dict['_01_Signature']['value'] = 'DSCOMPRESSED' # DSCOMPRESSED by default
+
+        # self._work_dict['_02_Format']['value'] = 0 # Dft. other possible value is 257 for MacintoshII computers with Motorola CPUs. Obv not supported...
+        self._work_dict['_03_Number_of_Objects']['value'] = self._N_data_object
+        # self._work_dict['_04_Version']['value'] = 1 # Version number. Always default. 
+ self._work_dict['_05_Object_Type']['value'] = obj_type + # self._work_dict['_06_Object_Name']['value'] = '' Obsolete, DOS-version only (Not supported) + self._work_dict['_07_Operator_Name']['value'] = operator_name #Should be settable from kwargs + self._work_dict['_08_P_Size']['value'] = self._N_data_channels + + # self._work_dict['_09_Acquisition_Type']['value'] = 0 # AFM data only, could be inferred + # self._work_dict['_10_Range_Type']['value'] = 0 #Only 1 for high-range (z-stage scanning), AFM data only, could be inferred + + self._work_dict['_11_Special_Points']['value'] = int(is_special) + + # self._work_dict['_12_Absolute']['value'] = 0 #Probably irrelevant in most cases. Absolute vs rel heights (for profilometers), can be inferred + # self._work_dict['_13_Gauge_Resolution']['value'] = 0.0 #Probably irrelevant. Only for profilometers (maybe AFM), can be inferred + + # T-axis acts as W-axis for spectrum / hyperspectrum surfaces. + if obj_type in [21]: + ws = self.Taxis.get('size',0) + else: + ws = 0 + self._work_dict['_14_W_Size']['value'] = ws + + bsize, Zmin, Zmax, Zscale, Zoffset, data_int = self._norm_data(data,is_special,apply_sat_lo=True,apply_sat_hi=True) + Zname, Zunit = self._get_Zname_Zunit(metadata) + + #Axes element set regardless of object size + self._work_dict['_15_Size_of_Points']['value'] = bsize + self._work_dict['_16_Zmin']['value'] = Zmin + self._work_dict['_17_Zmax']['value'] = Zmax + self._work_dict['_18_Number_of_Points']['value']= self.Xaxis.get('size',1) + self._work_dict['_19_Number_of_Lines']['value'] = self.Yaxis.get('size',1) + self._work_dict['_20_Total_Nb_of_Pts']['value'] = data.size + self._work_dict['_21_X_Spacing']['value'] = self.Xaxis.get('scale',0.0) + self._work_dict['_22_Y_Spacing']['value'] = self.Yaxis.get('scale',0.0) + self._work_dict['_23_Z_Spacing']['value'] = Zscale + self._work_dict['_24_Name_of_X_Axis']['value'] = self.Xaxis.get('name','') + self._work_dict['_25_Name_of_Y_Axis']['value'] = 
self.Yaxis.get('name','') + self._work_dict['_26_Name_of_Z_Axis']['value'] = Zname + self._work_dict['_27_X_Step_Unit']['value'] = self.Xaxis.get('units','') + self._work_dict['_28_Y_Step_Unit']['value'] = self.Yaxis.get('units','') + self._work_dict['_29_Z_Step_Unit']['value'] = Zunit + self._work_dict['_30_X_Length_Unit']['value'] = self.Xaxis.get('units','') + self._work_dict['_31_Y_Length_Unit']['value'] = self.Yaxis.get('units','') + self._work_dict['_32_Z_Length_Unit']['value'] = Zunit + self._work_dict['_33_X_Unit_Ratio']['value'] = 1 + self._work_dict['_34_Y_Unit_Ratio']['value'] = 1 + self._work_dict['_35_Z_Unit_Ratio']['value'] = 1 + + # _36_Imprint -> Obsolete + # _37_Inverted -> Always No + # _38_Levelled -> Always No + # _39_Obsolete -> Obsolete + + dt: datetime.datetime = get_date_time_from_metadata(metadata,formatting='datetime') + if dt is not None: + self._work_dict['_40_Seconds']['value'] = dt.second + self._work_dict['_41_Minutes']['value'] = dt.minute + self._work_dict['_42_Hours']['value'] = dt.hour + self._work_dict['_43_Day']['value'] = dt.day + self._work_dict['_44_Month']['value'] = dt.month + self._work_dict['_45_Year']['value'] = dt.year + self._work_dict['_46_Day_of_week']['value'] = dt.weekday() + + # _47_Measurement_duration -> Nonsaved and non-metadata, but float in seconds + + if compressed: + data_bin = self._compress_data(data_int,nstreams=1) #nstreams hard-set to 1. 
Could be unlocked in the future + else: + fmt = " 2**15: + warnings.warn(f"Comment exceeding max length of 32.0 kB and will be cropped") + comment_len = np.int16(2**15) + + self._work_dict['_50_Comment_size']['value'] = comment_len + + privatesize = len(private_zone) + if privatesize > 2**15: + warnings.warn(f"Private size exceeding max length of 32.0 kB and will be cropped") + privatesize = np.int16(2**15) + + self._work_dict['_51_Private_size']['value'] = privatesize + + self._work_dict['_52_Client_zone']['value'] = client_zone + + self._work_dict['_53_X_Offset']['value'] = self.Xaxis.get('offset',0.0) + self._work_dict['_54_Y_Offset']['value'] = self.Yaxis.get('offset',0.0) + self._work_dict['_55_Z_Offset']['value'] = Zoffset + self._work_dict['_56_T_Spacing']['value'] = self.Taxis.get('scale',0.0) + self._work_dict['_57_T_Offset']['value'] = self.Taxis.get('offset',0.0) + self._work_dict['_58_T_Axis_Name']['value'] = self.Taxis.get('name','') + self._work_dict['_59_T_Step_Unit']['value'] = self.Taxis.get('units','') + + self._work_dict['_60_Comment']['value'] = comment + + self._work_dict['_61_Private_zone']['value'] = private_zone + self._work_dict['_62_points']['value'] = data_bin + # Read methods def _read_sur_file(self): """Read the binary, possibly compressed, content of the surface @@ -485,12 +1216,17 @@ def _read_sur_file(self): def _read_single_sur_object(self, file): for key, val in self._work_dict.items(): self._work_dict[key]["value"] = val["b_unpack_fn"](file) + # print(f"{key}: {self._work_dict[key]['value']}") def _append_work_dict_to_content(self): """Save the values stored in the work dict in the surface file list""" datadict = deepcopy({key: val["value"] for key, val in self._work_dict.items()}) self._list_sur_file_content.append(datadict) + def _move_values_to_workdict(self,dic:dict): + for key in self._work_dict: + self._work_dict[key]['value'] = deepcopy(dic[key]) + def _get_work_dict_key_value(self, key): return 
self._work_dict[key]["value"] @@ -499,9 +1235,7 @@ def _build_sur_dict(self): """Create a signal dict with an unpacked object""" # If the signal is of the type spectrum or hypercard - if self._Object_type in [ - "_HYPCARD", - ]: + if self._Object_type in ["_HYPCARD"]: self._build_hyperspectral_map() elif self._Object_type in ["_SPECTRUM"]: self._build_spectrum() @@ -509,7 +1243,7 @@ def _build_sur_dict(self): self._build_general_1D_data() elif self._Object_type in ["_PROFILESERIE"]: self._build_1D_series() - elif self._Object_type in ["_SURFACE"]: + elif self._Object_type in ["_SURFACE","_INTENSITYIMAGE","_BINARYIMAGE"]: self._build_surface() elif self._Object_type in ["_SURFACESERIE"]: self._build_surface_series() @@ -521,12 +1255,12 @@ def _build_sur_dict(self): self._build_RGB_image() elif self._Object_type in ["_RGBINTENSITYSURFACE"]: self._build_RGB_surface() - elif self._Object_type in ["_BINARYIMAGE"]: - self._build_surface() + # elif self._Object_type in ["_BINARYIMAGE"]: + # self._build_surface() else: raise MountainsMapFileError( - self._Object_type + "is not a supported mountain object." - ) + f"{self._Object_type} is not a supported mountain object." + ) return self.signal_dict @@ -700,9 +1434,7 @@ def _build_1D_series( self.signal_dict["data"] = np.stack(data) - def _build_surface( - self, - ): + def _build_surface(self,): """Build a surface""" # Check that the object contained only one object. @@ -723,9 +1455,7 @@ def _build_surface( self._set_metadata_and_original_metadata(hypdic) - def _build_surface_series( - self, - ): + def _build_surface_series(self,): """Build a series of surfaces. The T axis is navigation and set from the first object""" @@ -784,9 +1514,7 @@ def _build_RGB_surface( # Pushing data into the dictionary self.signal_dict["data"] = np.stack(data) - def _build_RGB_image( - self, - ): + def _build_RGB_image(self,): """Build an RGB image. 
The T axis is navigation and set from P Size""" @@ -893,16 +1621,14 @@ def _build_generic_metadata(self, unpacked_dict): return metadict - def _build_original_metadata( - self, - ): + def _build_original_metadata(self,): """Builds a metadata dictionary from the header""" original_metadata_dict = {} # Iteration over Number of data objects for i in range(self._N_data_object): # Iteration over the Number of Data channels - for j in range(self._N_data_channels): + for j in range(max(self._N_data_channels,1)): # Creating a dictionary key for each object k = (i + 1) * (j + 1) key = "Object_{:d}_Channel_{:d}".format(i, j) @@ -930,9 +1656,7 @@ def _build_original_metadata( return original_metadata_dict - def _build_signal_specific_metadata( - self, - ) -> dict: + def _build_signal_specific_metadata(self,) -> dict: """Build additional metadata specific to signal type. return a dictionary for update in the metadata.""" if self.signal_dict["metadata"]["Signal"]["signal_type"] == "CL": @@ -1161,31 +1885,126 @@ def _MS_parse(str_ms, prefix, delimiter): li_value = str_value.split(" ") try: if key == "Grating": - dict_ms[key_main][key] = li_value[ - 0 - ] # we don't want to eval this one + dict_ms[key_main][key] = li_value[0] # we don't want to eval this one else: - dict_ms[key_main][key] = eval(li_value[0]) + dict_ms[key_main][key] = ast.literal_eval(li_value[0]) except Exception: dict_ms[key_main][key] = li_value[0] if len(li_value) > 1: dict_ms[key_main][key + "_units"] = li_value[1] return dict_ms + @staticmethod + def _get_comment_dict(original_metadata: dict, method: str = 'auto', custom: dict = {}) -> dict: + """Return the dictionary used to set the dataset comments (akA custom parameters) while exporting a file. + + By default (method='auto'), tries to identify if the object was originally imported by rosettasciio + from a digitalsurf .sur/.pro file with a comment field parsed as original_metadata (i.e. + Object_0_Channel_0.Parsed). 
In that case, digitalsurf ignores non-parsed original metadata + (ie .sur/.pro file headers). If the original metadata contains multiple objects with + non-empty parsed content (Object_0_Channel_0.Parsed, Object_0_Channel_1.Parsed etc...), only + the first non-empty X.Parsed sub-dictionary is returned. This falls back on returning the + raw 'original_metadata' + + Optionally the raw 'original_metadata' dictionary can be exported (method='raw'), + a custom dictionary provided by the user (method='custom'), or no comment at all (method='off') + + Args: + method (str, optional): method to export. Defaults to 'auto'. + custom (dict, optional): custom dictionary. Ignored unless method is set to 'custom', Defaults to {}. + + Raises: + MountainsMapFileError: if an invalid key is entered + + Returns: + dict: dictionary to be exported as a .sur object + """ + if method == 'raw': + return original_metadata + elif method == 'custom': + return custom + elif method == 'off': + return {} + elif method == 'auto': + pattern = re.compile("Object_\d*_Channel_\d*") + omd = original_metadata + #filter original metadata content of dict type and matching pattern. 
+ validfields = [omd[key] for key in omd if pattern.match(key) and isinstance(omd[key],dict)] + #In case none match, give up filtering and return raw + if not validfields: + return omd + #In case some match, return first non-empty "Parsed" sub-dict + for field in validfields: + #Return none for non-existing "Parsed" key + candidate = field.get('Parsed') + #For non-none, non-empty dict-type candidate + if candidate and isinstance(candidate,dict): + return candidate + #dict casting for non-none but non-dict candidate + elif candidate is not None: + return {'Parsed': candidate} + #else none candidate, or empty dict -> do nothing + #Finally, if valid fields are present but no candidate + #did a non-empty return, it is safe to return empty + return {} + else: + raise MountainsMapFileError(f"Non-valid method for setting mountainsmap file comment. Choose one of: 'auto','raw','custom','off' ") + + @staticmethod + def _stringify_dict(omd: dict): + """Pack nested dictionary metadata into a string. Pack dictionary-type elements + into digitalsurf "Section title" metadata type ('$_ preceding section title). Pack + other elements into equal-sign separated key-value pairs. + + Supports the key-units logic {'key': value, 'key_units': 'un'} used in hyperspy. 
+ """ + + #Separate dict into list of keys and list of values to authorize index-based pop/insert + keys_queue = list(omd.keys()) + vals_queue = list(omd.values()) + #commentstring to be returned + cmtstr: str = "" + #Loop until queues are empty + while keys_queue: + #pop first object + k = keys_queue.pop(0) + v = vals_queue.pop(0) + #if object is header + if isinstance(v,dict): + cmtstr += f"$_{k}\n" + keys_queue = list(v.keys()) + keys_queue + vals_queue = list(v.values()) + vals_queue + else: + try: + ku_idx = keys_queue.index(k + '_units') + has_units = True + except ValueError: + ku_idx = None + has_units = False + + if has_units: + _ = keys_queue.pop(ku_idx) + vu = vals_queue.pop(ku_idx) + cmtstr += f"${k} = {v.__repr__()} {vu}\n" + else: + cmtstr += f"${k} = {v.__repr__()}\n" + + return cmtstr + # Post processing @staticmethod def post_process_RGB(signal): signal = signal.transpose() - max_data = np.nanmax(signal.data) - if max_data <= 256: + max_data = np.max(signal.data) + if max_data <= 255: signal.change_dtype("uint8") signal.change_dtype("rgb8") elif max_data <= 65536: - signal.change_dtype("uint8") - signal.change_dtype("rgb8") + signal.change_dtype("uint16") + signal.change_dtype("rgb16") else: warnings.warn( - """RGB-announced data could not be converted to + """RGB-announced data could not be converted to uint8 or uint16 datatype""" ) @@ -1224,7 +2043,7 @@ def _set_str(file, val, size, encoding="latin-1"): file.write( struct.pack( "<{:d}s".format(size), - "{{:<{:d}s}}".format(size).format(val).encode(encoding), + f"{val}".ljust(size).encode(encoding), ) ) @@ -1299,12 +2118,21 @@ def _pack_private(self, file, val, encoding="latin-1"): privatesize = self._get_work_dict_key_value("_51_Private_size") self._set_str(file, val, privatesize) + def _is_data_int(self,): + if self._Object_type in ['_BINARYIMAGE', + '_RGBIMAGE', + '_RGBSURFACE', + '_SERIESOFRGBIMAGES']: + return True + else: + return False + def _unpack_data(self, file, encoding="latin-1"): - 
"""This needs to be special because it reads until the end of - file. This causes an error in the series of data""" # Size of datapoints in bytes. Always int16 (==2) or 32 (==4) psize = int(self._get_work_dict_key_value("_15_Size_of_Points") / 8) + Zmin = self._get_work_dict_key_value("_16_Zmin") + dtype = np.int16 if psize == 2 else np.int32 if self._get_work_dict_key_value("_01_Signature") != "DSCOMPRESSED": @@ -1322,12 +2150,9 @@ def _unpack_data(self, file, encoding="latin-1"): readsize = Npts_tot * psize if Wsize != 0: readsize *= Wsize - # if Npts_channel is not 0: - # readsize*=Npts_channel # Read the exact size of the data _points = np.frombuffer(file.read(readsize), dtype=dtype) - # _points = np.fromstring(file.read(readsize),dtype=dtype) else: # If the points are compressed do the uncompress magic. There @@ -1352,36 +2177,74 @@ def _unpack_data(self, file, encoding="latin-1"): # Finally numpy converts it to a numeric object _points = np.frombuffer(rawData, dtype=dtype) - # _points = np.fromstring(rawData, dtype=dtype) # rescale data # We set non measured points to nan according to .sur ways nm = [] if self._get_work_dict_key_value("_11_Special_Points") == 1: - # has unmeasured points + # has non-measured points nm = _points == self._get_work_dict_key_value("_16_Zmin") - 2 - # We set the point in the numeric scale - _points = _points.astype(float) * self._get_work_dict_key_value( - "_23_Z_Spacing" - ) * self._get_work_dict_key_value( - "_35_Z_Unit_Ratio" - ) + self._get_work_dict_key_value("_55_Z_Offset") + _points = (_points.astype(float) - Zmin) * self._get_work_dict_key_value("_23_Z_Spacing") * self._get_work_dict_key_value("_35_Z_Unit_Ratio") + self._get_work_dict_key_value("_55_Z_Offset") - _points[nm] = np.nan + # We set the point in the numeric scale + if self._is_data_int(): + _points = np.round(_points).astype(int) + else: + _points[nm] = np.nan + # Return the points, rescaled return _points def _pack_data(self, file, val, encoding="latin-1"): - 
"""This needs to be special because it writes until the end of - file.""" - datasize = self._get_work_dict_key_value("_62_points") - self._set_str(file, val, datasize) + """This needs to be special because it writes until the end of file.""" + #Also valid for uncompressed + datasize = self._get_work_dict_key_value('_48_Compressed_data_size') + self._set_bytes(file,val,datasize) + + @staticmethod + def _compress_data(data_int, nstreams: int = 1) -> bytes: + """Pack the input data using the digitalsurf zip approach and return the result as a + binary string ready to be written onto a file. """ + if nstreams <= 0 or nstreams >8 : + raise MountainsMapFileError(f"Number of compression streams must be >= 1, <= 8") + + bstr = b'' + bstr += struct.pack(" Date: Fri, 21 Jun 2024 19:21:25 +0200 Subject: [PATCH 03/21] Fixing bugs, making doc --- .../supported_formats/digitalsurf.rst | 74 ++- rsciio/digitalsurf/_api.py | 584 +++++++++--------- .../tests/data/digitalsurf/test_isurface.sur | Bin 0 -> 56141 bytes rsciio/tests/test_digitalsurf.py | 94 +-- 4 files changed, 383 insertions(+), 369 deletions(-) create mode 100644 rsciio/tests/data/digitalsurf/test_isurface.sur diff --git a/doc/user_guide/supported_formats/digitalsurf.rst b/doc/user_guide/supported_formats/digitalsurf.rst index 48608a28d..8b5807abd 100644 --- a/doc/user_guide/supported_formats/digitalsurf.rst +++ b/doc/user_guide/supported_formats/digitalsurf.rst @@ -3,30 +3,56 @@ DigitalSurf format (SUR & PRO) ------------------------------ -The ``.sur`` and ``.pro`` files are a format developed by the digitalsurf company to handle -various types of scientific data with their MountainsMap software, such as profilometer, SEM, -AFM, RGB(A) images, multilayer surfaces and profiles. Even though it is essentially a surfaces -format, 1D signals are supported for spectra and spectral maps. 
Specifically, this file format
-is used by Attolight SA for its scanning electron microscope cathodoluminescence (SEM-CL)
-hyperspectral maps. The plugin was developed based on the MountainsMap software documentation,
-which contains a description of the binary format.
-
-Support for ``.sur`` and ``.pro`` datasets loading is complete, including parsing of user/customer
--specific metadata, and opening of files containing multiple objects. Some rare specific objects
-(e.g. force curves) are not supported, due to no example data being available. Those can be added
-upon request and providing of example datasets. Heterogeneous data can be represented in ``.sur``
-and ``.pro`` objects, for instance floating-point/topography and rgb data can coexist along the same
-navigation dimension. Those are casted to a homogeneous floating-point representation upon loading.
-
-Support for data saving is partial as ``.sur`` and ``.pro`` can be fundamentally incompatible with
-hyperspy signals. First, they have limited dimensionality. Up to 3d data arrays with
-either 1d (series of images) or 2d (hyperspectral studiable) navigation space can be saved. Also,
-``.sur`` and ``.pro`` do not support non-uniform axes and saving of models. Finally, ``.sur`` / ``.pro``
-linearize intensities along a uniform axis to enforce an integer-representation of the data (with scaling and
-offset). This means that export from float-type hyperspy signals is inherently lossy.
-
-Within these limitations, all features from the fileformat are supported at export, notably data
-compression and setting of custom metadata.
+``.sur`` and ``.pro`` is a format developed by digitalsurf to import/export data in their MountainsMap scientific
+analysis software. Target datasets originally result from (micro)-topography and imaging instruments: SEM, AFM,
+profilometer. RGB(A) images, multilayer surfaces and profiles are also supported. 
Even though it is essentially +a surfaces format, 1D signals are supported for spectra and spectral maps. Specifically, this is the fileformat +used by Attolight SA for its scanning electron microscope cathodoluminescence (SEM-CL) hyperspectral maps. This +plugin was developed based on the MountainsMap software documentation. + +Support for loading ``.sur`` and ``.pro`` datasets is complete, including parsing of user/customer-specific +metadata, and opening of files containing multiple objects. Some rare specific objects (e.g. force curves) +are not supported, due to no example data being available. Those can be added upon request and providing of +example datasets. Heterogeneous data can be represented in ``.sur`` and ``.pro`` objects, for instance +floating-point/topography and rgb data can coexist along the same navigation dimension. Those are casted to +a homogeneous floating-point representation upon loading. + +Support for data saving is partial as ``.sur`` and ``.pro`` do not support all features of hyperspy signals. +First, they have limited dimensionality. Up to 3d data arrays with either 1d (series of images) or 2d +(hyperspectral studiable) navigation space can be saved. Also, ``.sur`` and ``.pro`` do not support non-uniform +axes and saving of models. Finally, ``.sur`` / ``.pro`` linearize intensities along a uniform axis to enforce +an integer-representation of the data (with scaling and offset). This means that export from float-type hyperspy +signals is inherently lossy. + +Within these limitations, all features from ``.sur`` and ``.pro`` fileformats are supported, notably data +compression and setting of custom metadata. The file writer splits a signal into the suitable digitalsurf +dataobject primarily by inspecting its dimensions and its datatype, ultimately how various axes and signal +quantity are named. 
The criteria are listed here below:
+
++-----------------+---------------+------------------------------------------------------------------------------+
+| Nav. dimension  | Sig dimension | Extension and MountainsMap subclass                                          |
++=================+===============+==============================================================================+
+| 0               | 1             | ``.pro``: Spectrum (based on axes name), Profile (default)                   |
++-----------------+---------------+------------------------------------------------------------------------------+
+| 0               | 2             | ``.sur``: BinaryImage (based on dtype), RGBImage (based on dtype),           |
+|                 |               | Surface (default),                                                           |
++-----------------+---------------+------------------------------------------------------------------------------+
+| 1               | 0             | ``.pro``: same as (0,1)                                                      |
++-----------------+---------------+------------------------------------------------------------------------------+
+| 1               | 1             | ``.pro``: Spectrum Serie (based on axes name), Profile Serie (default)       |
++-----------------+---------------+------------------------------------------------------------------------------+
+| 1               | 2             | ``.sur``: RGBImage Serie (based on dtype), Surface Series (default)          |
++-----------------+---------------+------------------------------------------------------------------------------+
+| 2               | 0             | ``.sur``: same as (0,2)                                                      |
++-----------------+---------------+------------------------------------------------------------------------------+
+| 2               | 1             | ``.sur``: hyperspectralMap (default)                                         |
++-----------------+---------------+------------------------------------------------------------------------------+
+
+Axes named one of ``Wavelength``, ``Energy``, ``Energy Loss``, ``E``, are considered spectral, and quantities
+named one of ``Height``, ``Altitude``, ``Elevation``, ``Depth``, ``Z`` are considered surface. The difference
+between Surface and IntensitySurface stems from the AFM / profilometry origin of MountainsMap. 
"Surface" has +the proper meaning of an open boundary of 3d space, whereas "IntensitySurface" is a mere 2D mapping of an arbitrary +quantity. API functions ^^^^^^^^^^^^^ diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index cbf999ff1..2685fc622 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -47,7 +47,7 @@ # import rsciio.utils.tools # DictionaryTreeBrowser class handles the fancy metadata dictionnaries # from hyperspy.misc.utils import DictionaryTreeBrowser -from rsciio._docstrings import FILENAME_DOC, SIGNAL_DOC +from rsciio._docstrings import FILENAME_DOC, LAZY_UNSUPPORTED_DOC, RETURNS_DOC, SIGNAL_DOC from rsciio.utils.exceptions import MountainsMapFileError from rsciio.utils.rgb_tools import is_rgb, is_rgba from rsciio.utils.date_time_tools import get_date_time_from_metadata @@ -125,14 +125,14 @@ def __init__(self, filename : str|None = None): "b_pack_fn": lambda f, v: self._set_str(f, v, 12), }, "_02_Format": { - "value": 1, + "value": 0, "b_unpack_fn": self._get_int16, "b_pack_fn": self._set_int16, }, "_03_Number_of_Objects": { "value": 1, - "b_unpack_fn": self._get_int16, - "b_pack_fn": self._set_int16, + "b_unpack_fn": self._get_uint16, + "b_pack_fn": self._set_uint16, }, "_04_Version": { "value": 1, @@ -146,12 +146,12 @@ def __init__(self, filename : str|None = None): }, "_06_Object_Name": { "value": "", - "b_unpack_fn": lambda f: self._get_str(f, 30, "DOSONLY"), + "b_unpack_fn": lambda f: self._get_str(f, 30, ''), "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, "_07_Operator_Name": { "value": "ROSETTA", - "b_unpack_fn": lambda f: self._get_str(f, 30, ""), + "b_unpack_fn": lambda f: self._get_str(f, 30, ''), "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, "_08_P_Size": { @@ -186,8 +186,8 @@ def __init__(self, filename : str|None = None): }, "_14_W_Size": { "value": 0, - "b_unpack_fn": self._get_int32, - "b_pack_fn": self._set_int32, + "b_unpack_fn": self._get_uint32, + "b_pack_fn": 
self._set_uint32, }, "_15_Size_of_Points": { "value": 16, @@ -310,7 +310,7 @@ def __init__(self, filename : str|None = None): "b_pack_fn": self._set_int16, }, "_39_Obsolete": { - "value": b'0', + "value": b'', "b_unpack_fn": lambda f: self._get_bytes(f, 12), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 12), }, @@ -360,7 +360,7 @@ def __init__(self, filename : str|None = None): "b_pack_fn": self._set_uint32, }, "_49_Obsolete": { - "value": b'0', + "value": b'', "b_unpack_fn": lambda f: self._get_bytes(f, 6), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 6), }, @@ -375,7 +375,7 @@ def __init__(self, filename : str|None = None): "b_pack_fn": self._set_int16, }, "_52_Client_zone": { - "value": b'0', + "value": b'', "b_unpack_fn": lambda f: self._get_bytes(f, 128), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 128), }, @@ -420,7 +420,7 @@ def __init__(self, filename : str|None = None): "b_pack_fn": self._pack_comment, }, "_61_Private_zone": { - "value": 0, + "value": b'', "b_unpack_fn": self._unpack_private, "b_pack_fn": self._pack_private, }, @@ -444,7 +444,7 @@ def __init__(self, filename : str|None = None): self._Object_type = "_UNKNOWN" # Number of data objects in the file. - self._N_data_object = 1 + self._N_data_objects = 1 self._N_data_channels = 1 # Attributes useful for save and export @@ -462,35 +462,95 @@ def __init__(self, filename : str|None = None): # These must be set in the split functions self.data_split = [] self.objtype_split = [] - # Packaging methods for writing files + + # File Writer Inner methods + + def _write_sur_file(self): + """Write self._list_sur_file_content to a file. This method is + start-and-forget. The brainwork is performed in the construction + of sur_file_content list of dictionaries.""" + + with open(self.filename, "wb") as f: + for dic in self._list_sur_file_content: + # Extremely important! self._work_dict must access + # other fields to properly encode and decode data, + # comments etc. etc. 
+ self._move_values_to_workdict(dic) + # Then inner consistency is trivial + for key in self._work_dict: + self._work_dict[key]['b_pack_fn'](f,self._work_dict[key]['value']) + + def _validate_filename(self): + + sur_only = ['_SURFACE', + '_BINARYIMAGE', + '_SURFACESERIE', + '_MULTILAYERSURFACE', + '_INTENSITYIMAGE', + '_INTENSITYSURFACE', + '_RGBIMAGE', + '_RGBSURFACE', + '_RGBINTENSITYSURFACE', + '_SERIESOFRGBIMAGES', + '_HYPCARD'] + + pro_only = ['_PROFILE', + '_PROFILESERIE', + '_MULTILAYERPROFILE', + '_FORCECURVE', + '_SERIEOFFORCECURVE', + '_CONTOURPROFILE', + '_SPECTRUM', + ] + + if self._Object_type in sur_only and not self.filename.lower().endswith('sur'): + raise MountainsMapFileError(f"Attempting save of DigitalSurf {self._Object_type} with\ + .{self.filename.split('.')[-1]} extension, which only supports .sur") + + if self._Object_type in pro_only and not self.filename.lower().endswith('pro'): + raise MountainsMapFileError(f"Attempting save of DigitalSurf {self._Object_type} with\ + .{self.filename.split('.')[-1]} extension, which only supports .pro") def _build_sur_file_contents(self, set_comments:str='auto', is_special:bool=False, compressed:bool=True, comments: dict = {}, + object_name: str = '', operator_name: str = '', + absolute: int = 0, private_zone: bytes = b'', client_zone: bytes = b'' ): - + """Build the _sur_file_content list necessary to write a signal dictionary to + a ``.sur`` or ``.pro`` file. 
The signal dictionary's inner consistency is the + responsibility of hyperspy, and the this function's responsibility is to make + a consistent list of _sur_file_content.""" + self._list_sur_file_content = [] #Compute number of navigation / signal axes self._n_ax_nav, self._n_ax_sig = DigitalSurfHandler._get_n_axes(self.signal_dict) # Choose object type based on number of navigation and signal axes + # Populate self._Object_type # Populate self.Xaxis, self.Yaxis, self.Taxis (if not empty) # Populate self.data_split and self.objtype_split (always) self._split_signal_dict() - # This initialize the Comment string saved with the studiable. + #Raise error if wrong extension + # self._validate_filename() + + #Get a dictionary to be saved in the comment fielt of exported file comment_dict = self._get_comment_dict(self.signal_dict['original_metadata'], method=set_comments, custom=comments) + #Convert the dictionary to a string of suitable format. comment_str = self._stringify_dict(comment_dict) - #Now we build a workdict for every data object + # A _work_dict is created for each of the data arrays and object + # that have splitted from the main object. In most cases, only a + # single object is present in the split. for data,objtype in zip(self.data_split,self.objtype_split): self._build_workdict(data, objtype, @@ -498,35 +558,27 @@ def _build_sur_file_contents(self, comment=comment_str, is_special=is_special, compressed=compressed, + object_name=object_name, operator_name=operator_name, + absolute=absolute, private_zone=private_zone, client_zone=client_zone) - # if more than one object, we erase comment after first object. + # if the objects are multiple, comment is erased after the first + # object. This is not mandatory, but makes marginally smaller files. if comment_str: comment_str = '' # Finally we push it all to the content list. 
self._append_work_dict_to_content() - - def _write_sur_file(self): - """Write self._list_sur_file_content to a """ - - with open(self.filename, "wb") as f: - for dic in self._list_sur_file_content: - # Extremely important! self._work_dict must access - # other fields to properly encode and decode data, - # comments etc. etc. - self._move_values_to_workdict(dic) - # Then inner consistency is trivial - for key in self._work_dict: - self._work_dict[key]['b_pack_fn'](f,self._work_dict[key]['value']) - + + #Signal dictionary analysis methods @staticmethod def _get_n_axes(sig_dict: dict) -> tuple[int,int]: """Return number of navigation and signal axes in the signal dict (in that order). + Could be moved away from the .sur api as other functions probably use this as well Args: - sig_dict (dict): signal dictionary. Contains keys 'data', 'axes', 'metadata', 'original_metadata' + sig_dict (dict): signal dict, has to contain keys: 'data', 'axes', 'metadata' Returns: Tuple[int,int]: nax_nav,nax_sig. Number of navigation and signal axes @@ -540,18 +592,11 @@ def _get_n_axes(sig_dict: dict) -> tuple[int,int]: nax_sig += 1 return nax_nav, nax_sig - @staticmethod - def _get_nobjects(omd: dict) -> int: - maxobj = 0 - for k in omd: - objnum = k.split('_')[1] - objnum = int(objnum) - if objnum > maxobj: - maxobj = objnum - return maxobj - def _is_spectrum(self) -> bool: - """Determine if a signal is a spectrum based on axes naming""" + """Determine if a signal is a spectrum type based on axes naming + for export of sur_files. Could be cross-checked with other criteria + such as hyperspy subclass etc... For now we keep it simple. If it has + an ax named like a spectral axis, then probably its a spectrum. 
""" spectrumlike_axnames = ['Wavelength', 'Energy', 'Energy Loss', 'E'] is_spec = False @@ -564,7 +609,7 @@ def _is_spectrum(self) -> bool: def _is_surface(self) -> bool: """Determine if a 2d-data-like signal_dict should be of surface type, ie the dataset - is a 2d surface of the 3d plane. """ + is a 2d surface of the 3d space. """ is_surface = False surfacelike_quantnames = ['Height', 'Altitude', 'Elevation', 'Depth', 'Z'] quant: str = self.signal_dict['metadata']['Signal']['quantity'] @@ -577,129 +622,79 @@ def _is_surface(self) -> bool: def _is_binary(self) -> bool: return self.signal_dict['data'].dtype == bool - def _get_num_chans(self) -> int: - """Get number of channels (aka point size) - - Args: - obj_type (int): Object type numeric code - - Returns: - int: Number of channels (point size). - """ - obj_type = self._get_object_type() - - if obj_type == 11: - return 2 #Intensity + surface (deprecated type) - elif obj_type in [12,18]: - return 3 #RGB types - elif obj_type == 13: - return 4 #RGB surface - elif obj_type in [14, 15, 35, 36]: - return 2 #Force curves - elif obj_type in [16]: - return 5 #Surface, Intensity, R, G, B (but hardly applicable to hyperspy) - else: - return 1 - - def _get_wsize(self, nax_sig: int) -> int: - if nax_sig != 1: - raise MountainsMapFileError(f"Attempted parsing W-axis size from signal with navigation dimension {nax_sig}!= 1.") - for ax in self.signal_dict['axes']: - if not ax['navigate']: - return ax['size'] - - def _get_num_objs(self,) -> int: - """Get number of objects based on object type and number of navigation axes in the signal. 
- - Raises: - ValueError: Several digital surf save formats will need a navigation dimension of 1 - - Returns: - int: _description_ - """ - obj_type = self._get_object_type() - nax_nav, _ = self._get_n_axes() - - if obj_type in [1,2,3,6,9,10,11,12,13,14,15,16,17,20,21,35,36,37]: - return 1 - elif obj_type in [4,5,7,8,18]: - if nax_nav != 1: - raise MountainsMapFileError(f"Attempted to save signal with number type {obj_type} and navigation dimension {nax_nav}.") - for ax in enumerate(self.signal_dict['axes']): - if ax['navigate']: - return ax['size'] - - def _get_object_type(self) -> int: + #Splitting /subclassing methods + def _split_signal_dict(self): """Select the suitable _mountains_object_types """ - nax_nav, nax_sig = self._get_n_axes(self.signal_dict) - - obj_type = None - if nax_nav == 0: - if nax_sig == 0: - raise MountainsMapFileError(msg=f"Object with empty navigation and signal axes not supported for .sur export") - elif nax_sig == 1: - if self._is_spectrum(): - obj_type = 20 # '_SPECTRUM' - else: - obj_type = 1 # '_PROFILE' - elif nax_sig == 2: - if self._is_binary(): - obj_type = 3 # "_BINARYIMAGE" - elif is_rgb(self.signal_dict['data']): - obj_type = 12 #"_RGBIMAGE" - elif is_rgba(self.signal_dict['data']): - warnings.warn(f"Alpha channel discarded upon saving RGBA signal in .sur format") - obj_type = 12 #"_RGBIMAGE" - elif self._is_surface(): - obj_type = 2 #'_SURFACE' - else: - obj_type = 10 #_INTENSITYSURFACE + n_nav = self._n_ax_nav + n_sig = self._n_ax_sig + + #Here, I manually unfold the nested conditions for legibility. + #Since there are a fixed number of dimensions supported by + # digitalsurf .sur/.pro files, I think this is the best way to + # proceed. 
+ if (n_nav,n_sig) == (0,1): + if self._is_spectrum(): + self._split_spectrum() else: - raise MountainsMapFileError(msg=f"Object with signal dimension {nax_sig} > 2 not supported for .sur export") - elif nax_nav == 1: - if nax_sig == 0: - warnings.warn(f"Exporting surface signal dimension {nax_sig} and navigation dimension {nax_nav} falls back on surface type but is not good practice.") - obj_type = 1 # '_PROFILE' - elif nax_sig == 1: - if self._is_spectrum(): - obj_type = 20 # '_SPECTRUM' - else: - obj_type = 1 # '_PROFILE' - elif nax_sig ==2: - #Also warn - if is_rgb(self.signal_dict['data']): - obj_type = 18 #"_SERIESOFRGBIMAGE" - elif is_rgba(self.signal_dict['data']): - warnings.warn(f"Alpha channel discarded upon saving RGBA signal in .sur format") - obj_type = 18 #"_SERIESOFRGBIMAGE" - else: - obj_type = 5 #"_SURFACESERIE" - else: - raise MountainsMapFileError(msg=f"Object with signal dimension {nax_sig} > 2 not supported for .sur export") - elif nax_nav == 2: - if nax_sig == 0: - warnings.warn(f"Signal dimension {nax_sig} and navigation dimension {nax_nav} exported as surface type. Consider transposing signal object before exporting if this is intentional.") - if self._is_surface(): - obj_type = 2 #'_SURFACE' - else: - obj_type = 10 #_INTENSITYSURFACE - elif nax_sig == 1: - obj_type = 21 #'_HYPCARD' + self._split_profile() + elif (n_nav,n_sig) == (0,2): + if self._is_binary(): + self._split_binary_img() + elif is_rgb(self.signal_dict['data']): #"_RGBIMAGE" + self._split_rgb() + elif is_rgba(self.signal_dict['data']): + warnings.warn(f"A channel discarded upon saving \ + RGBA signal in .sur format") + self._split_rgb() + # elif self._is_surface(): #'_SURFACE' + # self._split_surface() + else: # _INTENSITYSURFACE + self._split_surface() + elif (n_nav,n_sig) == (1,0): + warnings.warn(f"Exporting surface signal dimension {n_sig} and navigation dimension \ + {n_nav} falls back on profile type but is not good practice. 
Consider \ + transposing before saving to avoid unexpected behaviour.") + self._split_profile() + elif (n_nav,n_sig) == (1,1): + if self._is_spectrum(): + self._split_spectrum() + else: + self._split_profileserie() + elif (n_nav,n_sig) == (1,2): + if is_rgb(self.signal_dict['data']): + self._split_rgbserie() + if is_rgba(self.signal_dict['data']): + warnings.warn(f"Alpha channel discarded upon saving RGBA signal in .sur format") + self._split_rgbserie() else: - raise MountainsMapFileError(msg=f"Object with signal dimension {nax_sig} and navigation dimension {nax_nav} not supported for .sur export") + self._split_surfaceserie() + elif (n_nav,n_sig) == (2,0): + warnings.warn(f"Signal dimension {n_sig} and navigation dimension {n_nav} exported as surface type. Consider transposing signal object before exporting if this is intentional.") + if self._is_binary(): + self._split_binary_img() + elif is_rgb(self.signal_dict['data']): #"_RGBIMAGE" + self._split_rgb() + elif is_rgba(self.signal_dict['data']): + warnings.warn(f"A channel discarded upon saving \ + RGBA signal in .sur format") + self._split_rgb() + if self._is_surface(): + self._split_surface() + else: + self._split_intensitysurface() + elif (n_nav,n_sig) == (2,1): + self._split_hyperspectral() else: - #Also raise - raise MountainsMapFileError(msg=f"Object with navigation dimension {nax_nav} > 2 not supported for .sur export") - - return obj_type + raise MountainsMapFileError(msg=f"Object with signal dimension {n_sig} and navigation dimension {n_nav} not supported for .sur export") def _split_spectrum(self,): - """Must set axes except Z, data_split & objtype_split attributes""" - #When splitting spectrum, remember that instead of the series axis (T/W), + """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" + #When splitting spectrum, no series axis (T/W), #X axis is the spectral dimension and Y the series dimension (if series). 
- # Xaxis = {} - # Yaxis = {} + obj_type = 20 + self._Object_type = self._mountains_object_types[obj_type] + nax_nav = self._n_ax_nav nax_sig = self._n_ax_sig @@ -712,12 +707,15 @@ def _split_spectrum(self,): raise MountainsMapFileError(f"Dimensions ({nax_nav})|{nax_sig}) invalid for export as spectrum type") self.data_split = [self.signal_dict['data']] - self.objtype_split = [20] - self._N_data_object = 1 + self.objtype_split = [obj_type] + self._N_data_objects = 1 self._N_data_channels = 1 def _split_profile(self,): - """Must set axes except Z, data_split & objtype_split attributes""" + """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" + + obj_type = 1 + self._Object_type = self._mountains_object_types[obj_type] if (self._n_ax_nav,self._n_ax_sig) in [(0,1),(1,0)]: self.Xaxis = self.signal_dict['axes'][0] @@ -725,28 +723,31 @@ def _split_profile(self,): raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for a profile type") self.data_split = [self.signal_dict['data']] - self.objtype_split = [1] - self._N_data_object = 1 + self.objtype_split = [obj_type] + self._N_data_objects = 1 self._N_data_channels = 1 def _split_profileserie(self,): - """Must set axes except Z, data_split & objtype_split attributes""" + """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 4 # '_PROFILESERIE' + self._Object_type = self._mountains_object_types[obj_type] if (self._n_ax_nav,self._n_ax_sig)==(1,1): self.Xaxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) else: - raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._Object_type} type") self.data_split = self._split_data_alongaxis(self.Taxis) 
self.objtype_split = [obj_type] + [1]*(len(self.data_split)-1) - self._N_data_object = len(self.objtype_split) + self._N_data_objects = len(self.objtype_split) self._N_data_channels = 1 def _split_binary_img(self,): - """Must set axes except Z, data_split & objtype_split attributes""" + """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 3 + self._Object_type = self._mountains_object_types[obj_type] + if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: self.Xaxis = self.signal_dict['axes'][1] self.Yaxis = self.signal_dict['axes'][0] @@ -755,12 +756,13 @@ def _split_binary_img(self,): self.data_split = [self.signal_dict['data']] self.objtype_split = [obj_type] - self._N_data_object = 1 + self._N_data_objects = 1 self._N_data_channels = 1 def _split_rgb(self,): - """Must set axes except Z, data_split & objtype_split attributes""" + """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 12 + self._Object_type = self._mountains_object_types[obj_type] if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: self.Xaxis = self.signal_dict['axes'][1] self.Yaxis = self.signal_dict['axes'][0] @@ -772,12 +774,13 @@ def _split_rgb(self,): np.int32(self.signal_dict['data']['B']) ] self.objtype_split = [obj_type] + [10,10] - self._N_data_object = 1 + self._N_data_objects = 1 self._N_data_channels = 3 def _split_surface(self,): - """Must set axes except Z, data_split & objtype_split attributes""" + """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 2 + self._Object_type = self._mountains_object_types[obj_type] if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: self.Xaxis = self.signal_dict['axes'][1] self.Yaxis = self.signal_dict['axes'][0] @@ -785,12 +788,13 @@ def _split_surface(self,): raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") 
self.data_split = [self.signal_dict['data']] self.objtype_split = [obj_type] - self._N_data_object = 1 + self._N_data_objects = 1 self._N_data_channels = 1 def _split_intensitysurface(self,): - """Must set axes except Z, data_split & objtype_split attributes""" + """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 10 + self._Object_type = self._mountains_object_types[obj_type] if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: self.Xaxis = self.signal_dict['axes'][1] self.Yaxis = self.signal_dict['axes'][0] @@ -798,12 +802,14 @@ def _split_intensitysurface(self,): raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") self.data_split = [self.signal_dict['data']] self.objtype_split = [obj_type] - self._N_data_object = 1 + self._N_data_objects = 1 self._N_data_channels = 1 def _split_rgbserie(self): + """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 18 #"_SERIESOFRGBIMAGE" - + self._Object_type = self._mountains_object_types[obj_type] + sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if not ax['navigate']) self.Yaxis = next(sigaxes_iter) self.Xaxis = next(sigaxes_iter) @@ -817,11 +823,13 @@ def _split_rgbserie(self): self.objtype_split += [12,10,10] self.objtype_split[0] = obj_type - self._N_data_object = self.Taxis['size'] + self._N_data_objects = self.Taxis['size'] self._N_data_channels = 3 def _split_surfaceserie(self): + """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 5 + self._Object_type = self._mountains_object_types[obj_type] sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if not ax['navigate']) self.Yaxis = next(sigaxes_iter) self.Xaxis = next(sigaxes_iter) @@ -829,21 +837,25 @@ def _split_surfaceserie(self): self.data_split = self._split_data_alongaxis(self.Taxis) self.objtype_split = 
[2]*len(self.data_split) self.objtype_split[0] = obj_type - self._N_data_object = len(self.data_split) + self._N_data_objects = len(self.data_split) self._N_data_channels = 1 def _split_hyperspectral(self): + """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 21 + self._Object_type = self._mountains_object_types[obj_type] sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if ax['navigate']) self.Yaxis = next(sigaxes_iter) self.Xaxis = next(sigaxes_iter) self.Taxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) self.data_split = [self.signal_dict['data']] self.objtype_split = [obj_type] - self._N_data_object = 1 + self._N_data_objects = 1 self._N_data_channels = 1 def _split_data_alongaxis(self, axis: dict) -> list[np.ndarray]: + """Split the data in a series of lower-dim datasets that can be exported to + a surface / profile file""" idx = self.signal_dict['axes'].index(axis) # return idx datasplit = [] @@ -851,80 +863,12 @@ def _split_data_alongaxis(self, axis: dict) -> list[np.ndarray]: datasplit.append(dslice) return datasplit - def _split_signal_dict(self): - """Select the suitable _mountains_object_types """ - - n_nav = self._n_ax_nav - n_sig = self._n_ax_sig - - #Here, I manually unfold the nested conditions for legibility. - #Since there are a fixed number of dimensions supported by - # digitalsurf .sur/.pro files, I think this is the best way to - # proceed. 
- if (n_nav,n_sig) == (0,1): - if self._is_spectrum(): - self._split_spectrum() - else: - self._split_profile() - elif (n_nav,n_sig) == (0,2): - if self._is_binary(): - self._split_binary_img() - elif is_rgb(self.signal_dict['data']): #"_RGBIMAGE" - self._split_rgb() - elif is_rgba(self.signal_dict['data']): - warnings.warn(f"A channel discarded upon saving \ - RGBA signal in .sur format") - self._split_rgb() - elif self._is_surface(): #'_SURFACE' - self._split_surface() - else: # _INTENSITYSURFACE - self._split_intensitysurface() - elif (n_nav,n_sig) == (1,0): - warnings.warn(f"Exporting surface signal dimension {n_sig} and navigation dimension \ - {n_nav} falls back on profile type but is not good practice. Consider \ - transposing before saving to avoid unexpected behaviour.") - self._split_profile() - elif (n_nav,n_sig) == (1,1): - if self._is_spectrum(): - self._split_spectrum() - else: - self._split_profileserie() - elif (n_nav,n_sig) == (1,2): - if is_rgb(self.signal_dict['data']): - self._split_rgbserie() - if is_rgba(self.signal_dict['data']): - warnings.warn(f"Alpha channel discarded upon saving RGBA signal in .sur format") - obj_type = 18 #"_SERIESOFRGBIMAGE" - self._split_rgbserie() - else: - self._split_surfaceserie() - elif (n_nav,n_sig) == (2,0): - warnings.warn(f"Signal dimension {n_sig} and navigation dimension {n_nav} exported as surface type. 
Consider transposing signal object before exporting if this is intentional.") - if self._is_binary(): - self._split_binary_img() - elif is_rgb(self.signal_dict['data']): #"_RGBIMAGE" - self._split_rgb() - elif is_rgba(self.signal_dict['data']): - warnings.warn(f"A channel discarded upon saving \ - RGBA signal in .sur format") - self._split_rgb() - if self._is_surface(): - self._split_surface() - else: - self._split_intensitysurface() - elif (n_nav,n_sig) == (2,1): - self._split_hyperspectral() - else: - raise MountainsMapFileError(msg=f"Object with signal dimension {n_sig} and navigation dimension {n_nav} not supported for .sur export") - - def _norm_data(self, data: np.ndarray, is_special: bool, apply_sat_lo: bool = False, apply_sat_hi: bool = False): + def _norm_data(self, data: np.ndarray, is_special: bool): """Normalize input data to 16-bits or 32-bits ints and initialize an axis on which the data is normalized. Args: data (np.ndarray): dataset - is_special (bool): whether NaNs get sent to N.M points in the sur format. - apply_sat_lo (bool, optional): Signal low-value saturation in output datafile. Defaults to False. - apply_sat_hi (bool, optional): Signal high-value saturation in output datafile. Defaults to False. + is_special (bool): whether NaNs get sent to N.M points in the sur format and apply saturation Raises: MountainsMapFileError: raised if input is of complex type @@ -939,17 +883,17 @@ def _norm_data(self, data: np.ndarray, is_special: bool, apply_sat_lo: bool = Fa if np.issubdtype(data_type,np.complexfloating): raise MountainsMapFileError(f"digitalsurf file formats do not support export of complex data. 
Convert data to real-value representations before before export") elif data_type==np.uint8: - warnings.warn("np.uint8 datatype exported as 16bits") - pointsize = 16 #Pointsize has to be 16 or 32 in surf format + warnings.warn("np.uint8 datatype exported as np.int16.") + pointsize = 16 Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data.astype(np.int16), pointsize, is_special) data_int = data.astype(np.int16) elif data_type==np.uint16: - warnings.warn("np.uint16 datatype exported as 32bits") + warnings.warn("np.uint16 datatype exported as np.int32") pointsize = 32 #Pointsize has to be 16 or 32 in surf format Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data.astype(np.int32), pointsize, is_special) data_int = data.astype(np.int32) elif np.issubdtype(data_type,np.unsignedinteger): - raise MountainsMapFileError(f"digitalsurf file formats do not support unsigned data >16bits. Convert data to signed integers before export.") + raise MountainsMapFileError(f"digitalsurf file formats do not support unsigned int >16bits. Convert data to signed integers before export.") elif data_type==np.int8: pointsize = 16 #Pointsize has to be 16 or 32 in surf format Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, 8, is_special) @@ -965,16 +909,15 @@ def _norm_data(self, data: np.ndarray, is_special: bool, apply_sat_lo: bool = Fa elif np.issubdtype(data_type,np.integer): raise MountainsMapFileError(f"digitalsurf file formats do not support export integers larger than 32 bits. 
Convert data to 32-bit representation before exporting") elif np.issubdtype(data_type,np.floating): - if self.signal_dict['data'].itemsize*8 > 32: - warnings.warn(f"Lossy conversion of {data_type} to 32-bits-ints representation will occur.") pointsize = 32 Zmin, Zmax, Zscale, Zoffset, data_int = self._norm_float(data, is_special) return pointsize, Zmin, Zmax, Zscale, Zoffset, data_int def _norm_signed_int(self, data:np.ndarray, intsize: int, is_special: bool = False): + """Normalized data of integer type. No normalization per se, but the Zmin and Zmax threshold are set + if saturation needs to be flagged""" # There are no NaN values for integers. Special points means considering high/low saturation of integer scale. - data_int_min = - 2**(intsize-1) data_int_max = 2**(intsize -1) @@ -984,12 +927,13 @@ def _norm_signed_int(self, data:np.ndarray, intsize: int, is_special: bool = Fal Zmin = data_int_min + 1 if is_satlo else data.min() Zmax = data_int_max - 1 if is_sathi else data.max() Zscale = 1.0 - Zoffset = 0.0 + Zoffset = Zmin return Zmin, Zmax, Zscale, Zoffset def _norm_float(self, data : np.ndarray, is_special: bool = False,): - """Normalize float data on a 32 bits int scale.""" + """Normalize float data on a 32 bits int scale. Inherently lossy + but that's how things are with mountainsmap files. 
""" Zoffset_f = np.nanmin(data) Zmax_f = np.nanmax(data) @@ -1035,22 +979,6 @@ def _get_Zname_Zunit(self, metadata: dict) -> tuple[str,str]: return Zname,Zunit - def _get_datetime_info(self,) -> tuple[int,int,int,int,int,int]: - date = self.signal_dict['metadata']['General'].get('date','') - time = self.signal_dict['metadata']['General'].get('time','') - - try: - [yyyy,mm,dd] = date.strip().split('-') - except ValueError: - [yyyy,mm,dd] = [0,0,0] - - try: - [hh,minmin,ss] = time.strip().strip('Z').slit(':') - except ValueError: - [hh,minmin,ss] = [0,0,0] - - return yyyy,mm,dd,hh,minmin,ss - def _build_workdict(self, data: np.ndarray, obj_type: int, @@ -1058,10 +986,13 @@ def _build_workdict(self, comment: str = "", is_special: bool = True, compressed: bool = True, + object_name: str = '', operator_name: str = '', + absolute: int = 0, private_zone: bytes = b'', client_zone: bytes = b'' ): + """Populate _work_dict with the """ if not compressed: self._work_dict['_01_Signature']['value'] = 'DIGITAL SURF' # DSCOMPRESSED by default @@ -1069,20 +1000,20 @@ def _build_workdict(self, self._work_dict['_01_Signature']['value'] = 'DSCOMPRESSED' # DSCOMPRESSED by default # self._work_dict['_02_Format']['value'] = 0 # Dft. other possible value is 257 for MacintoshII computers with Motorola CPUs. Obv not supported... - self._work_dict['_03_Number_of_Objects']['value'] = self._N_data_object + self._work_dict['_03_Number_of_Objects']['value'] = self._N_data_objects # self._work_dict['_04_Version']['value'] = 1 # Version number. Always default. 
self._work_dict['_05_Object_Type']['value'] = obj_type - # self._work_dict['_06_Object_Name']['value'] = '' Obsolete, DOS-version only (Not supported) + self._work_dict['_06_Object_Name']['value'] = object_name #Obsolete, DOS-version only (Not supported) self._work_dict['_07_Operator_Name']['value'] = operator_name #Should be settable from kwargs self._work_dict['_08_P_Size']['value'] = self._N_data_channels - # self._work_dict['_09_Acquisition_Type']['value'] = 0 # AFM data only, could be inferred - # self._work_dict['_10_Range_Type']['value'] = 0 #Only 1 for high-range (z-stage scanning), AFM data only, could be inferred + self._work_dict['_09_Acquisition_Type']['value'] = 0 # AFM data only, could be inferred + self._work_dict['_10_Range_Type']['value'] = 0 #Only 1 for high-range (z-stage scanning), AFM data only, could be inferred self._work_dict['_11_Special_Points']['value'] = int(is_special) - # self._work_dict['_12_Absolute']['value'] = 0 #Probably irrelevant in most cases. Absolute vs rel heights (for profilometers), can be inferred - # self._work_dict['_13_Gauge_Resolution']['value'] = 0.0 #Probably irrelevant. Only for profilometers (maybe AFM), can be inferred + self._work_dict['_12_Absolute']['value'] = absolute #Probably irrelevant in most cases. Absolute vs rel heights (for profilometers), can be inferred + self._work_dict['_13_Gauge_Resolution']['value'] = 0.0 #Probably irrelevant. Only for profilometers (maybe AFM), can be inferred # T-axis acts as W-axis for spectrum / hyperspectrum surfaces. 
if obj_type in [21]: @@ -1091,7 +1022,7 @@ def _build_workdict(self, ws = 0 self._work_dict['_14_W_Size']['value'] = ws - bsize, Zmin, Zmax, Zscale, Zoffset, data_int = self._norm_data(data,is_special,apply_sat_lo=True,apply_sat_hi=True) + bsize, Zmin, Zmax, Zscale, Zoffset, data_int = self._norm_data(data,is_special) Zname, Zunit = self._get_Zname_Zunit(metadata) #Axes element set regardless of object size @@ -1100,7 +1031,9 @@ def _build_workdict(self, self._work_dict['_17_Zmax']['value'] = Zmax self._work_dict['_18_Number_of_Points']['value']= self.Xaxis.get('size',1) self._work_dict['_19_Number_of_Lines']['value'] = self.Yaxis.get('size',1) - self._work_dict['_20_Total_Nb_of_Pts']['value'] = data.size + #This needs to be this way due to the way we export our hyp maps + self._work_dict['_20_Total_Nb_of_Pts']['value'] = self.Xaxis.get('size',1)*self.Yaxis.get('size',1) + self._work_dict['_21_X_Spacing']['value'] = self.Xaxis.get('scale',0.0) self._work_dict['_22_Y_Spacing']['value'] = self.Yaxis.get('scale',0.0) self._work_dict['_23_Z_Spacing']['value'] = Zscale @@ -1136,11 +1069,13 @@ def _build_workdict(self, if compressed: data_bin = self._compress_data(data_int,nstreams=1) #nstreams hard-set to 1. 
Could be unlocked in the future + compressed_size = len(data_bin) else: - fmt = " 0 and self._N_data_object > 0: - n_objects_to_read = self._N_data_channels * self._N_data_object + if self._N_data_channels > 0 and self._N_data_objects > 0: + n_objects_to_read = self._N_data_channels * self._N_data_objects elif self._N_data_channels > 0: n_objects_to_read = self._N_data_channels - elif self._N_data_object > 0: - n_objects_to_read = self._N_data_object + elif self._N_data_objects > 0: + n_objects_to_read = self._N_data_objects else: n_objects_to_read = 1 @@ -1626,7 +1561,7 @@ def _build_original_metadata(self,): original_metadata_dict = {} # Iteration over Number of data objects - for i in range(self._N_data_object): + for i in range(self._N_data_objects): # Iteration over the Number of Data channels for j in range(max(self._N_data_channels,1)): # Creating a dictionary key for each object @@ -2009,8 +1944,21 @@ def post_process_RGB(signal): ) return signal - # pack/unpack binary quantities + + @staticmethod + def _get_uint16(file, default=None): + """Read a 16-bits int with a user-definable default value if + no file is given""" + if file is None: + return default + b = file.read(2) + return struct.unpack("h", b)[0] - else: - return struct.unpack(" int: + """Return size of uncompressed data in bytes""" + psize = int(self._get_work_dict_key_value("_15_Size_of_Points") / 8) + # Datapoints in X and Y dimensions + Npts_tot = self._get_work_dict_key_value("_20_Total_Nb_of_Pts") + # Datasize in WL. max between value and 1 as often W_Size saved as 0 + Wsize = max(self._get_work_dict_key_value("_14_W_Size"),1) + # Wsize = 1 + + datasize = Npts_tot * Wsize * psize + + return datasize + def _unpack_data(self, file, encoding="latin-1"): # Size of datapoints in bytes. 
Always int16 (==2) or 32 (==4) psize = int(self._get_work_dict_key_value("_15_Size_of_Points") / 8) - Zmin = self._get_work_dict_key_value("_16_Zmin") - dtype = np.int16 if psize == 2 else np.int32 if self._get_work_dict_key_value("_01_Signature") != "DSCOMPRESSED": @@ -2142,17 +2098,17 @@ def _unpack_data(self, file, encoding="latin-1"): # Datapoints in X and Y dimensions Npts_tot = self._get_work_dict_key_value("_20_Total_Nb_of_Pts") # Datasize in WL - Wsize = self._get_work_dict_key_value("_14_W_Size") + Wsize = max(self._get_work_dict_key_value("_14_W_Size"),1) # We need to take into account the fact that Wsize is often # set to 0 instead of 1 in non-spectral data to compute the # space occupied by data in the file - readsize = Npts_tot * psize - if Wsize != 0: - readsize *= Wsize - + readsize = Npts_tot * psize * Wsize + # if Wsize != 0: + # readsize *= Wsize + buf = file.read(readsize) # Read the exact size of the data - _points = np.frombuffer(file.read(readsize), dtype=dtype) + _points = np.frombuffer(buf, dtype=dtype) else: # If the points are compressed do the uncompress magic. 
There @@ -2184,8 +2140,9 @@ def _unpack_data(self, file, encoding="latin-1"): if self._get_work_dict_key_value("_11_Special_Points") == 1: # has non-measured points nm = _points == self._get_work_dict_key_value("_16_Zmin") - 2 - - _points = (_points.astype(float) - Zmin) * self._get_work_dict_key_value("_23_Z_Spacing") * self._get_work_dict_key_value("_35_Z_Unit_Ratio") + self._get_work_dict_key_value("_55_Z_Offset") + + Zmin = self._get_work_dict_key_value("_16_Zmin") + _points = (_points.astype(float) - Zmin)*self._get_work_dict_key_value("_23_Z_Spacing") * self._get_work_dict_key_value("_35_Z_Unit_Ratio") + self._get_work_dict_key_value("_55_Z_Offset") # We set the point in the numeric scale if self._is_data_int(): @@ -2199,7 +2156,10 @@ def _unpack_data(self, file, encoding="latin-1"): def _pack_data(self, file, val, encoding="latin-1"): """This needs to be special because it writes until the end of file.""" #Also valid for uncompressed - datasize = self._get_work_dict_key_value('_48_Compressed_data_size') + if self._get_work_dict_key_value("_01_Signature") != "DSCOMPRESSED": + datasize = self._get_uncompressed_datasize() + else: + datasize = self._get_work_dict_key_value('_48_Compressed_data_size') self._set_bytes(file,val,datasize) @staticmethod @@ -2286,8 +2246,14 @@ def file_writer(filename, signal: dict, **kwds): comments: dict, default = {} Set a custom dictionnary in the comments field of the exported file. Ignored if set_comments is not set to 'custom'. + object_name: str, default = '' + Set the object name field in the output file operator_name: str, default = '' Set the operator name field in the exported file. + absolute: int, default = 0, + Unsigned int capable of flagging whether surface heights are relative (0) or + absolute (1). Higher unsigned int values can be used to distinguish several + data series sharing internal reference private_zone: bytes, default = b'', Set arbitrary byte-content in the private_zone field of exported file metadata. 
Maximum size is 32.0 kB and content will be cropped if this size is exceeded @@ -2301,5 +2267,5 @@ def file_writer(filename, signal: dict, **kwds): ds._build_sur_file_contents(**kwds) ds._write_sur_file() - -file_reader.__doc__ %= (FILENAME_DOC,SIGNAL_DOC) +file_reader.__doc__ %= (FILENAME_DOC,LAZY_UNSUPPORTED_DOC,RETURNS_DOC) +file_writer.__doc__ %= (FILENAME_DOC,SIGNAL_DOC) diff --git a/rsciio/tests/data/digitalsurf/test_isurface.sur b/rsciio/tests/data/digitalsurf/test_isurface.sur new file mode 100644 index 0000000000000000000000000000000000000000..2719726e8197eef500378a04998f2d7935ebf709 GIT binary patch literal 56141 zcmc$FbxbBOu;&7cyE`nfxVyvR%i`|t&WF3Zv$(svyMDO4`-i)`+`e31-uvV3a`)Fw zGij&Ow9_={{5q40Dv8L+$SaB|DT#@G`v&%}fPbU;;bh|Iq~~aFV&vptV6A6kVE=>W z$NwFHefyvEpY^wIKfZmgv3!vSFMRX+r}_{6_UqfXV&p2W|KQ3&N=`KkV<+?fHvMlC z3o~=4|LPEt{vm1WWMb=R;pF}wy*4)grTbsW1`N&&|9!;&>-c|G3jP=Whr$16l3D%l z`v1Q2|Bv>6+7cCIEApT1|Ggdm^PIrG3H{rHTPB^B$*;uiWrB7WgA|jKWLwPh%gf8^ zoK1QcS6BTOKOCz3{L>lF*U>~8x0$U~lh5z$9wh(Kv)NH2 z{%$07e^z8{1NRw2X>0AU@2l44M5#f0_@LPeOVY>3^q9u0J3!+F+IxgXOv%@h@F0ND zL7+P1YCz?hRy|zuG2;j4O2nvI%deg@C<5Jjq9*+29q)Hj(F{A0&kGx_+zxmb9$wT& zb9hy|jWG2GnP-f8une?iVT@X=8&iB{?`3hX)HWn6D~>(w6-{$S?MRs_wSERCfpjd{ zej`w~e3ljb8!_4dVjNjxtbpprcNe*oop4orHWOqK?-`83vMG_7@~>W6TKRh;#Cvf& zAs1i0%(q&2Z=OqGZDPIma1ZJ65Zj&1&R{Gms+Nbz2 z{B)Us6S_cY&A@$2q8$SP!c=>+ z7MBdoWoNi~6UMy_0(YEq*CozgR<8td7Trqi{UA_YL%? z(M1#fPUuWuu@O&c>59vAa@>$R*@zXE(Zg#XbX`X`h!z!-PX6D6<IhI4Gqb7G7t!0LI(8yNTpQz^MRH{+&k;@{bUdsS3|bo)za+=8TH1_w{>?Sdz>@Gx=Q}T-@Ihk?wlB#mr+u_U3D&*l%&I}p!jf% z3gF%G{2MC3mN<|hQ(ve>AE|uAY*(0>!EL?QArITZwh?#>tpoTlWGpRz3_ZR3d~T`X zMA&-j-JR62FB!t0D$)&ebn1N-#y?*>46=wPI>w_sHv*^`=;|Vaht!4HmgCJ-1Habt%)= zIVVC!^=KNta^PQALcD%CiH@n6r6Rcwe6SyO6kE$NC_n}rq>7aNl5LUz5h7q1c zgi|0rI}f2YIA$?B<-;G^y7Xx&VP|q(1gP9)-ApW_Vxc0XJ{DrCMx0+id(#fIatU<< zG5KrScFtKn`C83`wSdv~$4TFnvd(EdrlciE!E$aA`KP>eg3aw}-e4 z((>qul-Kjzqv)IQ8Us{0dEeP-?mE0TvW5BQc?H+^2C! 
z%E;+?)6!O=UyXwfT9gP3&cO4FDXtRh$P_91Yfv^q3j?S8kq(=b8sYBsiIm%~&VWgs z8vDM^o^z%=|8Ndcz8r&y=nLyQvPHxa8~!@G4(!_+51@^?*2t8*VOAh7L9kovM%6?jo#9wvOEiP>G9l?E%uAl zEC7h@K~8cc*M1nqI9r%79J^v%K%hrqLUuNp2YYVo~98_Jy{Kv`}RlY0&86weM zA@670ZzJqNZ_^-YVl+oDB{DkO zg1W&T4DWgCeo&Xe)HBgR*rAH*ih@~im8t5=j$zDJXw)vcS9i;?<$mQlL-Q)2MH~%6yn=ISD~WpGZDe8_`xcl* zuP?X-Hv5#B$2N#-h1@~LH%(Ym)0#WA4Cy`9Y7jF)ctPB1FX?dvpAD)R6?#^e1FE4Q6XU4mGnfEKy8qkD3gK#f z2*+m(M#>}Ia8}>wy~`?K$!>zVpo=`DtI2Q5Oe$)cNuoo{^m@ZGh;>XnHH~x_v={A71QA;%pnG@@-QIZBgSoJ0BE*ophzF6*UFuFMmKA`4#ya(CF0&gK2?;O ze_jVWr@m?|b(pBY7XrX2LTLQ1|S?}5a$`a-w%&Mh-Xx8LTR#7ZF2C$20W}M!@u0+uK z^(LD5DU{h|zz*tRG;cJXbgn&&WKxBoRT;hFas<RI7uuy9(b}O{eZFowxd= zqu@@Ildt8`uOXDS5v>ztZwC^~t)FiR-ABdt_sU*Kh2F-_(+Do*5syJ+4R3MuanLw# zF-WmzeZ-z1_%f}^es}@p=x7umy^izZGqm!x07G+Ns;*@oCI(gfY-1Ru@)osAY!UEq z6g5jbgusdRosO4IY)FzBufrmr`xkUcqL}>&t(XKax4*ji=Z8UrE+=UPG=qRN2Z-;%;k&+im{aA(B@hokGu-%;AhTEoQ&}Zr zw#g8fnbc!!P0&WLqzs;f@aMM^GAb=`<8+1yO03H-rUFBk-S}p2Vs!Un{Edp@Z?rr+WRg`XoZ*CcAevB z87kWKe@Dk{yK2w4twUtvuNx+rBB#ox2BOzf&hYYLpckv}ZmN^f*~RUCy5iP%;~H+* zKv=UdiL3jA<=^eMQgRAk>X6-Y8HGWMLtX2b1opX6qF_KBC`$`QX?hAO{tbl~j)gFuZZXpmuEm~s z^HUie>qs>9Q#J}#fCooebsSPEpW9@FjDo9kHzT@iW2NJ75gFdgY+eXblUU~)U5Mio z)ZIYlw~*H!qk(YqDK1oCH8NRgJnj&9!}qBZwQ+ViXVjiCoWLXezN=g?EVhVUXCVk& z#|(2Kb@8)E3ycoR`XTBB$EzhkX9Nt#(yRIUFpP6vh#P$hS^wrYqojNN6*cs)emrg; zPwuLwzpH#Ksl`j2Toor|-n4&O-(xte6ZemCjH9ThOjpn>!LuMjYVG597I_HYJ&M9N zv@zc)3-(r_VlhuOM%0M-J))sw`HQx*#wj>x08{|<; zvGh*svYcYr@ZU0)FQ}U>P@X*;I$4Bo0pj|L#r^2i6Y|uS$oD| z7@E{*=SnMsKX^0zPNXV!9b?HLHKjkd$(DXjZ{*C){ak|=G3jhSA_5GpL6G6Vba;<3}xy0&iFc@NK{>eR-%wMJ5e8Cd+Y|DrZOC6n3Lf5#UHH{kq*-V&W&*4Y%4Vfty%5!iRHLooXK zB&NMk86Azi&t+|hhz$?Dq2d@iZ=t6B*K@Bn9=ck^Z7(|oJD{hXOn4F(=+b7?E5Z3( z%A2=31FLotun^4IpAebnD1$6c<2&srO4{XsRXWfv>*%36C3i-R`Ela z+&e|I$ym8^b5JtiBB*UL}{-&(_J1^Ft*q@Ic-(~uQ3rpvtJaxU+YX7bPV%unaR#k?IbjPaap@YY2PbT z1$fA&oH*yZxzFY`+-f!4k;LZXa2>k(l)ZMcS1?cOPRYt`90gf9@QNCm*=z0TCm24^IL z4IwOq&a)G029a2gQ=ihq6iCR*sNY7n&EMA;f0Ifi&S))2 
z<4fplSTGHtS_yqka7u_K<|c8Z8h$~c-(Rzd{tCeGpk1i_Q5r8vLLOC5YgqmVH^-Cn zcJi0=F#-0Y4iI5;y|JH+UIOw*)_v1S!!RdLxENV5}q<2>! z1d@4M{+EhpRQw95>@?AVurxyV4BqC_V}JCFNh%RR3{6`vZm8;Pkqr`~;KyJ_ilGKQ z?QT5n*t0UF@U0m&O(gluW1``gCBHN=_23nfL-6M$4GPn%5PQx2h_S*e@@aeU?MQcq zwTDv}XSA(DhX|7tFW-JvfDuB^UDPA%JlC-^KQf|G{)g{|!4Hm8_DuNCldQ zi_{)djXDv3$^_Zg%S0t@$xEXpMXb%bWegzr%zVz_{QmK>>&;SAxXf&sMqz0)c&!?r%PpPX>yKGtEI%K& zW1~h`Dq2P++~W_@OJ;Eu;YmXLRLsPlpoB%%@OWVtq(?IPCC_9;`KH9r@%O8XA@>v{ zP9p-pjATht9nC}V3Fn@uC0;`d>DAns>bazPH^ooBP&Frfk43lu9lpyW8s$mrJbRRc zqd@$nXJ?)G15n7oF+DRhje;08s@OM_>v8yAwNn*g33i9SI!ERam*?L;XsBHg-*=nM zjgV1QYZ96y3^G0#MR^o~KT} ztVagr$K_O-M;L58yKPx!jGA(M@Ld#Vx+$1D%Qs(HU} z(3qc3KBm?$!5)$#n=iQSZrIChe4U2NV9MH8If@&%<9}W{`=8RPNgp4CxH_mQyShLv z^MX`c$quIgLq|xL>dh0-;yRxCZ&BG|~&up9icGc3~xJI^X`R{oT0l1&QTYKRBp%3#qR0t_@IQ||;mr|8pqRX(hy zhk8g3Qp)Oj9IlWhti1VQ4I=60 zM@9aC0w;`~w|*gH+C*~o;=EA)>~N0lf=ZPFCgG}tbak#--1~RIu6HQZd&b9B>~xpK zQCfuT@r^2wS@8Jkf{ZCfC0>S!i_{@Ie7^3_tj3{&Io}0$1M%ZUy@+V9Qf}%iQ|ENS zV-UP0o9fNzydP7xD$?C;i@viG;Tb9=<@z(K=T6G7I`L5q=x0*9O@34T>=%F|cwL?U zK1Jk}@E724Nxw|<1Wo3-V~0&apa2Emt%;ok&W{tyPba@0%6lDNt}57gqP;5_VO0joF7@<0HO zL!uG~VX9Dpu|0_OKiyY4zQh!1>)#1TaNacZ5)m^#XbS(6S_TMG3(pKcV_@rfH@ zO?hPDv)A5hb`}Kz-=;E=7RLiRT8(0 zT+_g;v6fTnMgDP4Ox$W-ZZ8;{XtbM!P6h^%TTK5N$5ML9e$j%%HhJT3m=#j`ezqXT~sv~oyZ8AHH5s!%Qep@e2SU^Zo zEyfnSL{B?8AR4t&CFU>i@-;o+Ca4b92D@s@ml@6bd&FQbIx4;K@X*Sx63z==XY+RT$a*t5?(WzeXt8->=mIqLp{7PA1Q9YvMh2{XV3~ z`Iwal4Qbhb%BV?5(jun1EDcqEIn|cSLnlG4smtfW9$oomIUvg6{8W1C$4~zrbURz7 z%&@LAm?nOT^CBK4LFlAlL@~%tylef}37}t#RVk9BprbKVd1|d1=1`WXBizI7H|ByU zUA7=Ld}l~-Ss40Z^ian)iV|t7)q53=t1YX(XVFz;xTJ7BfEI}tJ$uoDpcd$il3pGl zZLsMLQUTn?@MP|nmoi%HZMEVJUD6pBJMi!}M;|4m3TofoBIod7vw?E8%LkR~&VSmQ zr_gwutMpO&uXnbU1vcEEtxRzYv}-yZUo;21Ink~lHV-weCabS+6Yv}st1~2k#<}=0 zF6W2VDm2{$|KJ<24#a8C4IF8#9oH*jF8cg>R=Bm&gm5k6oYfSAfc`*4q3#4mbK!ga zOCTT#@_s+z2D&DqQ!Mf3?bX10t+(&py83^qx%C=EU?WMb^}6V288h0&URZqWf|>~+ z{d4N9qqw2yr4DQ(rCmJYBpNE=AfKdl%fhXE){e2uLgk>z;gD*NvcLNZgrO8xhU>oh 
z_cJcupK3@RtFp?qgmbba&G^K@B~PN-Pb;9~3fr(^E}gvU!23)OpwOf2E>SZNonulv zuEIb%4+_)Gq*Xsu>8pHju{0>wbC+Wa26NN zP}9aQYb(B}l=)Fcl|LUDpvT%MOIWcw-=jrh5Uf7L2>jv9AhoI_??$RsnFt=nTr0TS zQw@9iC#)>Ar-H(=QTcerQLrl_XLZEvzeoeh%3*lN*#Lle8V)v2z*`2(7GCDynoIab zR(7xK*BH*C4k4kyh(4iFFnM$pQ;#F1?J@tp@14j& z#pU(-S+H}1*Um#8r2vC^FfaFZU4B<@xZ3+c^sU@^`yp@S#_h>heLSA*8WkLhkp?F- zcp+NU=FbK3-7d{;)hAWp{9V%hYsN_BV!ZY#&amFlfs-5MfEgvb7k5!`V>O;R2PXCN z`WOjX;#p+fXJK3qiAO|^>%xrqH8=aj4A|V$vch;zI_H(jotO=43yzh3WKOjq(##xL zwlbyo1(rAD<$y$~fknliDbKIS=3=#0vJ0Xb*XQwvIhyjd(qD%~!6Gq++RO<%12k>< z!yWGF_FD-DrSH^%%Ss~ZjB%h-GF*=8MX~jJk2QW7lZCD2s7ZIRP2N$q$Ev;4IH>ds zoBa@mriF>IqrdAn2#qi$t7-ByS7C=&ppD<0Oyfb{F5EyHsH=5B;)lRcr0*MO!9^o>#|EG-DrTYbBrq^(<+)hNm=DR_>LB z3uMr@trD4Z&NzY4U57Y8Pwl2x5@O z&ur#%Qx>@kTjLFvf5?`mA9TaBa>6jUB@EGdOd+ISjU}L#aFP85>dCfSy}uzPUh66H zmPT6Evn+J!q8Xt0io~Z(>%~nd{7{obMb zhw_hQ8g5B|!ULW<^|Hi`0Jrt~K`OUI$jvwc;L;CcwWhGv#>x+Z(U=NhewYH8uyG<- zm`DPUbcjf?;Wn0`0f^YT_#kW|NYR~4coai$lF}!hMA7x~jM-Mp(jSRL5l0?Xw%?N5 zRA#zJ$cD~X3UghVEi{X`u0=%90(bs<) zB-=$**;MVeFJP(BoL~D$(*9T_0F@j6#(X^caD(uwtoIv&=Cyt!Kkh*#nnAi1|5EZs zt!M;i91|qloX3bYByF|cG`eI$JedEg=eiUjJP2glsS}FQh&ADQ{~hx%$fAu$BGxAr zpHKAA`FB`<@oD44AqRrd+vFt`TwuW}Usor0nV9TbgKh)q;3>%i5 zl%NI8;1QpYtEiOvHJgTd(hH4YVSNkl`9%UfvR`b4sHh^fXX}ItxFxCR#qd{HMlC1= z3e#;}A#Zv5{%ttWyTRO0XUIYvabi?K;se2ZkniSCJ;9S=)a+p)$%Y-wRh)Iu)A&9P z38$!yt5NFl7{DbJ3l5pBUYjgG3t8XG`*L8`GAmJ=HFOicwf#m1`d;F3`O4u!JS&%DeMU?K!c1 z8s=zozEfxu3SU5fv*SnYF#yxXCQDed&JKwF!>v{GFN-{UAJ)Uh^S95;?DNJ?iH{bX znCasTS2x>NZI{)aoAkO-ZtR&!2Ao)eY+{Tvuy6%VcGwz)YXJv@PU-~+>{4g296Z|*ovguGsOEw{xe&_*B;+9>?mWxHcV zhBz2?cEaX;AB%qNkPapQuep{5?wTX6xuC3IXU2owEtj8Wz%ZB<^bZfjd}9v zE{6CbKs6M8s3jBLx%7Qda94>i&g-7k`LWZk4j)Lcpg-HJ!Av`e&2<-VV;NUuwd+v! 
zyYFSF?ZoI=*89HdZ-Gi70=lD9ZigNr<=$DFuNEw&?d0K<+)I>((rBE*zNROlc?){M zP51T0a`aA|ewdA-2*N$9pDMD5w4IUPy@5E+JrE`#6pzF=_PWwFDUQAEFXe}$n=EgR zW>Jw>M@7JJj@tFkxK~Wn^B^)=%}q8?H3o)9_yPaZ=@fTTu|e_E`5RR8^3WMKl$-rD z%)Dw^-VqqyGo#+odlNdUc^idN+QG8$nQas2UARH%!{te6SB!A?bxIv5C+{%L}HTgDB{l} zQ}w<2KrVmC!XnE9ZPx7 z2zZ+KX1j_y2)xmFcWF{&-j32Pi>~5+mG82Pvh-9UKw=KIz;~QTQm5@KtI-cU(QSs= zu#4UkGQ|Or>YqRgsTFZLgz1uQ3SZXUOsdHKZXYMHHPbM0jjVX|HYxIhZt+RBw^qQV zZLDZxEe_XFVejkqm~o?$=tfz`jzR!X^uqYwO9OgBsk43NhV6uW$(ITpHpzUU#QE~l zyOCo|FEQ8mvS|_L%@mCDtaUiX<3VZ+7Or9J~!Uc4is{{48-xkZUqh%x}nx zHyNeAmmV>g_eqT%UmisQJH~~C67ZU9yW*o)2bxW)o!eu#sf*_nWHvN+5{?uFeyg@7 z{F(l;B7yW3WwQQOaeh%OIOZgZg~Ca5w$+HtX3S@b8T!{86Zcl zRx7ZK^V07=VTQ3g^J6~FpI7|N67&3{kS)ngeokYPD zV9V4ubhUqx=x*HR3l@pt>?L(P8~+A84miOTdRlI*q6lX5XqwEe#QO9ew`L%OZByIi zeHSCQ)d9Xsclq+q!74i(a5i+LEoGADvaSJ z{GEyk6k^%4d_raY#`Yj$I-}K9GJmr5B?osqvitW9xPs`-v0rmLPm42d)%1@f@l&E>njKA4RY-~^^+MhA$5KV@e^t`wQ6 z&Yv7~2nXBj&|%rV!(o!i8f)G28M>VpfZ3Y-Uf$Gwhv3x7VNM2XQiLu1lAwwM#3@bs&unc`oN8M175V`tV3 z^=^ZkyteXBayUJR47o#&m>J*AJ(K=EZ<`S{`TPTJdw1w`VBM8 z(voYvISQx-voXk<G03O9JaTb)`fN@WGW-+PHabD* zA`L8`==XJ73(mCEf_&PVXlVSOnSU4fHPG;Pu;!VdIDE%6n5W*N5~+&f{n>!tg#E}M z(IbVe+iDjf#Rb0KPSIKI^_!fNGV9RI#4i^OKqCLZXD4R5Xhxxf%d6t(`?;koeP!NL zI8-&&_i|M#630*!XmBjvtYszheh|=^*)_M<=we{SM;9F3)thdcl5gCHT9HWVy5?7a znE+|0MM>zKD5f>O&S<;tiB}G0KhV2ow!?=~3d!zhg}7zq98)s((ZcIgexjF%{an91 z=#5c%m*HI}IuDAcsC4I)1M@pu(p`T)MtalNIHhKqa&y_iFmwN>>w?NcM9?3RpC9Z& zhk8bd?)V0~kYr8(_F*q=Lz{ku9yFW>G=InE^}3w=bb#Ex7%-a=KmK&} zQp&ztj#n;62&DV*>Yaob-AX^ysBESAKu3Y3u5~q(HkC*NP$cG%E`+ zdNMniMaJL!!hT&t+eeH8%lS^XW>|F{%fEJx)PzJRKkv!Z&Slq2FwpDUi8%EbZLF=x zgb&xvyhIf!cy;7hG$t4vyL2gqNBllg(%^wRjLpvSLmHP6oQ;H^Lj0@EgG!5f@bptL zINQTh7F>Z5w^-~1*d`R9r&M_CClD9*<)7Jk#wrpkQRlktG_;S+@*)hWh0if}WW#zOk+CwGq3zw2#>u;U_fgIhLT{aG zB=PGljsl8r7PvzSM*KF0VqpY+#6J|#-L?fzmU_t=u!xNwyzS)T2TAvuVKDDZ-4AV% z2MJ2q_cByB`3Aok*h>1w&50v$kkhUo6aF%EH#YmeK^;+W6?}JH|9fdt+5p%kjMvEs zq6XyU+Z}SeBge|TP2NqOr zOW4mZIhnE<*8wzm#Kr)f&d(5mNTDn{S_DcE<0~hn<%Y!TNXfh5{sKd~Gh#)x#8;oi 
zch|8;UZy@FS(o1dG73=9lap{v=i5#8Og&A1LM)&Mg($F`>^oySHK8 zG7m&RK0={fuRBsTT9Q4tKTmFZ?LP37r8|E$;Au(Z2J~xTbnAU$-PVaZYM6FEe=Uy# zOSp9}Py>@9lRCMn$X+A!W8Y51*aC(c{!7Ht`ili)h$N3ObCPoNGILyhHFjbD5Dq_PFyhoR zJKA96`>4ufkUfRTpc1Q&#n6g$!fdY&=G=_>5FI9jJ~69{k47iDy=dYR=usP$%1WGN z(vCWXe|;{q3~-4~zUBmoPO7@zy8=SRDTy&`PyI-OA@m!q2sh|_497;BkJ;MeFS=;m zXPTmn+7s&nxuV+yh}};6aWyx1{&-epROUQnf@pm1X1cW>6BcRs_`GW(nV7Y)$;73u z%~Axix$%COHs|^q?O$im?OfC0UJk>o$*D!qqBqT6g`!ClE!-w*5fQe3272Oe=Hy)^C#Dn~ zE0B_TV$#HPg)pwRYbG9Q1-HWsmT8vvKz~!&hK1rNu|MWOEdm2lwDGT=tMgfU|7F2q zc{2lo0_2}}$wEl9b|0Y0OX^I@m%)rA;^(CsRA%ltU)`T~O_GGurIrQ}zXNPJ3*nHm zC{%&K$2Mv2yNH&H<~xH2qR-RM07rtdf^)|hf=NA@-G))TDkSc`Gi}!a z>8B7aN3HMpE<**FA`06E^yzA%xn>HU!Lc-#vaBM=ORUX>=~h=qfZ-$<53%hc;H4wq zK|`!bJ;ZtGTR=hunwx=lTD}^4uL>q(1G_$tCNzJ_Z>Ep*@90;50FB}?tmyY7;R!#a zwTSP`&h`w{m_y!Ghr5SNp^_JCl{#2tJ#vQC%5XdDsN;8q3SNH} zo!Rl+E7sE|{Nd7qJp-J#TtfZwIsYC%c5~!GsYaRC`~@R4qu1o!CK8}!kto?**W~1C z`n-WL16_KYo43>N=*{;4h^zKu_^QMa@@ab82#&NuecXm?vaqT$fGwrIT4j>B^18a(>V0qIDg2%A4c39|Y6B zKXBuf!k5mwbgSA;fkpqfVBQNo7`pPHd-1PbHrGozJCVh6?uoqFIWZ5+-BDHF^=J;$ zLOc4Q1)MsA{_qP;FJ+m(`(HE$+<4);Vi+ia`=g}+rONvDk=K0AOuNsN9EyYbi(Qt% z3A=<~o=zb9b-xa@!I?Aamo%{!!wK!BzU~EOU}&%24}JBT9L3a&rJ+s}O*hlZpb{|| zFTUJl$405Lq_`;^*M6>reLUyCF%BQqp`9HRf2U@BTNln)zj*EaONOdIXB{Ocn?fT+ z=`7hNMFF-JJUC+1<(#9auA^uwch>qS7ST=om9xtO;7Usw!oHEY@nlh`M*0Dh64J~j zA+X@hsfHneu)>Sc0Hbc4yW%q8182nTtgm7Bn;y)%mc?sUN~9Qly^n3DS5#`MCcA|# z+`cZt0(T~XY$f&tOy^mW6U?U*{nlN~^8+OyH88^6PL)m!JNw4^-SaeN zV&`rR&Sl(uZ*dH24P$X;k|~r^XM4V3TrAuA_Gks7j^MgIXqgZXhglBO&==LZ{VWdz zOHwivW^jG(tLypo*_J85nY6Bg);iA+vR=5$XG}aAy$qMiqlyk19^YM<$0A$r*fR>I z?O$^zaku4-;#~wga3QQU)c4g^(+rJQ68te$4^ixz_=8Ugot|H?M$zOr$4=ichqU@C zwK!}Vj33CQ_yDn7BIfY6n)AMnGud1&xxrr^#O!Qbna-oheU`gT&KxuOm813=T_Qc( zVUZThPm6gkm-{_q@BX^+7j$GaBoeJ+pj$dzbyeBX?;Nio>sRsU)f9`md#-UL{-c;3 zL9Zp+;PG_dI#*Q`#wEVPetJ8LO|p0!;GgFm4)^;CJO<^YR!-$h#>V^=HA0!xD2mjwGA}_{oieQs8tE`1onlOY5piW zaG806HyQV|Nf3>aZT8GMtY9O>AcBt2?b0+JVvJt0$S;gu=D1^Y-~QsvFeLbJ!gB*# 
zlI=P;nQ7ohu4YF}--OE-%x(c90&?4xpp@k?b<6S?Bt#k<456VXnuDu;bC(+=)y}W@ zFNakl1^OOyYT$bzOXoZLF5sV!X&IYwV07}-Lp*;Gj}Sg{yOV-MuXH@_+M!f&**d~_ zLBAwU9Id(3ELCn2|C>&B%d$~d*v+2oVgp?S5(r~EhbfxMikqoMF2BL|5Go?m5W;*d1C z>M_i3oLcU4EkmGg-UCvk8^TSm`{*tKQ=q`;lL*gvGW`6qEc|H_?4a~=%@O2c69U)# z5T%rgAFfdhA^T$rYs=a<3g~GxUdd4`$`fGClPRPp`PKO?nRSvuaa}y>rsNC7*=S-p z{rzgt)!_edzJ;NMDsIOsc|#$|-;$%CYnKOJO<#|3J3XQq!5f$_dhUVdyDR3mS5GLX zFp~^4Axd@(!2BHEC3o5P9zc3Tw*6a+rn*H3e^yvX7U z()VaZl?j$<7n=RwT`c7;)rTv`rXht(^~P9W+m0X44GRpr4E9N&BcA+#<+)pt#7Af zub(_40|_Z*0=Z(tD)!1D?ft|ATtnZY!Br7Y4Rc3Yh_j+=Z9)iM;~kXKaDHs4SC_&& zPMz$IBKc3Ro%T+MA^LzR9}`ZD?++j0!U_5=J(3bhQXk%Jim_?O4U;r9s56mlpNa@v-(Jn@0e+G{C;bvWTa6vjn?dI|#}&URC)cx+c}p z{X%SzQDzdO5I**i8K}nIl`qFS+*;~I)A>PH6rlR1xH5DO=}WGYE76mo{N&z;@0m<4!IoObAC>=c`3Xt;RqX@-RYshB_-TnBSss9;lrBuPiArHG6 zsWR(m!y0Ru0AEQ4t{y2Jbb9seywgB+}r$!V?P3c4D7t7TZgtd z5h!)P2%IMFnl%%~A24?oMNR;O>AJy0zIxIgcF=d9CXhL!Y>BTVsAD#|slBlu-0o%x zX0rVVQi%S1mp=uO9hl9;*J~P}yXSUs#`!#;P(^!JG(Xo7;;1HN^#@*x?-T!7U zuRIFbR+KrbKEg=CJ&_qF>b%T&KhrVUB=T{ba1+__wqS4ZH)d}t{l-A#o+{DMH4VZ0 z_TGM337=JvquxQfsb7zer!^C|<5VNHDhPS1TpnItVqQQd$fK0%lBR9DI`44RxVyo7 zqIN)pPt`Mx-21Zj>PcjRpD)nA3+hg3c(gOyElMJPyAE`rY^$L=IzA;6AgesQ<|s_$ z&HU>M`m2N0wY-bDdL572!Qeqm$ykm#c+;8nXIK>GQ+0b=hQ5E3_AI63X-!D7C(Bp6 z#I?oM+r1}NKf91d=R;?FQ&RN#&xM3h8>1Ib{qI1RlAEq|Z5vgG@BAH;&YF%16%w+j!_)i9^SuoFBlGB7^K5JFsBN7* zi@~m9^{tT4MHt@a?^?@CdCaqWAzqol(jA2zd_ z_S6RK{$Qa>(bi+i71plgkxvkKPRfNXSin*%AOn``xEx!02T_FYP_ z`LI*_IODk&QB+SnBYCpB`Up7~!F74{$Q6xo~&kbQ^@bDImoEd`T zFUKMup~~k;BZx=Dil2|197T)sqFS&%NqDsmGpOk*T@)%@(aORNLJ)S|G-%wnL272^ zA3IQY2|c&lb-|g;SDZaim^0Pgf3cr9l*Demb0Ei!lqAu&l~Vw|J`6T$?>kFbIb1hD z$8^Gu{wq)9y6J$A^JE8&g-VduHejUoiH|O0vcxK*6%@dN_wIBvIEbSwF7CFZhRlzF zprBJu*_a#xcgUq-L3{sA5lP4|5JFd{eh42z>OPudXebSCB)aUdz8staf!@fz#Y1&r zv7yBo_Myjjuc%1UX|#K=#8=YtaCwD&qMp9Ernj26gMh)myr@ zdkg$i=^1xGbM&i3}gGJa}ZIM=OHoD)Wp%i2Xp?f=kia0 zYb##$v`Ucj@GeoDL{hWFUq|ocBm9I_d$vf`;x=t2w!oZAfqkWhCnr4Bz8n0Jzb3b> z9O6XdmZLL1lTn(hy*>`){bcIyx6Db-jCCJt)G=FLSvNkh&&+!QpbUmvU%00-@18K| 
zEgFMKPH$})HnCgD(YGsozf>_EH+K=}64^81ize#5{{ccky}xyB1#2WK#0eXtow4Gc z6JiVOu};rYX#J&q{KHFC@bZch)Tg$y-(R)-lA6b^GhgsQdNS)3-_Ueo0iPx*Aic9b zYED|hx6vN|TyTR%k`E?^dc#E32gj3rF!VoPXv+FxsG28+U3b9C8D_{`Ad9c5<*Zb) zg?g4QoE|HQ_}j3gndLIod~K0WOYJxOTy=_m&)2c{_x-}dvw_Ln6n2w}$_F^`{st#t3_j>v&3$SLLSN-}+z|gN7>Qn+_HI*H?KH#N4~DRuWDFG>6EML9=qSk#@9K)) zkWHl)GL4!xNTKBHM`zm*u3jJy7UQGCp_Y$F=sicaTQPW z+eG!-SLo6tsZXoQDD5d|-&DcKA=>!Rpn)UX)i8XU9OB>Aa)f0m2bzB4xFMfela$TH zp~ZCQp2Kt9k~!S&2pxvs<=9un?E66uKKC@?EUC3Ld#WR5xFR-xk%e-`LPJO((f3G?{YKC0}Eb5@0L21D_fr_N56tyGYJrp*#|` zfg3GAP?HWAX@OOn98o0WhOWcBgqP>^12+t1?OZ@6k$CGBp>Lyek9JZJ#-!$uezYXRk0 za||psMV+oOdbFFMbDl9qUN8}L;jkl;Gm)i@FY($i+NchlbDFr|Dv5vnWnmcJPTAxl zetq|V!}{!@LBU}jdU>1gU)`nCs)Ibbd{DF6zcKaR8>)&#x#!s4L zHB+itm)>zFS}maDXe@ly(%R79Cy8TV3o?B)ZGQai(s1Lkw`y!q@o z@FEpUve+%Jj+2}e#VlAl|FfhKUP$sj#ad&L`{{O-#J%B0PyAI? z;h~vy*DB^ew=4Kut(FC~?Od`~4Nikh#JL}Q%>qV|hS+>U9zBD8@{UmwH@tq%zQ-E) za+@*gyEvfS#T^-UJkfuyANF7M6VGnx2RE#ibHV(LcGz&%5=q%=2p;pH8e?s~Gmh=_!~k_agf8;OxPU+`4ex-W&VJBZ z>y7U&p3s`)AZjM%d957i|CWnaY+&A(i45zpo{DFaIpJd-H|k4jmqYqcjIqVDFV0AM z?}qE$U4&Ou{f-^_&v(F}32u0-=!q3Rp2(i&i{h~zaeM6;Ec$c}%3VW{I8p=p!zJ%v zF9mEEA%}h=+UZ?g%YoIEoKarDBfYY@G3^siy?G&gI<8%|xWE z@b5}JMcX>bz`eYgCwkS2Is4;>h1}Zv{>`$`6;w*D49ITHxTV0&rtcBro|9GKBtTe=! 
zA!gW`XbIV#_9(M(MAIh+Xwx36@-1+?N&~mMkW!C*+`(2px6%>cPwLzCTJ44A>3%rk z=nwsWf}lM#2-*4maBb~?9j@LeOmxNGcsqnPs-yi_IWON!=2P8$e6VmkN7|j`8HM*k zf8v&{jq+M6kxQN%XD>cSZ>kk8bukirHYsCGW}YjGHGD8V%O6|5g~R9Q42<6L1arbd z@w%HXdPi%(oa#8NuY#>d6wsv-=w;W)jc>~7@g|p3PNlQ-{%v-fafQ1o?yz}51}{AN z#pO>c`0iO9cU)@bf6M!i9Tx0yf%!K#OgrX|Ze!g9CsD7vH*PDqVY!2|I3G)=*x*IH zh0xXK+F9ZJY8%8_+TlW|Gq(NXis=7bFmAmwerDKW^ha~}<{IGnB~|=YqAL88-KLtr zvC;tWQiQ{LpiE00y@N;ndcS=s7q9bxA>Z9u$PiTi$3kaTT87w&Ri<&Aga; zT4|Kue1?4!&+|*z7fSoV?luO(Q?#Xz4bl!;!mrKKT;BtjwnI@ssYY_b3|*M z2du(*Kj{r~1yU<8RY4uFd(*g@->eW!5M1>K8F{o8ptk(>iZ> zw(om3pZUNhjgK@o{4V@|uY1;Wk(QkBix1zTBQz5az9?Zxjyw*YXy7HoCO*=ULwlOK z=*jGuZ46HvYXsePz#$I@q(60ml7b7?MtCAa#S>3`e313U2Zq~ypxVh7-Ryi|s^bBN zGYVFxd5GyIh1xGhRZKcNJf zb^7S>%nW@VISCHWSla;Xc@l`*ejzw)+zD$pgdyfmDE?j>iXx+qxUn-3b5wny*25Lv zbIgUN+QPGzdWzpUWBd=^99_#e9SwByHO8keX0Vg7hFh#LddErXUYUGGZhgU4{cjBK zry%sF4nZ!sz9InsosYzSM`lAe@g=&<4HuljQz?4Fry-pM%0x)c=@w-~6v^YzMj11I6N`V%h zx~b!D3so#WqYb@@1~`)-`TIng!Bo!*o~HJQ>F0esK}ce|#rDOg?9BSRhUl?1Y%s%KHw$DAGDFuS4@hP2WAxE$h%Tmf)I*bn zCcJbt5I060Q|HQIP(U5cU5dFaIg_T}()qu?K8fD2m7XLn$L7%2=siszCNWC=By%M- zal79yT3a-8=wvc}nhcHyO78WQGC1tn$`7IS%=uq2m-We^P4+Lw&MM)_D|Iwhtf7u} zJ|`%qu^j(9-jUtiQ|`olm&a!1SfqDb=oq+RvEies**Etb^Traq#lDtFxr zb-g5K#>x}D<-8F4+zA;UY#{Y$75pyYLA8e*@bEdCWb3%V&H(FsI3atB7ydEqfHz-5 zFmX^AHmwYWW?(2fUh9ImvymuN>4MMQLb3BeARhMeheN8j;6J3#u@>{}Q@4x|bk|7m zB~xCTLuZ2(aM=`BHyA_e!@1I>-b^d#I8tLI1Bqz zUSQy~E-g<{s9{l$JU zKe<`<2hEmbQNjKd2diG8`m_^ty!C)3+kXg-K;KVwOxV`M={K4M7a%USncE5*xPN9N zzsuBe?wT4g#~RhAiQ`|@F>`DgcYD9)XW0vU?6pslCof^*v^D(r&knxtwTCt_SD5$V zoyZy1EN4_yaJ-ur<8sEYB&3SA#$WO(8A z5_i-FyW{Hua zJmECJ37W1J2&^^`zC5YUXI`-h4px~%%|MdRWLaTuu!+!`>&%tK<&J>iC1uQAD7gy_ ztkL%!%<0TvvUyc^Su z@kUZ_CY&%7z8~EMrdY7g4E<)A36^#Dt`>-%X^f6KmclzH&Gn@^IqBTwz+`Wsm7Fu% z7rpoTLfOR|x5oRT+}0bTmwSo4ATrt;!<+r!U*!kuWxmLI;En59k~;gfJLax;6wgBH zF_ZQo%rDpR@1z!X?4bz*EeDi_d86ODAW4E9hIQj3G44Sq&d7wKR52X=4Lc$HSty(@ zcEtPRA&_4cf`=0VaB02|PK4QtS%S3A>UKc^w@g*gahoo>`AK5GwFRcDnhTBd7gaO( 
zxf_ak-1PsMpkaz7I`($Nl{Pp0Gsqi8id|qicpPdh6R^`Y1SR%HLa!^;?dkQ_M}K)8 ztoyBj&H8Hid#(b~vRb*zxmj@Tx)_)8gijVPAG^=S)n~cx{4FNmNfW%Asdm}CQ&PyI zkIVTewvvsTfARO&kJR7vn%!1q3V+r0=z6|*-pu8vffiF4{8ul7AMUbZ_AcdljGtY} z)Jb_<(&qz5U3kg&MNe7fFS$E=zU7*US&ZIOLS4;TsyMXq%1v49NRStPv3pZA5qjs3 zS5T_$b$F&P^x&fsBr)I34BeNTVUDAb=t=bJrH>j#$ytw9hl7PS;+7j>fsGw@9Fx?{ z(QXLy_Q9tDPy90Q#km|mteod7czYJc{&<<=B^cakv;9Q9{(QS1Zbb!PMY9*y$9rRm zxfe3DU9rQ*9n;NhP~FW0SKal5=IrS&J50)U6K5&1M@JZc3d5`sp|Bkpj&WL{NM6)Q z@ZQ}Y1VbyS6MPSbV2f`j(Z|)E71nE72@SB69XBG(6?2>1;cpNk$(M#AM(+faUj@OeN*`C2>WUhD zqOLCf>82(02%8%yST5&UD`n`#`Vg~gV=ZSiI z(4cBo=-0B>u|M;}{v)okyx&Rw{UDXgy5-SwjO5POXrYg18|!Rk@p(SE_FQWA%n;{vtHjwkn169bV1-LCwr*mW7dlB9^u*!|s1- zcyd7l1~W7fJylb9CVnl@5I*pKFX$k~)==;sQm*OXUn^C?)hMX1q+DDhr7SC_Ek^kA z${I(_-O!@pg*$KDF>YD_Jo|bh@z)>DUYoKvGK#%LZBVko58F5UWB(X`#Psopv*ev# zbHW!7f_-s4*$ck@9tb+*jH6xb@pP{}3Pw2KMTsj;`}soqY#?SW4Z)Y4p;+b-h8;Ft zP&m3XhT3#S-n$TdG7mwJT__fv4u*c8j!^yRBkGLo(K;B|w~%igZgaxLM9zDZL#3m# z;`fqz{?B~0Lfm$9l&&<#nMg_PSzv{j?bcW~!w&nrokYzt`n@aEhj=44um|RU-6UoO zrrEmayh96XOtr-PL#iQ^at;H6YuL3{AzwcF$gv)O_&4c4UUQn$Yle5f!?dF}_;k@r ze(dvuA8wS>II)HXzZ)s#nJx0JX85H~?7HMU*U6mXvH_{A-ImXW`ITIDyq?FmwQz6= zd3KmA6xPbad4e34M95;rvSu2D)Cvzs^SP%~e0Y_6@7!kB-S22{F_%`gg&er9mOHOB zP}x!j`EL}2zN%4Dt6q5}hy1I`m>sT)4F!^VJ5dEbJ(Yn@ijXpiWoIa0L-`+OUD=x7 zd={T7czseo+TS@^sOw;bm6eV-Xyt(&S>7<0)B!Ipdn0hSJJvpPL-BZb!CM{kKOZc* z6o7%-e9%qaA8R-H;IgI<`kwRuvs>Z2H~fBkh@5a$Pj~!#&l4?g{qd@IAUe7S!k{1+ zucvmw;XzSIjgCb5%Pw%L>5TJxL-EeO6NZI^pkLQeR9key`H7uid`^;6(gBxN>ELOH zQrewM;r#QdjN0*2_<^PUbSZm4_J5}0*(a<`?wb`!C(%*Hd%HHts!{ zM2*aJ9y0hy&Hp|MAG|cy|D4@OX@9hFr9Ad$$wJP*nGJ`FxLH0!%o6tmeCC+}KX^K- zQ0TfIOqSdOijw-kLKe^N%b;hyEDoj1WBciL!Brf(IExy4uQRrB3)KVuGJr}IGO1HSXjA<<5f=`nA{D^^=#b5VvhT?k{oGWt^I~>aQJ42^g z^8OjSV8B6lu?NMjQ4)^_`=FPf4=xP%L!_oJydnco^UWV0y7}ScTR$j%@e_M!l&gL4 zYMmrTcy+@2;1HxPi-4+J7kp`p!E60)_&p;UPr{?1n;ni%`#NI;J7a#tpSyosMj)yt z`=I*?TO?1?fmEMA4&Q`6Y~j0Hp=hN0o*gx=fWr*<2f;)x4OchSm2~@VZ$Zagj2(HoTdOt;+a!_e?q;zRpwmOX(av 
zg<0!1^TeQJ{>3l!SIOsQl#9O2gF&rKc_|MmWB>U`HAsEZv3&#tHKzMp*cBayAHN7$-z znM%PAXg&6&$S?0H{oWQcw-U#U8jU6>U*f`D)L7%+wq`NQvH{BnB zmi`#@*$0313V__!4%pPU6Y@PmvG_$e2E6Zre>U`huTC%2Kk9{yH$CxhW;apuDShvZ zzhc9&{(UDD%?U&7jzFy5?v0wYHrRes2UB*{vAa_`4GSOg(#~hB^T_4Zxh*{ML7d_{s&p+6h($3W3au|~g z{77$Q`qu^~_A8~F$xmKg^_ut1uJil*J^bs=Zo&0D^3NNpC*(8HwT8X_X<|Z3i|D;c z`|c@&)$!uEmhjd|SrO7WD`n-zzi8xVwE{NVzM$sai>w%Si}^KQ_#wEI`AYSiHNBRj zpSMWv`gSh1Xrb4p2Da-LaOB;$tRHoki;C{B@oFN!WMAgi-if?+>VxPvx=8AUeTOQk z){;fzr-}S&c7=tl>0EDHCirtP^$j$8R>%<_A94D!%bc<9E#oFsveT(XQJmA$)b6=obkJfP6^tu+irsO9_ILAr;nW7lK3i9!ePCtcu-dzaX~i73wOoxx9+%J z=459i%<3%sI#Onjyp|JmBQ!B!Oc?{b zZ!!AlW)#)~F=vg|=~8=sZ*h(iumKi#{GqRl~a{a*)<(8?RQe zqxut`K5&x5humVk?t8lSEa2NmC6af%gx=m29I~Z_%MLbkNIq!tIfli1x>nQd*h2%Tn@lr?3=xQ(6`_IOEBVXGO zYYuflT|y^>HH5>xX9SKtj)A3WH$0lr9a^_~AfmD_LS=fuv!M%~o{zvJ??}YY|5Fc2 z`+vLF_~AsZD<&+~MakB3b_l%A;^nJ(p>z&gyPak1=4`6zlrY<{klJ&;aqRI|bS%3^ zx7v5~I#(;a(QB7!H4zWn-I?p-ZXODmvd{pp>M>KzN(5_PY^=%N|TirKG!vBw}>X>w010(EpgwA=1 zj3Hz@=?HDd-NnkNd($Yq#BCAjT&DY(SMKL>QtU6*27jm4(GN_1_mz7-m9eK$HGL13 z@^{ELu3MYJu*x(1-uW_>rzddm$}{}0{yP8bn8EqB74*JaFZdX6&0_NCr|dua8Bd)0 zDYTu=YXOTWIXqNoqqKiDrBgGdTtX>luko%r#;K{Hn^ik2{>`DXS0eqNC2{WEJX+jS z5T2#*N%~m&OckB1%Xl#39dqlyvRhw>9zsO6Jz|0!1e3qpZDZV6XDB?z)n3MknrDPP zFU;_BtsRmJU4coym^3aB^(~!{_aY3|!y_?xWfYIGm$A`s zcaK8fzanu*ITFT+T~M|m6pzjZJyMH{7wZ-50 zX3kR%`7;s_V!-eb}85?*_+DmbrFHicA^u92#PVs}sUj#vcww5Flxa&6GUVA|M$}~Q{n90OrCCoX~#B*7|b{AE_ z#IO6Mgm)$yg7I@ER#J0yG7uV1X*}2+tBy1;MdS}B$A(o2o_0Z7DSh&O(#-iWH$|M` zfgdUC8<4{r{#Cp>zl>wcGC6zkMf#a<7i{_QjgL86{S_Ba`pAjKgkkFIA*>0H%H%k6UTmQfaSr^L02NSUW1*hS0N=|E$v9=4|Iz-PP)UP4x{;U>c>X8Jr84Ca71P9YPZ`{a)5fgHmbfy<9`@sH&{;(n zu`Aoz^16m*?hVXNY-d)RJoFx^3$}a4DJv+dI-`4{50+*GV@Z!NwET|5qsLL$Y88!r zp8v$u85g-K

{n-rzvR`z$>6l`Z#MDD|48m+PY~-Ux&GnBz;O z6Oyj=z#-2=xNZ28X&z5#nfaDirl+y^aS~mh9AZVwa-P-M%&g8QX{CBz@EMMkUE`?X zX$yN}q>0QS8W{KC54Wl4mX_$RNIjv_o@`>0E;_@>cDvc^(>b3@kjQ|qvs$jubEur5DiPplKhx&-W zXN(0aOcA%h3~w5BG4`YeRIbUxTUj1P$2E|=)*M5lEMaG2gFCU7!Z&=Q#1L~Tj75J% zcd0#G?n>%;En^IJX=V?@SA6~Y0q4!n;05GxyHO4ozbm8k%$3HQKzFN)*fZDpTnGG} z(HW-}M7U!bjur^A}1EhNgW3xNp zbE+>+I(Xr7l>??Vm|)XeeY}*_5&jlw9DI}clq(}%aB7z71A6)|ma z2Qzf2v&Fgn-cWNNh#~p$@J=aY^wj%&U7W^!LsR*FOagBn+Q4<%QyB7dIltuY|C&g{xV};u^bpOe>l`WimO-?YN{Uv#GW3{SaPn>Mh#;ey_!tXQXjIMY`qzo}B zm#kZk4&F6u;kLFK2Fc05I-!DI(O>vJ^)+iYe&eUJH9Tnw;m_eeEn*%RshP(gKl2$E zCy5Ec#dMV1XLW@ooSIfCGy-yAU-@`{vf$S}N`EH!jj^Vc^ncOB!UP3;`l^Bb8T#lj z+Xx>1Mwq_H7%@*Q1*<9Pv>7J9mE7MGP2lEhDl~i9L+lW)>4ZUtU7(ZY44*1z-1m1z zj;|A@)j459o+BDxTH)I=9fUut;Jbdw%s6+GCHBv$F#ILQue!;Q#}8@0Lz0(nHiVzN zo0uP4{2PK*T|(hh8i5%hQMlsW6{pO5;oaXc=>1O&w6{f}SUwV!+oQ2UB@!ktqwu!9 z3mP25arkFv07nIHeeKOpT?C5~CODxg-h%ux)N=6K9WRJ`SI^St$dRJN8 z`Js&E)3h-uRTq1HnnTCL4$dWxPUPTk zXLX$Mcj}*ASub9BAj{JOb0WOa#nc0*8(d&I*%=#bY+(FYAN>^NQ2e%l;y`>g+1%ezBOM4X-3F9;rTp=g{Hj)@B4m?sy3>-EtXYSj}D*7e3{#vsT! z2FKH)aN$cA1baonFEAQc`bMHCBmz5}L(wBE2o(*1nAhF`?LGZrR_2XkKRjUQ=Pua7 z&^Lp>i@Mmq=G#*DmZdk85-6q!mn=BUlSj+wD3_*AC_Bn@Lf`e#zkl$?r&}6+e+%v zICT`yP{-W!N-!@_gSn9=8m4Mv+<#h7?5_H*LScEJ+R6|5_4<-o4F-M?bOelS-bQBZqJ`EofQmJwKUlkXXL!mtEVsGRs7M}(;p3^y>Vi?FV2*BV&qgw zy?5Rn_I37Xl(7_C!7s7$SmNC#SfTHB{mDbtEG?$hZAI)mZG~%_y>YoP5S357B!8l^{R3k`M9=s(=1ssa_5sThiTLb}WJ zW3D|$wR)q&$nIElV-=*mo5ANZ_`gL@I3n%>pN&~UWuwV_-g_;d9zDtfcMmg2c>_%z z?Bd|7g7f&&b21n*#d$Ru+t3DLed>o)-3IYhmMb zNzS!KSFnk;zt_a`3QeKMk?t6ivOaqZm)sS{{_Gi8m#rgoJYJ`?A)lrM<-z3p)fF^9 z`;HnP;~6sL1Wip}QRPShhlP}Kl|~+`mwjSg^D}0rpXAuf@nRnNcyloohqcr5ni}TR z>!YBnA>1#S3;)&r3M=>xbA;t#do0@F3d?L~#M(*zmN$Ixy~Ga_hljxRav*x02}Hph ze>{uwL$?n;V$Ys*1}xoA+xkB%F?(IPQWw%)4O0KY=OrqbZ={7$;|!sG*iq;)JFfA? 
z&6*(88g~XJghOdV7x8@WPK`q5z8DxW8lCS&Lv39+eqN0bdwaVKibSq=IN}V#VgIx< zmidREZFeB9wROO-=l=M2iXV2q4M4Zo0ir)xQ}2XtNhXNxpX@Bp5qnrd4IVq zj(h52(hm#l)Uw6zosQz(NcqweJ^bb)p)YAV<0BKop7OuAi`*Exo$5C>G41;?_I`Ms zr;_e4xZ*Z%r#)fs|78kam~`h&XIB{n+>%9v6KQ?5jh8%|xj(m^9k0mXzt8fb4`10$ z6JzgaqNkoF4rXZ}MnMg^FaPX~DJs*z&<1U+u+R~_*2Ob*g-3Sx<3IWNA_rY09oG@N zCK3-TN%G?cp5Bzrsh;=vSL*}zF3IM%Rdr$qzyak(UYJ-W{9&UvKA>f83Mcu0q2;Y| zIykn|C|e1^h3aCiX&h>V2M5g1X}t|Rl5BDIkuwIHJHuwPtI+X`boYaGXb?WGmDJrD zAvjPSh_5EWf}1v0)gSo}{c!G$C(LWzF>sb6R8HHAU82(7>e47vk@HDC73HzU&{VU= zqXUvNR^b61m0;wQgks{G2+aHti4%9iP!t!5Ub-lh@$_LAgoRJV?d~n?^50u(S3csam*-fG-CVfq zAU`x+r?q~8IG__0b&@PQWJlbit!U4#wP6}nn!FMGMC zA=qcA5Uzl^cjVC4o?XA$bSsi|=FH#e863urtzX?Vvi+2Az7C zAbEu@Rt-_dmM~3Rn`wkT-u6hB^~ItYAqbfn0@)sg^@$v`~EwDcyUwVnP$$E*H`x z`ZXtcz2Qs49LjC3qm*rtI#(XQ*C`;OS_!#f@^Etg&id(3Zf zLB>fBY`^9qdbQ&%I^fOZ0GM_S#(+0L@Tus4Q8NRvIyV@7XLrCP-vGGE`5|b|!1797AKAbS%FP^ozMbyJ zT7*BpaC`=zC!glZ>`OfEo=(@g5)Kb;X5;`_=$6T2`a1=D?9s{*gNtd~^C>Tly37f~ zp7P&gxm2Gm$vqR~abTDh5*ljYz@ zMh8sI3`Cb90odmm0Bw0M?8$LOs)>WhEmXQFqkMTj_Y~iy{MiI9YyL=s79|Y3ZG)y7 zZ-grZW5U-?C>;@kISV?8-rm6r{@B*R4RkZd*ulVq*Iz{ceX>;rmv=RS!EG0zN7{PD z8+lc(7?WU)89K%|Rc0Xi7!%}8adE9B#+h2;=X8CHw9*wmc}0G`a4_N>Ud(D9BwA4i9JYC?TK8xF6#Q}qNv(Pa9a}&nTp); z&~zOPU8jnnr<=t7-$S`~IKu57Tgr2}>Pizww<#fgyawD1wD4R>Td+5!`{VB9Hgn^I zPgJbGOXYX3g`Zx&SRUq$+9a~nA7!Dc44 zJmXtGVEY$i1i8DTI>86llJhibLy*uAtjO|3gq{!NE<2!4ixKpfwQ%OYujmwbU)+}! z!G>bT!~cq$vAeeu)?Ba_{yAx%F=cNfrJk+T$JMdzm9Ai3*n1kF?X;fQ8M^zUq0j=f zb}~fI_4?1Q!#&;vUY*J9AK^W0`Ok9A6inB#MuOCoM?i1%3z3OGX_jr;7g z@&mu-mQZJp=(mT-Uz>SaL!vMe*eug7i&21ket|m`e}Eg7h8kBTfgkSr5+9?`oUuM$#Z%joyH@#GaiGdC-{-Z7QU&TLlF?Ovs zrd$PVB5PQ={}**^N+=WCLY1)!Lfc%LrzP&|cOCwCb)}w;%O@RCFxUl|Yuqrb+yh2k zJz;;%69blbiMg*-53bSQ2NeOnn4an@qeCZ{v?) 
zy7)ES3YiUd_-m_;*q<%Ef43aY;TijPbbpr5=4A~Wxv!O9tJ*2$58eHs0sH0pxKnS0 zOJntMZSNnxm(-hOeoX@}ciE!pMigZK*#v`IkGN|54xW5^mOPnA)eToUbmul+3f{t& z9ydiD9WW=49XprNw5Xb9t+nhwwVsbYHgnCKRwl?+^VO&_)_Paa=Ry@1UajH9SB(ty zY2)->%}gEA#FC>m{Of)x6}9rX>UO5!ABMT)GxI|Ymm0Tnj+X*Lid6(7Ix9^ZvFQf5 zmu!Uf{>CueWq<|4^ssi14)&x;YOI=ex(>+W8~<0LXYDk$gwi@hT4zYl|M*79JJ4W= z3!{xNeT)%457NescopQA$zsM($#b}-gxy1x1k1jxN(oE1$U@4mKP#^!cFioCX@U4u zTbRZ>;`SD2d`xwM&u9niy=9NYtM-`k&o1eB0W0*-^P85~Z@I8pQ+Su9 zF|#0E4#5@5`0z{(u}(T-Z&A)eUBPMjcTzQHhTh}e>6iFu-aE$bC}gjtzxjI5FTrF> z%#?xO2~9NTYY9f@Dp@%=cc@{mq#jDY+sw(I^pW;gI81x)LI=M$G+ca)Ay=+5c-JEu z@3_sO8;`N@914&*e-9NlARU3W(Q^W=LHV$p6rHwT950()e_rGoR z@o#_$hHNl`O|B7!x?3PY!v^;|m|}~G0q(5TLF)gskbG1NbK*1*@KptBMWolD5`MIP z$LD@IY+Kt-DN}#UI}6N;aKMvFORSBwM!2&nwmmV#SOa~WTV^EK{?c4csu7*E(Hq;J zd%`i@8U8-jxPDa=QhxpPxCTn=BdOn0$}pGiY{>I5LML@AeD+U+)xdwH#R&XK1d@N~mh;geoo8i;BCZAOv81BUt? z=kU}2G3M_l^m9mK^qFT2RDCYE$HjF&*tvHu$F0xgj3!Aed!5E+pKsLP`kP6IO8MPha2N!rU}ldTViF2IS%>T2yfn%0&|4CFh#@};g7oJQh+h+f+OeWM`5T4P%o3Dg_dGRto zEJ|CA_QwxtJmMHXtV^QPG)c|aBa@ppe&^6RIdpIT#nf*3+??^9PwgLaWrsujYsp!< zhCZeHqi<}k`N``aesX-_S30I=vPa!F9+%JO@h(3&HX)sbKa+_3yHwnt!R((|+~@s; zpU=Id;hrQW&ACp?DR)`$GDC32mVK^eNAq?ryQP4;uXO}JN9x-+s$mR+M@ATEXCn4% z`N~Lg8y{^v>Z&DpBNknBFuuwN?#oP3@z?@Eh88dzX@#^UHb^~Yi^g1QnAckhf3#{Z zb0kKZ3J!}jH(dR&MKG0Yqh9h&>=zo?)$m)UGG3fj6T44S6WUoEQcdMCIb3@wm5(ED zFrY&g&99Tw3?=nRfE8RLt#E&*mGG8$jy6TnN*(-ItcBM#%2<1p9QL$^GnBK~b<|_( z_RHYJfEvcd|DU9@fXXWC!thIXcXxM*-Q6+v*xioZ-QC^Yfr$YK3L=V12#N@j(k;#V zfA{=r7Hej?W}MIO-gD00XYc(y%9xz5fbP|0R5pm=nCi!TYn#BPb=8zO@lsE$#6jC^ zmCE>ixA}f=7<sRB9C8`M#oI+jyph=Cc2@ z688UA$m1>9ynF1c;MZPy9m>pTS+1^G!hK&YG1g)caP2dX%3b2wEsv;hJci#xf3WOK zwwN1|ynHGFg`99Eoh6;0v+e2=>^15<^=iY|zB-;8G7@-y&*NOzs zOuADZsi2A0zjblyrWr16F~ynN#<()W6bkoE5ZuK`I0jVz8i|?xs2o!asW8LfaC10r zvc|YAwzxXi9*N3!I2G-P);k^Gx78Zy9=3=%V~dDBmZ+X&f+g#8@T-qJ8s=28Usf)k zZYbs1AI)qXtO@liYA8}`Vep>ceD>!ZXKx8*qUH@gskk8+?vle?r9uPOe`z5pNEMTx z$ziOG3f$zi(EpJd9vqj4{8BkQZ&lCJZ?if3@>3Re+RhTKb8M~pO?Y#{6#&1ydX~%9 
zn6+~{^%v!Hib6HJJg%e7+3@)qv%V=kL!OINlTgSaeN;!I9wr~-> zy7G#RS3_x3`GH;@KZM7``eZgOJ$^EWi8Rmt#CvjYsB<@rMsBg3I5&-zhGcq&ei&pI zNsYdH*e>i01Ioih9V>U?9|rgr)5)ipHhcf@+JEVcO8-Ozz2xn|Pna?{lB#RoQa$Mv zJvE-O^MG@_W4@1%hTIj*+|c$v*!J9e<~fFN(~eh+s)^@?&)=E$Gn#TYPq6Kd*?ej= ziDO1?;@*EAF>zi3f6G4azc;J6@dYWdWLLH}gvmD9TJ<#-cd4uXwLotLGkm{gjMC>O z82rlwKmS;WUX75umawj~Ky+IhM69&Nv0w-Ek8#F&RaaCsJ3vwP+%0-oW6&R4>}+R) z?xkj8)=e#69`1hC^cY#f(Csxe8z_f+xPb0yztrE$QaIF5Msff{>4X)^LCJzTNsO<~VAsMl zM)gVMO4l?7x@L3R`W(hNrts3wr_}0pngQ~s>Dm1{Th%|~5tSG!#UzQ|-5oj2lpKI@ zQ}tosZGsJREih}fIpWI9a5u>um9jmyc7ZuYXcG!X2M>G0fY(=ee*8Oz zE>7j*{qGp(ewQ7tzv0Gxzd0>9mmRe~(PPO?sxP?5rkzO)cdKXfb9Jmapo#Bmlrd>Z zGvj;K09Qb$zjAzf4vS7kh;#a--A8WRQ$}fitiQQB4!l;!h6F%fKbOzcp777TbCmlT z!iAe3)9c15n#*0~&m&PB^5_e1_y5Y-?Voa5{b?==+QZ{L)^hQ#xwLUwEIWtyu=(&w zK3#HBeBURhM|0ndCxSJ3#x;c)MQo1Bnhv+NJU5y@r@mp0#WQvse4GD1d`Yk5UmVs`_PuviMYD&VUHM!AV}YGF zEyO*G?gvve$j$;OS4e76gL|O{eodFfhN}!PajB_bJDjq#Mb$1Vq$t=RzrY%CeQoe_ zl?{ro8VSFyRL3-49Lrrnw>hRLn#y}BDR)i<)ms0rIhB2GCT~~sQ%;SjBRMxJ;cu!Y z>RWVCp{#=BFS7l8e+qlY)6# z9HhKyb{yc{_OMbF4Hk&%mh1}^|&V!Q*dG6UKe*Aic7wj$x z&q1ioXF9tiGh}uwUp|w?3Q@;6&tfwN)gER4uGjdj`XP(Dyvgbd(nEuJt{CK#Y z3v`=VJWTf8U2hV6u_(1-b{m()VcF?yxfRd%6Yg>UhP@ncDfJ#Te_9bTR6O5>B_&Q|DoYaD%j;ql)}3y2AM= z-5u=kF+=irGvVQq{O|PvR)WW=aY7rjk5qEu`Y)6>eaW58$*d@^rQ|J@{1uYFx$`R> z%<|U4?q!B#(xoa zjrDt*Skzx0`hAV(g|*WRp)*n+tz_?#G-ul_rh%(F=ZoB{YiT$aZ%bi2 z_rI+ApoZby4dCpnkH8!1=w4q>zqm~1)TJ;h{HLhTL^^$?+{-8tgGs&NZH*q$*x?HM z6&&JR&mGihSVNt`EBL(kHpYAGXWZiL4AkGsiJ^aZFt>&;L#tV%R7;gU_00QH%SUs{ z_+w&%@L6Q4-r>+pSzdTBnfA(GcyIi31~i`I?e-@a-u@#0dv=v;HlJZh=n=m42;r2n zZ-Uj@)wn@8qc%k=38t6nIt7ee4^-LL(jd1)^pGyO^@}_6qp3UbAp@&i&BolF$exrvO>Qv2fT-!?c<0YPtlwWR|A=XkiFVw=@8ETO70_i;{ z?NrRiu77E{S`PbusAJq!9h|b(M?AF=o1%s1ZS*klh>`I6Nc&v7&Ps4QSV!efxm;NC zksZ~dcxP0KaN!+D);x{~_k=&1_S05v zpIDFP4<5<#jA!(AdcyR1_ZhS3D&J4KK!t(lxO4JR+Erf>xrwx=Nsg_Dzh&RqNO{cj zYUV1gAJKX$cny^-uQv}@jeJ?A<(W zhNCG4qTl`g6g_lT)xyAqx_FbWhb3wz_`TH#4^s`%wAc{y=j$LvNe>oJG~v=wUGz-- 
zRhPqsxEgkEPUkU=r?fJ@LiL0Xyl?uK@$)pq{^fhq9OHgiV1j`OGJJH9V4;pp#Vvwu z>t!U11=}2D$hgr=zcQZJryXYct{1$p>7$6p<$px*ahp4A2{}gpM+bQC`EeQyk;Nlc zdHg<34F|8Pp|6T6vdR^4^&TMA`15z=ae`Y4b(Is?@4-vz@w+?h_}{6cxz2;?W%;FE^=sZ zLNPa%D~rJ-zV+bMjr@=V)ZA6W>{OZ0yNk;i_^6N-vRr82*H7&B_cx`wL6eP!aN|fmAj!j9 z7H5X|VWwzOGR2?SR*2|oEBb*7FFK=ewyWs%lniguIXQBpXsVgJ5QVtgrDtKf-DmR}Fr$vPTP@4r(H9mxkaV z9lov%iG?HeZB&=bW6o;@3>>C}>z1m5moITokDoS%UsrP!UN(irBXg`fU?QB3k`K*g zuNnd(n|ZFRg4ddI#AlU!H7E4)Xp&XLH($y*;Nx#vk9;9c0nnWv$JEJsN zUirC~725u=5q*4xwRZTN;E4IDE?8UYhKqw;a7e)gqb9it=F7`iFBpFC#_r8tNc!Z9 zN7jCrGu#tx$9rJ8wkK+zcwoeIS9I;{jN~*|+&%4%h+}TJu)z(fF3$L8oi&OFnPWkm zGEzEM@t#)(KaKdtzs<6|{%QvAS^nS+=TFpekLTFb&vaM%#y+33>At;;dN%b`c}05k z*AkwqQT@y@YKjHgl^I}fj4DR_sif-3&z#%!I4$KS@$j69G|Sk}E@sa;c;i>TaQx1B z_VGME^DgVQ?xx}BHN0%PkmED9v#d=F6BCt1-<_0mPf1h7=IL^HFsqU-TR!q&*K<5N z@IKeQOX9k-1w5==PVY0NOdL`|mp+9&JiLsOJI=d4mou{SXdYTl1NnNE^#S(gDG5L9 z)ZrSikCE*$BV_OKF;$p5sA0<@6?AJ-7WXe5&#Iv1kp{FksS5_Ejg~IDm>6NpEHl9m zmDtu(T#W_O<(I1gbdSpRkV*15Tw2PVMkzd#7E8IqschGjE1VIRk5zL)QZ>VOWbyl* zP|nkS$d=z9**T+}!@8?t&nW|3R4~WiXI2=#(gqt^ti_DH#Qzwx+6A}fyW{g654;R^ z$EjIf(6sT#)NKK<9T$YQ1^$SL3qrA~KXej&P&LaNIh(z)X5jy8gw5sN=n&+K!M;97 ze(#5+nm&jg>5iIA2iOiagwYN;d^Z2isJGX+rS&D=`w+?0$`5>f_!%=FU*e;ZP<9AT zq`y}dSD0tBrs6wu1}3uW{CrC8(9c~BaOL>_`_{LfHbwmaT{QHl=ZkY`+?Mr_^X4w0 zf%(6TRocJ=zpFTBzICvS}X&X^x@)VutIdT|#x-D@wc=ANf$Id%zSlh9ZYd;lH zZdW0r;woqr+Q`^NX{)XlNdTl)v*&@MD^fyueu_-?HAWl9ESJ^}4Z`^)DE0D(1nfI>}?r zr9Z5%e90B>*Dz$w2*w^-$!D%1obC6G3&L_)ur;4u_vY~6rtciJDVCp0U$A4m2 zgHk^Gr$ryiJGAjzUq{qR7w(e9vBedPU!TthYYOPTp^64~n(3A+2ZQ+vf)ze7t(m>{ zHt^=h5*DAzV=MPeQPXuDUQUJ64UGTVLMh&RcU1xYUC0kzn|b`&Un-rgrNXdUO82Yn z-;+UZYIxFD1Bd=-!J?P0m=};7O`eZTaYo-n_;V$H$h&DKD4MK?P0N*Gv#ppLRl<1l z`9}Hd8_s;O2qAtmJx*wUgybwJkLKqlDzqI%qb~MXK-r9Z*M& zw6SQHI*!+A33rTIoE7?~dZ5iUZ^mdH6?ms#3n zKUY1yO{3n?{Al!+1@UjV{%#^S>149Ugba?F@{uKq4>+*Hbr#C@VyW+KVTvZaEsb%j z!W?!VjIr&eF5K=ZK-%xR?*GPD*X}X*(*eOq-aYuOs9Pxbl=IEQDoQ;NqZXFXsy?4V z?|(6Q`xpM6-9)KZah0k*x((JvQiKlf2k2nlNfjjRRsdx&nobNL<*$-op+!vvUh`D2 
z`MV0F*}scD>NsFQAwz?|&`S9Q1H(Sh@Krt&e^%4@SUsoRs2BC!XAR|o1?8`BCRQ&o_MKs*R;wb-Y=4wk97`6}-t&mCDe|Q$@6<4q8VV z;7^{Om{F`t)WR48Ik-4f@St+8;9hs>pp6-0Y?1oa9aYc0u%a;#E`F`iP}de7r#d6# zMF$+a*bbA<1jDE_2tV!padoz@sQ*iILeGD=Aa{}@)CRf1F3?BtJ*AoOs&F@ykFyf2 zvOUpRw8w4XTb5?9UG2YdeA91!*qO_^IkMO_E}esV#4;r4HG3w$;)(|!xHTw;#=1=` zovJPJMr8wIY%tZw{>AFTqqq8MjbL%=H+{P_TE3t82gKl zY?Ihy<|pP^ycM--)%8EQ(5#4)%<4FPvl6Zzk>${d2FOp;LBMJwY%{VDJphu+*GSI< zQ{EV%m$5E9-4(DTqJ$k&;(1Ex47apB!pCnPQT5>$dhagaj30G0{UncRhKfktsDvyz zb%dSQ#4>$d!A6v3QoDZBLYp3n7&^LwGgij4XYxHpc8{m;c+xW041Gde&`aMFeewga zP_7M17IZ*pPCGHbSNo_9@?HePq9q7+&Aw>J@(`S6srP!|J$2zVlf1KM-Z){y6ffK@ z3lK9zzH@A`e}Eo5_SCb}tOO3c5JhkQZ*1;R!Z71n{=4fhGb}1uIx&qCOzy~@=`}X` zzM=ev-@I#8L(Rb&Vtzut#TX~g8sNOWreJ|ccX*aFa;WH(Pb8GkctauE-Y#I((_#+O zE9KSCWgN1zihk{Dc>QoCGkezX!XJoQNkIBEY&+?U{!4Y>cv%Y<vF;7vfOu89{W6!FB4FZ|LFL7PRNxpi(92c%WA zU~>&07!`1S(Py4K@j}eSjXZgU?zQ_E6n&Pf%ipkb@GmYtRn2SP75?uB?WiX_?c3%U zW4xm!x=pviz4tc4=_~d1NNoL;5ekspAf4AF(Q$qVdu3naht99KKkS?63%_JpDVS9w zE;n)85=FFeR>Aseb<94X4Xn@>-unBgYH*!H@xJ_<7sJMlA=F7q5dBtf&lusWgPmac zovZUk*|b2oziSQ4E^X1%s12SBZ;NpMR#>pT73P@uL$8mM=v9@R@Pqp%iub=MPL@O6 zwT8(w54Onc7b6_I>%I)`&7{J%%faa1MC zZxrx%$6s7CJ(B}#b6EW|mm3T-=unr%m5cv!#XT1+y1yFVPTAprraCJ9)bVby4zj=L zL20rPQsVV7=A{AFChFpDma7(Q2k`*7DlBJT9O4iZ17mQ8ni@dp|kGM>%Vl{bY-9ht$@*r6Gu2Mj@WlEo_L!bV50@hD z*`f%`IVN~pV2`C6U0~Ycj=@hoG2>ByV8G6yFE(8B6K)+3bsw0@*7bymj@YqB7YbMY za9Q^!Y@Ha(X%P*aX=a923!Ko}&_i%zi_f?qWV-{jPne-p+dwdPY%NrAsJ|Qv7q)PR zMH8Pc2PCf1qPs;RuX?oqFE9L1MdC<(G__d#pH8vu;g^- z*D0DY>HGQ5Qt!J7HQ^ zLmX|8owFS>IO}==&orq(d!;3;wLM@c=Oy_2Gkdu~cclx4YC8$$yzVAhJ~K)ayJDKz zb5<2k#TWDI<$CTM59rI{gcH^(Fm0oSsf&z7Jxk)14jbi!xrR=d7;KGDS4~Kc;oK}X-R`rk0d?L5ZGgZA)o8>sKn!SrmP(xdM-fxSIE9_A6&;q>==)*i8V*Wt#g*vsh zz+($XSe$f$LAfPnrRoW0qV!B>OtOUGJ4Y;c_QJ`NJ{aZT1G_P|x}(n&SNzm; zK;Qy%SiDk2yIyjT^N>U19Ysv-pboG1nvmAx*YIBqYJ@9ipxnyZSVmg=~5LIv?5ayU}lBx-s4nu~b% zMXI>(HB@=bY4<*Jd&FPPEK#3qu_ zwXs2mK@M=Sa0Au7u}GMjMkBX}~9^iJ#h5 zaCAbcnBBVeF^jH$!ue+0X@0N^XZH|2*~`2cU8_uy+FZ}pg)KBz12#NTfaFy^y-5i# 
zpULCq?G{eUljV+frL38n$!n|P=xlV0Jb#`uHhf@8MHQu9)py3Gf=}A(q6v2Iu|SZH zv0#eZZ!!?PlDrlboIJ0DkxyHwr&7n{3$>IS9r01h*gi&2+{tX&XpSH2t!0nb7Sq4m z;lXVi?3-eX#qexIh>Duk-QOt!%Bcncw^Er{AEn!YfoY=PN(g7ckhXj?%gHX{#o- zG;5<999!ivfQj=dGmwG&@b+dr4OXH-$z*8-8&j;P)0hVFd;kITTNNHwySe=-&ylG#+3kNgUrCAmuTSRj1pyzCt9Y(X5bgn+sBFj^B@i@-} zTjR`O|IHG`zE%jFX)T;7Uk_SgrM`t=;ot0Ugq82KL_AT~MIDz0X(GBr4?V6K!^7MR z#WO6h#?~5F)a`Jv#ZtuKk=?C?N6+`Y306Ea5}q)r2HLn+1v{Ern7*fx&*s%K*rY%( zwr@On&YTqwxP8h4{zwR+it9_JR=?tZlWy_X%foEZ+raqk6S;i!7&dlU&gRYcX;+s* z_XVXQ{@2z~giaR)NPJnTKH2iDj48)T1-ta1OA?zEQuu9o1*Lg~-B&DyZ&UIyi7F-AD)V*(>zLlI|A|5qOs!wk`}LRa|Jf<2T)zONnG11~*K;@vk{Y%0rz zu551R>Jfjr<*@8$bq(#`R`B0>MLaejTimm^+8N8kMvpnS%}svU{(_1w->H;T!|Ew& zm^9S@*A2{}pl*dPy0XvP$^n%Lw%FX>5%zxe*n8UnUHaQ$!fV-j36TZ+0so)tknY5l zvkkB+%T%1F!5?LDk+L0ZJ~^PF$y)eP%^R(RKWcY~A!5$yL-V{k9tW#I@|;WVrG~x= z$V{!{h!JHRx+;_YQL!|LyUm-)djz{8%I+Mk?Qii`=yl#TJ;T@o+nF+MBD2am^Y_qk zyc>Ce4?84rWLYT-@6~aBbQ7b-)lhq32A6$)MT6bpeDdEH&Oh~wdQUR>WKRJnWH#~K zMJ=p&Z;o~?&e-7TgP{gKa5*1{d9Q+DygdjZy@KKG8vqwiZ`5l!qVSfX*heZyq*HJE z7h1;Wh&-Wgh5~l4RfY3w4d~p}f_FOu!Ecv%eK!gX#NEf~Dl?p%U?$vo-IXn%>R~Tf z($e}LbIMVinb8wXaZ%kEJ_C&e%X@I30UBSL;De2&U;;}q=gl2fB39|&;)ovI`e5eh z%h-O^6TkN<;KI@dUiYlzU;kp-h35-DgXE?!FN~oE!$m*Aww7a@d2AQ6_C02;YqoHn zy$R65{+(tx``HR(ciP~&uY<@bQ^(8p#V7}8syX2HF9&>DY7aYWTM^68-D!nIUo3^OJN*~fuf_OoEm5x(xff!RGL z^GEDbs@L4+B#$J@pDpH{xz(cIa?pSIR7nV@VV^@Z3pmLVhr7(mdd-mY$-L5`Sg-}} z4%Wft7IV}sk^MQ(y5rD3Z>T%^LHAw&x-SgCneYI?SuSaD#n98%kUTsR-)h3m`>gJH zpI3vD8QVdY+rL%9ax+cXHmPIaa!owkt%AkYz{Qht$oi@xdWC$_v{ABL2~}T}uyeJB za0s=nw8fP(jwsc%L&p@^9$9CIhjZ0MUqJgnbqqhO50#E)=&C5o7f(B2p}Gq$+B)Hr ziZcqG{PEa%7*-#+1-X1*+_NVabgd9-#x6 zeLZtt?Bk{Dw|HdIHNI0k$04U~(o8#=-*=``ZEh|#eiU=erD7VIWpMnLD7LG*L7PG6 z`94gTA44V*#wBY zVUPK7v&iofRY@31u5O)h6=eGNRWwZsx_J8?gDL)${I%N(=osV85`9)GHZ3#+G# z5t?7xV|*7ERF87Q!v}8o^v4w&3xaU5dN>lt-+^iuKa73U#1mVJIpnJBd&qsmg0crJ z^0~oB!w=G`>tcRU+e)Q(hpDE1ln?jb;<2}%L~Upk$hVccxcbK!?L91UFvA92{yL#s zFMBlYbU>$U2aFulb#gYh{P4?QOAaf)y@4Hpi;l=4crt+uxU%V@eM@ 
z^h*E#Oog;Zq*j_?KwlHQ?V>AY$+ONh^1pAgoIbgPKd+Ut{oyL^eB4Nh5%qXfo#-ht z*_Xx3rr|t2;sk3ZY-hm!BXpm6nWoBD7}7qJg_mQw(drY|pNVJhYY9|-oG$J|Cj0(k z>nUlR+3N>yzWyQZ&^LYwr=2X8=z01CHP)Y?$FAorcmBfJXEPaTP(#V-)#K!JVB~fudY4P1a^fLB3Wr2(r z#^MY~?4K?2$f4TbxUX+L^%M25+Rp(cG4A3n`oL{(sMPvmaz;D6u^5YJgPSnj>5q** zb#yiR!yel|&~mhFt($D;w)@A$+Mn#ZpWTie0BFm{VYfk@Fm}SKkq{9yp`;r6WdYIN<6Q2bfjcp--wUR*bR6 z;yX4NQfP&9X4c3pwLHCVmx<2Ex9i-bgF=$CXq5P{{HVd|1QL<~Z_+beZ;Em}{HW!CFLE-|cS%gl1#|NN1s=0&pkYZkvh zQiN+aL;PA}iJ4byG3mN2UUX1|g-SA)gx%r7>h~Pnx|R{&jPO;q?j*jE<7W@lWqPCS zsQ^qm+7282CqP;LJ`S0+f@MLo@Sz{{{=!iP*EuHhIQNgc!-~(LoV@Zmb&ubtQ`aLL zYrL9C54JNc{~BL8e`ERf28R5jhJ*zMNXak}pC|mi6?V8@a0pmYjrPkd?49R%Ix{%NOb|RA#4wupFN)x{?Zxa1fvybJ78HKZF9#h{UntpB4 zDL1T`i*DC*_lg$b4cXa~9OlwYDTkDtGZn+1ap}1u?9%o+^)5#7{X7Pl-7^!IcjN+h+TY}slxVtzW^?9Z+4@JjYw1JOI!!Q2Fsvu$uG)ehq#&Bbi=Vx1Dpn&@?oYgki*-y= zxZ-^rrzE{*h0ApstUSU$>&|fY_OCoQwux=cHF5i<3C^9jfcY$QoE~G2gImlIx!M8) zUt2*}%L*F?SR-wQ73R*ih5KYnOxkFT+krO7uCm13P#gTUwib?phzFKf-^CO)wz^RC zS4BHr1)N#Y#Fvp->@emrpZ8tC&MW7Mvpl5uFy*|iaL1DvuBpi9WUXduzEi->;R^V# zPaR+774TO}9-&su-bOW)T!<~Bl<~k`U9bwfYH7l7l^XWFCTkv+5br+nP0$0T+f`jgmrTVrPZ*x| zPH>ndhKjtYI?fj8!ZX1Ljg>}7kJdw0zA zw<1o5X$ilnMz}rR^l}%D0^6y6$ow57T*Ia{?Gg3gB>eEdk59L}(PLE|D_#~+IzIyL zW%9L0CZ8V8<naQP@ix{20lU7d8xNu}H-P7tQ z)z*xzYGP5Q5)Q3V5d5FVdlk_yMF}x`wXiNoAEzf7V``>84*%2_tjaO#HL&(=GYj2I z_~YYG>N%uy^w}(i6{fNwKAiodA98wD1m#wzP~xg}+gLz%vnuNCR)B(%0S0c9y~nO@ zX#dg+-g>PtwAv5hLjq8r6o8gcU)WrCN0%LL*plpk%Lione4!;e*w_gkU(hKlB!-xv zOi>-qfqxmgGoNSs7PCtypu;f@3^-{F^+%R)iEx0!ZAbWhvctAnj$-cA{)D}7NNX9n z;n;5%Jn!uYr`c8*`N2Z$vy#(FEm#)o9rc9uMnBxF_Y)j(`w9M7(9j+o3a8+Y=1sw* z*!i?X%=@}WWN_85TrOK&#JxSr_+6utxicClF;`YZ*6@mT8OLoXp|)=ocaN4s;!#a( z9BYj8cMX7{Mi@895Z))$VYgNdk%_9pdAf3+4mM@$ig`=vj(5*R9UL8EEap0;JD=_M z%rQ0464$d$pzERwt$j)u)TfEcPL&+KF`t_wGR0lKM{^kOR9@ooTf6CFbAX#K9%0t# z8+?}<&(3oTIVed1ixRZpkYfn5sk&mmM&fVvsh9o#{^|<0(1~1Q=*jX8=}!0PNi*ac znxI~mYscxT!ToFt`(3K%pNa~>6udY+pLdUBa6)1lTXp@-l_QF{c2*q+)RD@+)No># z9_lZe!D6W`LjQ3WUMSOmV8mIp#zh8UZdDLQuM0%xd>`!Y?}Mp7y`kU1UEBqg7`tQl 
zC3nQj)~7VvaB`h7v^{mO^{hHB%+*AnNIlH-GZ!%`nuvvpfjGVbwa{-56o)k4gY3uIAroqkY_ZDa`SI+UR3puQ|f#bge8BvOuks*&^sm+4- zqt#j+gI>ti`AzqHq!R1A|a@m{f0HL%ZhRy%KF1DC4a|0cwr1IOwE3+8npQiR+el7H@|f+T(mHHyoYl zhW=9mpy%Tc$Ahw*;}xF-@e`{Glq2PO}4$Kmmw=dU%S3Nd^T$-C zF3Az@TeGacv`JME^#F-Sl>6CM^o_rdae&TNd(lH9-Sf6=^2Y`(f4GLULhW~dTzC>B z`h+ex_@l2`0A}3_#IK-0(MKTF1i$Y0z_(I&q(5~*);(vzq4aZgfqRQ9oceg9v4bDl zua)JJ6#-~o-3n^|1PJz^#2riR?gw>EUt}%x#P2dMywUZ9e3(1_8{mOO>)r7~!5t@; zx?ktS zIjqetst29ml`b2(!g3p<2HoSSzywyf{Shp(s_|rSHx*d*(GvTI))IYO?xu&2#hUo| zKoQq2DIxP;MSOatj;rHzg!g@4s0C6V+n~qFK-tSoPR)A4 zrE(9sEH{ob{PUQ3v4P?HRIqcnK4xZ_WBWo|gsC}UnY=R&KXySs9e0e~?gf`?z9?Vs zFBrYc4O$`MNH7wd+aPpjE6j=xg8rgbXz$WmeE&1d1BEl_KOH|@>fj4hd7#+E2fGqH z(Z$0byS0Nv?Wpy$0DKP%z+byo;#`q_Bo^g6_aJN@8X%aJ2W!01@u@dfEt16=8@H{{52Q0u>LNR*uo`)9hNdAtWMtn|Y3C0+4n`*P$Qmc?V&vbb*cXLj20 znVpw^V2k-{hOT?gobNwqnDCeLXDJ~rK^tKWdT1AHf;$f8f}5^hVJz705_?bb2Ynk= z&3BV>*!cJ(<2J_gmsJk?_NwNP0Y%Jr`M|K+TOzkOANYrxpFqsgF5jb%eQON_FZ_0a z0*qf(@}crK?k;-5Lu&W9dj1Vg?s<+uJ1&dffq0K!;+|T+8tAi44Iz1&FyC(g_a1sU z|4|no+i9X`sshZX)NpQ60hLGQ@$9y0u8UQH&OTi<{AU95Y)kC4w8zsk_E?u=3(4u? zQ>BAX*BZr)^IV5h;?AS8-#vCd{Z;rTPYhRr>sTXEm%g>s85VHG*l(UlzTz(W|GPB$ zV7qAmHf#^X1l3^pz77`mK<`fYVM$DDWS6x@?dD({z1s?YBZ8qZBM5g>{9$@A04EOx zVA*_ML@N4;9`dex1JHSK5CUa;h}T5fS#nnP`(14XMddcQlplxywIIQtE4k>04;O=w zecA_`|MP|WXm4EJ=ZR-GT=C(D6NW3gz;c!>Mx5h_iHn^@50bp0Gpz2oq1U6Xh)Z6I z$Wl9e-r2}&3uG~0-EXeA8P6UMuXCo#d0w0Rfg9&l^5h0p96G6u4@V4P;$Z~q>88-! 
zY9e}s3>~#0)vTpi`i6u`RtA-FwtN{CACwCBjH}6C?)qKMTeq@lvPk6{Nw|~&+Q!QURsbk!46VX>; zKhqXxZ#rU;haC#eSfbrHS?oDp0g`vjW6?AE{yfH!z4nT|K;mHxI;#X*KNI1Bdlut_ zv&Xz}{(*`w~B$=$(lcJfEq$yQ>04vYvwzr#W3+YkiTfHc_mIS^gWwMO?Lt>E%3Nc?v-YkZM1(+dV#uBf@>2J5NLs2lEr;C=QuSnh~f%J%5F z#2(sD90iB`)`%5YvCs>f_N(Asn|i)ES;^H;GQ|14@>>*7Z2QH{^O`wir5dJe)q>w( zeON9s5I$37bA8x0q}gH4uhAv7`IIyQvF!s<#y7NLu7U(V{}saV8v5*SbdqxKi=mL%~;;M zl)@Q}`CRq0ib}dIqUXqxn%MeD6CE=(VDGJt)&D3$;xnXn$fLPs7>CzwVnw$-bg6$U zcqS*eD+)&7lFeqg-QQO5n*3>tieDDOd#qWb1Yfr*4k?V6#Vt43Y?U3f#4hGkjq1M$O-QKApow9sxy(~WWs^y@5rSyy_{U z}i+Kud3B`<1RWyiGOmaPN5oqD3@yWd82{JGOeYsVsHj{3?+Mpt>xeJh=oU*KK8 zD9-Ma!qZpt1aoxY$`a39j5L zO@AzO@yCCgf}y)S2+qR;;djShGR5;1a{bU$d#S z{SPzxe&@%)H+=d!QuH>NKB%R~7$sC4)hLEJaFy-en5 zod*mJJ;i;Ew>VKPoV%}lVr=3Mb~#tT{>AmI|09PRTb0nTQ4UTWNoi(H;tt$QPUAls zvHbS-K6~Y!VB5OuYmz}-iAT!zPIM(6V^T_5H*`tgigfc=X zC1o@fEv1yAq0&(5`n~V>_wV~xpPuKr@9VzrYcMP57uVeAj7n>5C^i`*cdw<$iS`<7 zkMD&Jczw+orVAW!?t>dP{^JIlFYZt?a6-PS6Bf?&!tC!JD3$lYkWJpO(v^JU>OSyV z?~Q_6-tbQF#-`DIklo%F8}@j^FUu2)Hn}6u%L7s8U0}J}Rpd)bcXa25IAiNEN9;N6 z3_Cd|42W<=%zQ`uank`k*Vqb=;5<`HydTj^_+Cjff;w%q5c{I*<0W;juRf%CG?%g@ zeY?sS=adFQIWY+TnD-VQ1}*`Lh?uQ_|K`Xc;b0m~)RB9`uu7$NphVP&m&*x96;|DWQgADGw;W5%=PZ;a4fQE}Qpr zo49Oy7c9D^B5P?H`MAv(KF+7UK z7ZYjq@)JLI)G^tyjiKq}$Zx;7b!j6n?)=KR_8FWvBZjj}B3Mxx!K$SDJUR0n9WE3L z9Zb4DDR)**XiQ^uG%!j@1EH_f5k5st__axO5nWc1r;btREmxiIgaus{kk+j`5(D({ zb*>2>S6gHHGh4VVuoHRePI~{}?>ts_#_AUi(CO+1kN2*4)a)Yqyu7{_Zo(Ujnml2) z(;J;ud4c{u_&2~CW=p*gKg0{Wf4L(u-V?Rgys+(=i`dbaB)Qp;NnK z-7ytNc`H(H;C`}w(2%%8^dLT0$7c0z@QzVLLuD7phIB^gNE!Tjyqzz{x3Z}JFG_W$ z!`}Sl-#NcoWdIBeZewb26Az94PTiv!Oly3=u=r?}4Sp@O2yuGl!Vl+I$P?zKBr;=h zsmOMc=8~HEsG+!k1ct3I6n#$A$EtJQq_z{h~@PuWIDHGo29QCXZ|% z4QO7|#TP?K{~TqG*mP?wm$k*9r4kH1!bbGkajEtqS3OlSkLOHt$8~ErydU6+(}UbZ ze(R=v9;k?QL-0@!gg3k4^%755z4k=kI8WT~>y0CKB+uR18}{ow5ESTvqsA^mTbz2; z5gWHS;nQ?`gpYB8y}3QQu5*O4y$!5~+KF4cy0ba%EbE0`@@A;>HW9o#>Gv+>W9(mM zC^$pqvKlxWDubg7esW4_1ND5nU{I42I(^*=x1}C9bWRODFLo8${Ey)sG1P^fEDQ)P?k<8q^RdM2o3}#n0a{Z<+Y~T8Zkx}vd 
zX#0dKlOyQy?li9)I>g?qx3e_j3jemqqK8!tyQDR7Y~LmpXVtJ`L=hjZdCp~pdziOt zG7lF7aOl2cVutwcUn1VC1?!rHx6u8+D}>LC=cOOqmD9?zdpqHfOAEI=`^d<*_t<-8 zJQH=ku%nC|zEv4DAGAaUoWpoi!NHf-*vq%9kuG@#8yl>!f1fp$T#?M~Kb+u~=L)}i zH+-DrA@t(%lIOhQhbxX3df~gjGn8YT@#B>{E|<9Dk05uvOLfPUPOjo+D9wl4?c@gC zd+vghp|jZz{XOlm{fjLo*EwS75qq@WuowRC1D{(0yG=w!oz#<9dViF9<4E_Kr#|Un zRG>cg7V8Rbue*aLPVSP2@8@b-1iln^Q>j1K+~}V8rMz0?6sx{efmfTP-c9OYO;kO{ z_*YW1OCbaMwYt3&y%ESy6I9HZ=n9UPr;ga!fOv~G)M&fpK6{q`H@{HWtF1<8AO zvX~FDGFW;oit}amvDeWh+_rxMx0zjLiuqd(dsohgxO)B*_>+sGztB4(fzJ=caO8yd z?8vENNqv*>4cIw3neX3(uzzu^1oxLvx(lv-*G+g$rd8=cyW9{JTBi6~V}-l3ZLm|$ z0-Fw42~KvkWbSVFm-K;8F2dhE_?`={y>h{eBo73JdI(I{=bkhA1iQg&wiojJU9n`A zJN6#*z?Nkms2}8x`4w(h+ru4G3|!%*?1<`X_9z-;CpiDT|FeP702@4r7FhDk z5c%UIHCekWFWK z{>%%y*DGS;l?AYv;RKn_-5}NB@<1~u$W^d^_*=RM-DTOihlGAIO*0bMx8^$6%N(QS zx4T?(I+wp9zjLEr4Tr~mW{%}sj?lcve#;)vEFp&u^Cj5dQ3l1Qln^*TN%)#?oU0@9 ziKKYTXtpj6hUwx>Q+JFvQ^DcST`)Afn%Q4o(XQwe&)bHupI3ya;no3%InMc}m;oM# zykpq9TskZ)VUM}rI4Y`?6K7@8)!`8vkA`xy;US(qyoZi!_A@*xg!jvnSyfra_g9-Z zd~^e^SQhe9NF)u*LwRv+5?6+nv0-ZsrJ2eB*FMr#GnI3mexNjCclVigsw`0uS_}&b z_LJUMXJs28daWUD%`!#ACR0@XYk?9sb6AF2ppUf;Cac+t*?)DOBhp7oW_(vqJRazZ z;s{TX(=+O|E39|8;im+{Yj*R1${SbAyWoZ=c`m4u{H}4w35yM!k(lfXM|pd(zrPS` zfu5Tz(U@zAqbtp@cb_5JgS15-FTSIU-w_&Ew^13IS34nI=@)m|HSwNBXE9StH3rUM z%8+KHJWVX3m&{$BQwio|^$4oj{$y70bZi@Chwt)A@SfKRO&6MI7516_-=8o)_%K(L zoaeq#kD2&i96zKyV(5nZyj1^^$5cP@Yf_E4jSLx3!Us>%IXL4vTLO~Uuey*>X`*!h zD)nBM=0GRt>)_R81NbdAg0hqk-l~fOgL~j;wT|%OmEL<@%>>|9GFC_J}ETQ&E&_mIZU{i#oVh;c`N-Cd#7yTi|Yp^^X5?w z&HtBy9fuhnlE}yRzOkdBnSVF_VqEE0-i><8Cy_5{zA=l_pA|4bzla;F3b{17lpniP zP-S5qrw(bP$C@^7uoff`_34Zn~elNDAi3L25!2j zuI-M71FDGaB#RSvKgHefYgz$^d*#q9?2XVyS-wc(-jY}b#D>u|F@*XJ4>|bQOSXJ} z&R3&v(re>Eo?o_wGIx%1n9LQfa6ZpTS-0stESrPO>v`Csods8ZvrlUSlal+KgqAMx7b7xZ0`%e1^I-uT|g%I?1?^#-i7Y~qx|-}pc~hw{I&*eJn1(hR8_ zbtU23=i#o32n|KZsVE7}v{b+F>7a?&y12^Zlbu_Mk=_|FtywTJv|d$gSd`u%C2{{lVpkMKh016;W0DAziKQDOZ<;e)G}olUp1mE4`t 
zCVWUf$jc(Ou#Mq;o7pGo2h)s7IecIu<Q?AmlVC^YV>L0MGwS~?%OT~UznlTn59f7E(mz(g1=WgqF=QgE-!b4LAE3E?%81M7)cGN zx5xffRzgcC)%V0iDj;`uHOq^hFxB`rbsv1BYfvLwf`H5o4V>}vBX<|%Gx}2_rI`p) z{>YcUJ#lci0Uk_NN4I00VeR{iDPvo>Xi*Og@tKYrA=ao}p@rsG@^HxMj3WP5zGyGw z8koU1h!9T@FeO=ZxY@oMm{8-@2S+=dJ(n#;eD+9c_a#os zc*=#{GHE%un9_Min!zB|<9D9YN#sy{UfCkSu|GL1vXW8DvzU72G5gI+W_nDC$P2wZ zt4ZiyK3pu|f2nbdPCiMsfEB#4a~t1!M03LQQtCzhq_KGu%U0#_y!tIJRDD3_O`myg zC4>&|-=D_Veb5&1x%Sw3%>j?fCD^^y4))a!Sm^Bx*GNa<4S!2Od?MPIEZi^f|KFf)Gn6o=3Vt|ny{QGNRL6kDA z2gr;6zqGF>?M-7&?=##vYynGJHuJt!IP0dxv+`CZ2ilf$Y?l_!=-$DeOMY{Ib(7c| zPoCILso$5>mpA+`BjJ0MqHd1zgkH#*YbLU%YCX+yuGm!Ye{V*bh&qtcriB-(>X^1r z5sTGjV7jKBE2e(nu$$K;-*J%muD?EcP7CW-{Cp*Wz|IT2>eGla23@^T4@5AK# z)3~VHPBAkE&w0yKrO$l5v08k~HOCtH*uQ}??`lMTv}enA!C#m9Qun<2iN-_9xXSM* ztsI+K^|gVi)unXtOQ3_|ISH;=!TnR`a_sQk^qmpQ*@4A;E>};jmRhD?%Hp$*FnU~# zWL-!m)8_x=^?j;P>uZGTSFP}Go;|KRIpOsy2b|2dgYpLlWGXnI^syb5>~g?>6epZb zbb;}4R~)Q!!(L4{48P(iIGJf{?4iEP9+$Oj1jj_mm6v*8L|>JIG>1>>No?#RhZmL_ zIAzdX?Dm3o>L5=tQ^))Dfd36cT)Ec^p2ikJPlG}O95vU5v@Xl^lNEV5h5xz=EtyNJ zHfB7~!qy-axMay-RD2Wf3@fDVk$XH>dy+fDF0d&mjO!MLa@gm4T&`cp#M%~0{nKZR zR7A9sGK^m;VaH7k!D&oQGQ`RhBV4dCgVzr;sB2h4E6frK2hD_DazmO4YMx5^BKZ69;%e-f!LOy-IXN#=jR*PFK8oh}d zUB<8V zs#Nmux1TI__$e?#@`*Mc`&!R8J@aX{{W?P@Y-Cc(pN#vlgid=7^3L~IIxQ~Z$XzuY za%&D-qpaHS3A zY1)fhZJm=VGD_W`x!y(OI%l`IqA1lBS_v-LDeH#ZxenO7$4+qQHAk7k^r9io57tJn zo@#KCFCKcu2YVh+&hIS8=tQzs zy@0X5TX?Hc0d})g5r0SnDVo~YXs3${BP8GWU#2*{$sGUr_d=iHmKd97h0FRDICsfP zaGp*cwL(W969gLdM5&EFEC%X`YBJ8?9XoG$Pwin^Xda-T;Fr3;YTJfDdh0; zU$}65DeI%(v*-FaR!q4}%^f=!P&bz{?Gvc>)1UTd*KMa5Eap0Qh7Ow;bFWD7y|YFi=eLhR^!v1) zhdd7QpH8v7S^9}LU(|{`tX{1}{F)WV!&}Z$J>o8d%D?d8@otzh*bs#at>Dt#Nnp9Q z!S=XfDXHn+4)~^JkKl98V#nvW!4*$0xC<92I0d>MDGtq@ETf7pl1< zql_UlvY6@onhSfs7x%rkHd)b+v;wv9&D}s~RRHG#k<#7$zcDH7Hz$XG z{8c9WktcquaJ;}qr(>eZ8BURUR!n%yEn3bd;_|w~WHSo#8LblZ> z@u2o2hAoSr&*;0H_4F-Gr+gLp4L2{>b3^!7kwG?g-#u;~n#{!R^(@$=4(obToK&(E z8sN-tPWUO~0JE|7c${j7voaEFU*stAJ}=$zK-P0Fj7;#vU_CF)ap;YmYkjbyq&F65 
zdf~LWCq{H}#+3{wJZ-as)U!wGHzLh)mR+Zdi4EFlxUMHMO{BV8DJNJd+7i3lEOE)s z7B(lX5Hv?JKRh(Ur2r#X)azj8WHn5k-Whk~$oO66=x1vQLmgctS7^XAR1GJKl|(ke z@7k{TCrl1SC7s~#=$FX!dKlZp%6Tm!+jGWg1z4u2<8Y`R{zx*xf_P)x$~A|_ZVL>k zmCUToc5wV)BQjO1S32URj4kdd+hO4eEBIeA#Ul$neEX#-e3$b+bb~Y_?_hcpC*G}O zOx{O^^-Ey?PB*!A-(`L@f6jr63+cV6jE37wXnHY?U36~HAo~c*P0rBOGn}K3T&4Sw zBdnOehEpf3V^H-Gb`-pzp86;HrGDb=!7uq~{}DFcp2-m^(H34mB4v;l zqk?Y9YUsa86+M;ZFsQtl_T{CVX(@SkdZ+P6Z3Z*z3fW^+CB5I*a*I_Rm$z2))0lD| znNdvB=qi3%-x+pqbj96G+Uw=7Ig6cO=SEi~7rJ0nlnV?s-SKXsD;%eLpqrB?ZpeAz zU56*MTYKaB=DyfJvM=(}20>A=A5Q)0gE9R*VQ%J*`RS5g(Qb*=r9BZGt`7AM1aMP{)}BC0yJrd3KII z*x1bm1+@}vc|{ACr~f~D@yr1g%#H1aivtzV;jMtsK8mRA-4)T<@<_e$|LjDmFVw#) zx?{#b1EJ^a{?P(OG3K~dZHoor_IS3&QQR)KBsjyj)B)GW*+c2L4SugQ#fl((%!$+# zd*K65WN;`Oh`H0o+R$pQeDaZf|A-;IE^_+0bv%`{kp)$E*!4oL;3Z4FQPVH zvGFVAcT`eobrpNeZegfWSFBLd6}+<40(R37OA2lu{k%JapMErStcITX>6zPKef2t9lD11tMt!d7pwFZ%ngweVIk z9rue2E~DkM5{}HT6nf95`CSk>Nmp>co445G&x>|A(A!4Lju-q4V7*3Dd}o^FLr^JWUfi8aK#QwCnRf0dfsmvBxac4 z_fd5$S|@`&rY#)Zt(lJ=O1`zVjcgw9h0A9?VfD4cR3E&VH=fPmrG?A+t}sIIQNz#F zutWJfZB}P;($f&GkKN7&#c)O@r&F<`f`tL!Ib})}+YNs(Pp5@b?~<3lwbR_Yk((Y@ z@;|>kR-H^`$FFyM^!Ytc$`v!%zlv48+d00W8<^UB)~e0w#U zL+0n-~3iJxHm~6U-ZNX(ytdVC$yfg0zR;ZQ#$|t^9!Z^M9Q6%kdvj3IZ0ah zW4#t!bhR)lTT|pfbSaY)`nx&jDn#al*{%Y~JeNbK;uL0FPZNCq!FF0AFL05g6&`k& z;lNNu#Q0Zm(DG+O2b}ZqE$92b^U2oi{@kDh$S%1 zS&E2Z^Ks;Jprp43BJs&=45|!3^X*CK+II}(pAA6GgI?IXwU&?To^f8U$F!=u#UERq zvR`2X=Y^XJpTfhB2V%RSFRWUsfHx1}M|3AljN|YWH6nY4pSu{^yU`_iEMj|s?eVZAM?>9k#xj7!FTH|q` z6Vw&Fku|;#c02gOJ9juX-yDU^i6e04+%TvW3_|h0{ZL`!jIbCjY|$uT;pjL~TcgMS zrrPFi_^2lbsjhpf^GE*sGlQ%3-f>hw9z)MIaIS_t@-($D-CY}619fply{GVrlVY8s z;T{4%<@#CQP^ue2#L!aAk8QE759p>f>QZU=6mW4dBnEB1mQKL5?;s^9!-4kuL;tc zhr+rv08hM^V&w0&h_?^I+KC6yta1n+)DEMn*8$Z0eF9tCPe4B5G`4>|ic!AD5O(0W zq%IuCv=t}t$S)WJhn~UgM@JxYe?R2Bc4OU|AO!gD!f(GFC_cCmmshU9_c?)>y0kAU z*P6jFUkzq`WQ1?GwAUI&O~s`r^U?D9FZe861jVs4al>>F{=BA#w97Sg+m=pQzannF Stb+VAo_J;GgZ}||Cx8{~Jac*g literal 0 HcmV?d00001 diff --git a/rsciio/tests/test_digitalsurf.py 
b/rsciio/tests/test_digitalsurf.py index 9121d90f8..2a51b663f 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -409,7 +409,7 @@ def test_load_spectrum(): def test_load_surface(): - fname = TEST_DATA_PATH / "test_surface.sur" + fname = TEST_DATA_PATH / "test_isurface.sur" s = hs.load(fname) md = s.metadata assert md.Signal.quantity == "CL Intensity (a.u.)" @@ -495,19 +495,7 @@ def test_metadata_mapping(): "exit_slit_width" ] == 7000 - ) - - -def test_get_n_obj_chn(): - - omd = {"Object_0_Channel_0":{}, - "Object_1_Channel_0":{}, - "Object_2_Channel_0":{}, - "Object_2_Channel_1":{}, - "Object_2_Channel_2":{}, - "Object_3_Channel_0":{},} - - assert DigitalSurfHandler._get_nobjects(omd)==3 + ) def test_compressdata(): @@ -545,43 +533,48 @@ def test_compressdata(): def test_get_comment_dict(): - tdh = DigitalSurfHandler() - tdh.signal_dict={'original_metadata':{ - 'Object_0_Channel_0':{ + omd={'Object_0_Channel_0':{ 'Parsed':{ 'key_1': 1, 'key_2':'2' } } - }} + } - assert tdh._get_comment_dict('auto')=={'key_1': 1,'key_2':'2'} - assert tdh._get_comment_dict('off')=={} - assert tdh._get_comment_dict('raw')=={'Object_0_Channel_0':{'Parsed':{'key_1': 1,'key_2':'2'}}} - assert tdh._get_comment_dict('custom',custom={'a':0}) == {'a':0} + assert DigitalSurfHandler._get_comment_dict(omd,'auto')=={'key_1': 1,'key_2':'2'} + assert DigitalSurfHandler._get_comment_dict(omd,'off')=={} + assert DigitalSurfHandler._get_comment_dict(omd,'raw')=={'Object_0_Channel_0':{'Parsed':{'key_1': 1,'key_2':'2'}}} + assert DigitalSurfHandler._get_comment_dict(omd,'custom',custom={'a':0}) == {'a':0} #Goes to second dict if only this one's valid - tdh.signal_dict={'original_metadata':{ + omd={ 'Object_0_Channel_0':{'Header':{}}, 'Object_0_Channel_1':{'Header':'ObjHead','Parsed':{'key_1': '0'}}, - }} - assert tdh._get_comment_dict('auto') == {'key_1': '0'} + } + assert DigitalSurfHandler._get_comment_dict(omd, 'auto') == {'key_1': '0'} #Return empty if none 
valid - tdh.signal_dict={'original_metadata':{ + omd={ 'Object_0_Channel_0':{'Header':{}}, 'Object_0_Channel_1':{'Header':'ObjHead'}, - }} - assert tdh._get_comment_dict('auto') == {} + } + assert DigitalSurfHandler._get_comment_dict(omd,'auto') == {} #Return dict-cast if a single field is named 'Parsed' (weird case) - tdh.signal_dict={'original_metadata':{ + omd={ 'Object_0_Channel_0':{'Header':{}}, 'Object_0_Channel_1':{'Header':'ObjHead','Parsed':'SomeContent'}, - }} - assert tdh._get_comment_dict('auto') == {'Parsed':'SomeContent'} + } + assert DigitalSurfHandler._get_comment_dict(omd,'auto') == {'Parsed':'SomeContent'} + -@pytest.mark.parametrize("test_object", ["test_profile.pro", "test_spectra.pro", "test_spectral_map.sur", "test_spectral_map_compressed.sur", "test_spectrum.pro", "test_spectrum_compressed.pro", "test_surface.sur"]) +@pytest.mark.parametrize("test_object", ["test_profile.pro", + "test_spectra.pro", + "test_spectral_map.sur", + "test_spectral_map_compressed.sur", + "test_spectrum.pro", + "test_spectrum_compressed.pro", + "test_isurface.sur"]) def test_writetestobjects(tmp_path,test_object): """Test data integrity of load/save functions. Starting from externally-generated data (i.e. 
not from hyperspy)""" @@ -613,8 +606,33 @@ def test_writetestobjects(tmp_path,test_object): assert np.allclose(ax.axis,ax2.axis) assert np.allclose(ax.axis,ax3.axis) +@pytest.mark.parametrize("test_tuple ", [("test_profile.pro",'_PROFILE'), + ("test_spectra.pro",'_SPECTRUM'), + ("test_spectral_map.sur",'_HYPCARD'), + ("test_spectral_map_compressed.sur",'_HYPCARD'), + ("test_spectrum.pro",'_SPECTRUM'), + ("test_spectrum_compressed.pro",'_SPECTRUM'), + ("test_surface.sur",'_SURFACE'), + ('test_RGB.sur','_RGBIMAGE')]) +def test_split(test_tuple): + """Test for expected object type in the reference dataset""" + obj = test_tuple[0] + res = test_tuple[1] + + df = TEST_DATA_PATH.joinpath(obj) + dh= DigitalSurfHandler(obj) + + d = hs.load(df) + dh.signal_dict = d._to_dictionary() + dh._n_ax_nav, dh._n_ax_sig = dh._get_n_axes(dh.signal_dict) + dh._split_signal_dict() + + assert dh._Object_type == res + def test_writeRGB(tmp_path): - + # This is just a different test function because the + # comparison of rgb data must be done differently + # (due to hyperspy underlying structure) df = TEST_DATA_PATH.joinpath("test_RGB.sur") d = hs.load(df) fn = tmp_path.joinpath("test_RGB.sur") @@ -644,8 +662,12 @@ def test_writeRGB(tmp_path): assert np.allclose(ax.axis,ax3.axis) @pytest.mark.parametrize("dtype", [np.int16, np.int32, np.float64, np.uint8, np.uint16]) -def test_writegeneric_validtypes(tmp_path,dtype): - +@pytest.mark.parametrize('compressed',[True,False]) +def test_writegeneric_validtypes(tmp_path,dtype,compressed): + """This test establish""" gen = hs.signals.Signal1D(np.arange(24,dtype=dtype))+25 fgen = tmp_path.joinpath('test.pro') - gen.save(fgen,overwrite=True) \ No newline at end of file + gen.save(fgen,compressed = compressed, overwrite=True) + + gen2 = hs.load(fgen) + assert np.allclose(gen2.data,gen.data) From cc52a66471daa3c6127072b394e8458896a57f92 Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Mon, 24 Jun 2024 18:26:54 +0200 Subject: [PATCH 04/21] Fix 
RGBImageSeries, enhance tests, improve doc --- rsciio/digitalsurf/Untitled-1.ipynb | 673 ++++++++++++++++++++++++++++ rsciio/digitalsurf/_api.py | 198 ++++---- rsciio/tests/test_digitalsurf.py | 96 +++- 3 files changed, 890 insertions(+), 77 deletions(-) create mode 100644 rsciio/digitalsurf/Untitled-1.ipynb diff --git a/rsciio/digitalsurf/Untitled-1.ipynb b/rsciio/digitalsurf/Untitled-1.ipynb new file mode 100644 index 000000000..c35673f23 --- /dev/null +++ b/rsciio/digitalsurf/Untitled-1.ipynb @@ -0,0 +1,673 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from rsciio.digitalsurf._api import DigitalSurfHandler\n", + "import hyperspy.api as hs\n", + "import numpy as np\n", + "import pathlib\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib qt\n", + "\n", + "savedir = pathlib.Path().home().joinpath(\"OneDrive - Attolight/Desktop/\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "ddd = np.loadtxt(r\"C:\\Users\\NicolasTappy\\Attolight Dropbox\\ATT_RnD\\INJECT\\BEAMFOUR\\BeamFour-end-users_Windows\\histo2dim_500mmoffset.txt\",delimiter=',')" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "255" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.iinfo(np.uint8).max" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 35, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dt = np.uint8\n", + "maxint = np.iinfo(dt).max\n", + "np.random.randint(low=0,high=maxint,size=(17,38,3),dtype=dt)\n", + "size = (5,17,38,3)\n", + "maxint = np.iinfo(dt).max\n", + "\n", + "gen = hs.signals.Signal1D(np.random.randint(low=0,high=maxint,size=size,dtype=dt))\n", + 
"gen\n", + "# gen.change_dtype('rgb8')" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "52c591c4ee4f44d6a582c5958ecffd12", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(HBox(children=(Label(value='Unnamed 0th axis', layout=Layout(width='15%')), IntSlider(value=0, …" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "gen.change_dtype('rgb8')\n", + "gen.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Text(0, 0.5, 'Y (um)')" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "plt.matshow(ddd)\n", + "plt.xlabel('X (um)')\n", + "plt.ylabel('Y (um)')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def diffdic(a:dict,b:dict):\n", + " set1 = set(a.items())\n", + " set2 = set(b.items())\n", + " return set1^set2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import struct\n", + "def _pack_str(val, size, encoding=\"latin-1\"):\n", + " \"\"\"Write a str of defined size in bytes to a file. 
struct.pack\n", + " will automatically trim the string if it is too long\"\"\"\n", + " return struct.pack(\"<{:d}s\".format(size), f\"{val}\".ljust(size).encode(encoding))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "testdir = pathlib.Path(r'C:\\Program Files\\Attolight\\AttoMap Advanced 7.4\\Example Data')\n", + "testfiles = list(testdir.glob('*.sur'))+list(testdir.glob('*pro'))\n", + "list(testfiles)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "testdir = pathlib.Path(r'C:\\Program Files\\Attolight\\AttoMap Advanced 7.4\\Example Data')\n", + "testfiles = list(testdir.glob('*.sur'))+list(testdir.glob('*pro'))\n", + "savedir = pathlib.Path(r'C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Desktop\\ds_testfiles')\n", + "for tf in testfiles:\n", + " d = hs.load(tf)\n", + " comp = d.original_metadata.Object_0_Channel_0.Header.H01_Signature == 'DSCOMPRESSED'\n", + " nam = d.original_metadata.Object_0_Channel_0.Header.H06_Object_Name\n", + " abso = d.original_metadata.Object_0_Channel_0.Header.H12_Absolute\n", + " # print(tf.name)\n", + " # if d.original_metadata.Object_0_Channel_0.Header.H05_Object_Type == 12:\n", + " # print(d.original_metadata.Object_0_Channel_0.Header.H23_Z_Spacing)\n", + " nn = savedir.joinpath(f\"EXPORTED_{tf.name}\")\n", + " print(f\"{nn.name}: {comp}, {abso}\")\n", + " d.save(nn,object_name=nam,compressed=comp,absolute=abso,overwrite=True)\n", + " tmp = hs.load(nn)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a = d.axes_manager[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a.get_axis_dictionary()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "exptf = 
list(pathlib.Path(r'C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Desktop\\ds_testfiles').glob('*.sur'))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "i = 26\n", + "print(testfiles[i].name)\n", + "d = hs.load(testfiles[i])\n", + "print(exptf[i].name)\n", + "ed = hs.load(exptf[i])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "diffdic(d.original_metadata.Object_0_Channel_0.Header.as_dictionary(),\n", + " ed.original_metadata.Object_0_Channel_0.Header.as_dictionary())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d.plot(),ed.plot(),(d-ed).plot()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pathlib\n", + "d = pathlib.Path(r\"C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Documents\\GIT\\rosettasciio\\rsciio\\tests\\data\\digitalsurf\")\n", + "fl = list(d.iterdir())\n", + "fl" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a = hs.load(r\"C:\\Users\\NicolasTappy\\Attolight Dropbox\\ATT_RnD\\INJECT\\hyperspectral tests\\HYP-TEST-NOLASER\\HYPCard.sur\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "savedir = pathlib.Path(r'C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Desktop\\ds_testfiles')\n", + "for tf in fl:\n", + " d = hs.load(tf)\n", + " try:\n", + " comp = d.original_metadata.Object_0_Channel_0.Header.H01_Signature == 'DSCOMPRESSED'\n", + " nam = d.original_metadata.Object_0_Channel_0.Header.H06_Object_Name\n", + " abso = d.original_metadata.Object_0_Channel_0.Header.H12_Absolute\n", + " except:\n", + " comp=False\n", + " nam= 'test'\n", + " abso = 0\n", + " # print(tf.name)\n", + " # if 
d.original_metadata.Object_0_Channel_0.Header.H05_Object_Type == 12:\n", + " # print(d.original_metadata.Object_0_Channel_0.Header.H23_Z_Spacing)\n", + " nn = savedir.joinpath(f\"EXPORTED_{tf.name}\")\n", + " print(f\"{nn.name}: {comp}, {abso}\")\n", + " d.save(nn,object_name=nam,compressed=comp,absolute=abso,overwrite=True)\n", + " tmp = hs.load(nn)\n", + "exptf = list(pathlib.Path(r'C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Desktop\\ds_testfiles').glob('*'))\n", + "exptf" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t = hs.load(r\"C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Desktop\\ds_testfiles\\EXPORTED_test_spectral_map.sur\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "b'\\x19\\x00\\x00\\x00\\x1a\\x00\\x00\\x00\\x1b\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x1d\\x00\\x00\\x00\\x1e\\x00\\x00\\x00\\x1f\\x00\\x00\\x00 \\x00\\x00\\x00!\\x00\\x00\\x00\"\\x00\\x00\\x00#\\x00\\x00\\x00$\\x00\\x00\\x00'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "i = 4\n", + "print(fl[i].name)\n", + "# d = hs.load(fl[i])\n", + "print(exptf[i].name)\n", + "ed = hs.load(exptf[i])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "a = d.metadata\n", + "a.as_dictionary()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d = hs.load(r\"C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Pictures\\Untitled.jpg\")\n", + "n = savedir.joinpath(f\"EXPORTED_Untitled.sur\")\n", + "d.save(n)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "i = 1\n", + "d1 = hs.load(fl[i])\n", + "d1.save(savedir.joinpath(fl[i].name),overwrite=True)\n", + "d2 = hs.load(savedir.joinpath(fl[i].name))\n", + "for k in 
['R','G','B']:\n", + " plt.figure()\n", + " plt.imshow(d1.data[k].astype(np.int16)-d2.data[k].astype(np.int16))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "aa[0].axis" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "k.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([111., 112., 113., 114., 115., 116., 117., 118., 119., 120., 121.,\n", + " 122., 123., 124., 125., 126., 127., 128., 129., 130., 131., 132.,\n", + " 133., 134.])" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import numpy as np\n", + "from rsciio.digitalsurf import file_writer,file_reader\n", + "md = { 'General': {},\n", + " 'Signal': {}}\n", + "\n", + "ax = {'name': 'X',\n", + " 'navigate': False,\n", + " }\n", + "\n", + "sd = {\"data\": np.arange(24)+111,\n", + " \"axes\": [ax],\n", + " \"metadata\": md,\n", + " \"original_metadata\": {}}\n", + "\n", + "file_writer(\"test.pro\",sd)\n", + "file_reader('test.pro')[0]['data']" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for f in fl:\n", + " print(f.name)\n", + " d = hs.load(f)\n", + " # d.plot()\n", + " # d.save(savedir.joinpath(f.name),overwrite=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for f in fl:\n", + " print(f.name)\n", + " d = hs.load(savedir.joinpath(f.name))\n", + " # d.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + 
"d.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "k = hs.load(savedir.joinpath('test_RGB.sur'))\n", + "k.original_metadata" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for f in testrgbfiles:\n", + " print(pathlib.Path(f).name)\n", + " d = hs.load(f)\n", + " d.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds = DigitalSurfHandler(savedir.joinpath('test_spectra.pro'))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "d.save(savedir.joinpath('test_spectra.pro'),comment='off')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gen = hs.signals.Signal1D(np.arange(24,dtype=np.float32))\n", + "fgen = savedir.joinpath('test.pro')\n", + "gen.save(fgen,overwrite=True,is_special=False)\n", + "gen.data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "hs.load(fgen).original_metadata" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "11.5+11.5" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [], + "source": [ + "from rsciio.utils import rgb_tools" + ] + }, + { + "cell_type": "code", + "execution_count": 74, + "metadata": {}, + "outputs": [], + "source": [ + "# a = np.random.randint(0,65535,size=(8,3,12,14),dtype=np.uint16)\n", + "a = np.random.randint(0,65535,size=(24,12,14),dtype=np.uint16)\n", + "a = a.reshape(8,3,12,14)" + ] + }, + { + 
"cell_type": "code", + "execution_count": 78, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(8, 12, 14, 3)" + ] + }, + "execution_count": 78, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.rollaxis(a,1,4).shape" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [], + "source": [ + "b = rgb_tools.regular_array2rgbx(a)" + ] + }, + { + "cell_type": "code", + "execution_count": 80, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,\n", + " 42, 43, 44, 45, 46, 47, 48], dtype=int8)" + ] + }, + "execution_count": 80, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "d=hs.signals.Signal1D(np.arange(24,dtype=np.int8))+25\n", + "d.data" + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "metadata": {}, + "outputs": [], + "source": [ + "c = b[:8]" + ] + }, + { + "cell_type": "code", + "execution_count": 81, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "-128" + ] + }, + "execution_count": 81, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "- 2**(8-1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "hsdev", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index 2685fc622..2b1277552 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -49,7 +49,7 @@ # from hyperspy.misc.utils import 
DictionaryTreeBrowser from rsciio._docstrings import FILENAME_DOC, LAZY_UNSUPPORTED_DOC, RETURNS_DOC, SIGNAL_DOC from rsciio.utils.exceptions import MountainsMapFileError -from rsciio.utils.rgb_tools import is_rgb, is_rgba +from rsciio.utils.rgb_tools import is_rgb, is_rgba, rgbx2regular_array from rsciio.utils.date_time_tools import get_date_time_from_metadata _logger = logging.getLogger(__name__) @@ -98,7 +98,7 @@ class DigitalSurfHandler(object): 21: "_HYPCARD", } - def __init__(self, filename : str|None = None): + def __init__(self, filename : str = ''): # We do not need to check for file existence here because # io module implements it in the load function self.filename = filename @@ -607,18 +607,6 @@ def _is_spectrum(self) -> bool: return is_spec - def _is_surface(self) -> bool: - """Determine if a 2d-data-like signal_dict should be of surface type, ie the dataset - is a 2d surface of the 3d space. """ - is_surface = False - surfacelike_quantnames = ['Height', 'Altitude', 'Elevation', 'Depth', 'Z'] - quant: str = self.signal_dict['metadata']['Signal']['quantity'] - for name in surfacelike_quantnames: - if quant.startswith(name): - is_surface = True - - return is_surface - def _is_binary(self) -> bool: return self.signal_dict['data'].dtype == bool @@ -647,8 +635,6 @@ def _split_signal_dict(self): warnings.warn(f"A channel discarded upon saving \ RGBA signal in .sur format") self._split_rgb() - # elif self._is_surface(): #'_SURFACE' - # self._split_surface() else: # _INTENSITYSURFACE self._split_surface() elif (n_nav,n_sig) == (1,0): @@ -664,7 +650,7 @@ def _split_signal_dict(self): elif (n_nav,n_sig) == (1,2): if is_rgb(self.signal_dict['data']): self._split_rgbserie() - if is_rgba(self.signal_dict['data']): + elif is_rgba(self.signal_dict['data']): warnings.warn(f"Alpha channel discarded upon saving RGBA signal in .sur format") self._split_rgbserie() else: @@ -679,10 +665,8 @@ def _split_signal_dict(self): warnings.warn(f"A channel discarded upon saving \ 
RGBA signal in .sur format") self._split_rgb() - if self._is_surface(): - self._split_surface() else: - self._split_intensitysurface() + self._split_surface() elif (n_nav,n_sig) == (2,1): self._split_hyperspectral() else: @@ -716,12 +700,7 @@ def _split_profile(self,): obj_type = 1 self._Object_type = self._mountains_object_types[obj_type] - - if (self._n_ax_nav,self._n_ax_sig) in [(0,1),(1,0)]: - self.Xaxis = self.signal_dict['axes'][0] - else: - raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for a profile type") - + self.Xaxis = self.signal_dict['axes'][0] self.data_split = [self.signal_dict['data']] self.objtype_split = [obj_type] self._N_data_objects = 1 @@ -763,12 +742,8 @@ def _split_rgb(self,): """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 12 self._Object_type = self._mountains_object_types[obj_type] - if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: - self.Xaxis = self.signal_dict['axes'][1] - self.Yaxis = self.signal_dict['axes'][0] - else: - raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") - + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] self.data_split = [np.int32(self.signal_dict['data']['R']), np.int32(self.signal_dict['data']['G']), np.int32(self.signal_dict['data']['B']) @@ -781,25 +756,8 @@ def _split_surface(self,): """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 2 self._Object_type = self._mountains_object_types[obj_type] - if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: - self.Xaxis = self.signal_dict['axes'][1] - self.Yaxis = self.signal_dict['axes'][0] - else: - raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") - self.data_split = [self.signal_dict['data']] - self.objtype_split = [obj_type] - 
self._N_data_objects = 1 - self._N_data_channels = 1 - - def _split_intensitysurface(self,): - """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" - obj_type = 10 - self._Object_type = self._mountains_object_types[obj_type] - if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: - self.Xaxis = self.signal_dict['axes'][1] - self.Yaxis = self.signal_dict['axes'][0] - else: - raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] self.data_split = [self.signal_dict['data']] self.objtype_split = [obj_type] self._N_data_objects = 1 @@ -816,13 +774,18 @@ def _split_rgbserie(self): self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) tmp_data_split = self._split_data_alongaxis(self.Taxis) - self.data_split = [] + # self.data_split = [] self.objtype_split = [] for d in tmp_data_split: - self.data_split += [d['R'].astype(np.int32), d['G'].astype(np.int32), d['B'].astype(np.int32)] - self.objtype_split += [12,10,10] + self.data_split += [d['R'].astype(np.int16).copy(), + d['G'].astype(np.int16).copy(), + d['B'].astype(np.int16).copy(), + ] + # self.objtype_split += [12,10,10] + self.objtype_split = [12,10,10]*self.Taxis['size'] self.objtype_split[0] = obj_type - + # self.data_split = rgbx2regular_array(self.signal_dict['data']) + self._N_data_objects = self.Taxis['size'] self._N_data_channels = 3 @@ -882,6 +845,13 @@ def _norm_data(self, data: np.ndarray, is_special: bool): if np.issubdtype(data_type,np.complexfloating): raise MountainsMapFileError(f"digitalsurf file formats do not support export of complex data. 
Convert data to real-value representations before before export") + elif data_type==bool: + pointsize = 16 + Zmin = 0 + Zmax = 1 + Zscale = 1 + Zoffset = 0 + data_int = data.astype(np.int16) elif data_type==np.uint8: warnings.warn("np.uint8 datatype exported as np.int16.") pointsize = 16 @@ -897,7 +867,7 @@ def _norm_data(self, data: np.ndarray, is_special: bool): elif data_type==np.int8: pointsize = 16 #Pointsize has to be 16 or 32 in surf format Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, 8, is_special) - data_int = data + data_int = data.astype(np.int16) elif data_type==np.int16: pointsize = 16 Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, pointsize, is_special) @@ -919,7 +889,7 @@ def _norm_signed_int(self, data:np.ndarray, intsize: int, is_special: bool = Fal if saturation needs to be flagged""" # There are no NaN values for integers. Special points means considering high/low saturation of integer scale. data_int_min = - 2**(intsize-1) - data_int_max = 2**(intsize -1) + data_int_max = 2**(intsize -1) - 1 is_satlo = (data==data_int_min).sum() >= 1 and is_special is_sathi = (data==data_int_max).sum() >= 1 and is_special @@ -1178,7 +1148,10 @@ def _build_sur_dict(self): self._build_general_1D_data() elif self._Object_type in ["_PROFILESERIE"]: self._build_1D_series() - elif self._Object_type in ["_SURFACE","_INTENSITYIMAGE","_BINARYIMAGE"]: + elif self._Object_type in ["_BINARYIMAGE"]: + self._build_surface() + self.signal_dict.update({"post_process": [self.post_process_binary]}) + elif self._Object_type in ["_SURFACE","_INTENSITYIMAGE"]: self._build_surface() elif self._Object_type in ["_SURFACESERIE"]: self._build_surface_series() @@ -1190,8 +1163,8 @@ def _build_sur_dict(self): self._build_RGB_image() elif self._Object_type in ["_RGBINTENSITYSURFACE"]: self._build_RGB_surface() - # elif self._Object_type in ["_BINARYIMAGE"]: - # self._build_surface() + elif self._Object_type in ['_SERIESOFRGBIMAGES']: + self._build_RGB_image_series() 
else: raise MountainsMapFileError( f"{self._Object_type} is not a supported mountain object." @@ -1480,6 +1453,54 @@ def _build_RGB_image(self,): self.signal_dict.update({"post_process": [self.post_process_RGB]}) + def _build_RGB_image_series(self,): + + # First object dictionary + hypdic = self._list_sur_file_content[0] + + # Metadata are set from first dictionary + self._set_metadata_and_original_metadata(hypdic) + + # We build the series-axis + self.signal_dict["axes"].append( + self._build_Tax(hypdic, "_03_Number_of_Objects", ind=0, nav=False) + ) + + # All objects must share the same signal axes + self.signal_dict["axes"].append(self._build_Yax(hypdic, ind=1, nav=False)) + self.signal_dict["axes"].append(self._build_Xax(hypdic, ind=2, nav=False)) + + # shape of the surfaces in the series + shape = (hypdic["_19_Number_of_Lines"], hypdic["_18_Number_of_Points"]) + nimg = hypdic["_03_Number_of_Objects"] + nchan = hypdic["_08_P_Size"] + # We put all the data together + data = np.empty(shape=(nimg,*shape,nchan)) + i = 0 + for imgidx in range(nimg): + for chanidx in range(nchan): + obj = self._list_sur_file_content[i] + data[imgidx,...,chanidx] = obj["_62_points"].reshape(shape) + i+=1 + + # for obj in self._list_sur_file_content: + # data.append(obj["_62_points"].reshape(shape)) + + # data = np.stack(data) + + # data = data.reshape(nimg,nchan,*shape) + # data = np.rollaxis(data,) + + # Pushing data into the dictionary + self.signal_dict["data"] = data + + # Add the color-axis to the signal dict so it can be consumed + self.signal_dict["axes"].append( + self._build_Tax(hypdic, "_08_P_Size", ind=3, nav=True) + ) + + self.signal_dict.update({"post_process": [self.post_process_RGB]}) + # Metadata utility methods @staticmethod @@ -1944,6 +1965,11 @@ def post_process_RGB(signal): ) return signal + + @staticmethod + def post_process_binary(signal): + signal.change_dtype('bool') + return signal # pack/unpack binary quantities @staticmethod @@ -2225,7 +2251,18 @@ def 
file_reader(filename, lazy=False): surdict, ] -def file_writer(filename, signal: dict, **kwds): +def file_writer(filename, + signal: dict, + set_comments: str = 'auto', + is_special: bool = False, + compressed: bool = True, + comments: dict = {}, + object_name: str = '', + operator_name: str = '', + absolute: int = 0, + private_zone: bytes = b'', + client_zone: bytes = b'' + ): """ Write a mountainsmap ``.sur`` or ``.pro`` file. @@ -2237,34 +2274,45 @@ def file_writer(filename, signal: dict, **kwds): Whether comments should be a simplified original_metadata ('auto'), exported as the raw original_metadata dictionary ('raw'), skipped ('off'), or supplied by the user as an additional kwarg ('custom'). - is_special : bool, default = False + is_special : bool , default = False If True, NaN values in the dataset or integers reaching boundary values are flagged in the export as non-measured and saturating, respectively. If False, those values are kept as-is. - compressed: bool, default =True + compressed : bool, default =True If True, compress the data in the export file using zlib. - comments: dict, default = {} + comments : dict, default = {} Set a custom dictionnary in the comments field of the exported file. - Ignored if set_comments is not set to 'custom'. - object_name: str, default = '' - Set the object name field in the output file - operator_name: str, default = '' + Ignored if set_comments is not set to 'custom'. + object_name : str, default = '' + Set the object name field in the output file. + operator_name : str, default = '' Set the operator name field in the exported file. - absolute: int, default = 0, + absolute : int, default = 0, Unsigned int capable of flagging whether surface heights are relative (0) or absolute (1). Higher unsigned int values can be used to distinguish several - data series sharing internal reference - private_zone: bytes, default = b'', + data series sharing internal reference. 
+ private_zone : bytes, default = b'', Set arbitrary byte-content in the private_zone field of exported file metadata. - Maximum size is 32.0 kB and content will be cropped if this size is exceeded - client_zone: bytes, default = b'' + Maximum size is 32.0 kB and content will be cropped if this size is exceeded. + client_zone : bytes, default = b'' Set arbitrary byte-content in the client_zone field of exported file metadata. - Maximum size is 128B and and content will be cropped if this size is exceeded + Maximum size is 128B and and content will be cropped if this size is exceeded. + **kwds : dict + Unpacked keywords arguments dictionary. Does not accept other arguments than + those specified above. """ ds = DigitalSurfHandler(filename=filename) ds.signal_dict = signal - ds._build_sur_file_contents(**kwds) + ds._build_sur_file_contents(set_comments, + is_special, + compressed, + comments, + object_name, + operator_name, + absolute, + private_zone, + client_zone) ds._write_sur_file() file_reader.__doc__ %= (FILENAME_DOC,LAZY_UNSUPPORTED_DOC,RETURNS_DOC) diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index 2a51b663f..b9ce74d9b 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -606,6 +606,7 @@ def test_writetestobjects(tmp_path,test_object): assert np.allclose(ax.axis,ax2.axis) assert np.allclose(ax.axis,ax3.axis) + @pytest.mark.parametrize("test_tuple ", [("test_profile.pro",'_PROFILE'), ("test_spectra.pro",'_SPECTRUM'), ("test_spectral_map.sur",'_HYPCARD'), @@ -661,13 +662,104 @@ def test_writeRGB(tmp_path): assert np.allclose(ax.axis,ax2.axis) assert np.allclose(ax.axis,ax3.axis) -@pytest.mark.parametrize("dtype", [np.int16, np.int32, np.float64, np.uint8, np.uint16]) +@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.float64, np.uint8, np.uint16]) @pytest.mark.parametrize('compressed',[True,False]) def test_writegeneric_validtypes(tmp_path,dtype,compressed): - """This test 
establish""" + """This test establishes the capability of saving a generic hyperspy signals + generated from numpy array""" gen = hs.signals.Signal1D(np.arange(24,dtype=dtype))+25 fgen = tmp_path.joinpath('test.pro') gen.save(fgen,compressed = compressed, overwrite=True) gen2 = hs.load(fgen) assert np.allclose(gen2.data,gen.data) + +@pytest.mark.parametrize("dtype", [np.int64, np.complex64, np.uint64, ]) +def test_writegeneric_failingtypes(tmp_path,dtype): + gen = hs.signals.Signal1D(np.arange(24,dtype=dtype))+25 + fgen = tmp_path.joinpath('test.pro') + with pytest.raises(MountainsMapFileError): + gen.save(fgen,overwrite= True) + +@pytest.mark.parametrize("dtype", [(np.uint8,"rgba8"), (np.uint16,"rgba16")]) +@pytest.mark.parametrize('compressed',[True,False]) +@pytest.mark.parametrize('transpose',[True,False]) +def test_writegeneric_rgba(tmp_path,dtype,compressed,transpose): + """This test establishes the possibility of saving RGBA data while discarding + A channel and warning""" + size = (17,38,4) + maxint = np.iinfo(dtype[0]).max + + gen = hs.signals.Signal1D(np.random.randint(low=0,high=maxint,size=size,dtype=dtype[0])) + gen.change_dtype(dtype[1]) + + fgen = tmp_path.joinpath('test.sur') + + if transpose: + gen = gen.T + + with pytest.warns(): + gen.save(fgen,compressed = compressed, overwrite=True) + + gen2 = hs.load(fgen) + + for k in ['R','G','B']: + assert np.allclose(gen.data[k],gen2.data[k]) + assert np.allclose(gen.data[k],gen2.data[k]) + +@pytest.mark.parametrize('compressed',[True,False]) +@pytest.mark.parametrize('transpose',[True,False]) +def test_writegeneric_binaryimg(tmp_path,compressed,transpose): + + size = (76,3) + + gen = hs.signals.Signal2D(np.random.randint(low=0,high=1,size=size,dtype=bool)) + + fgen = tmp_path.joinpath('test.sur') + + if transpose: + gen = gen.T + with pytest.warns(): + gen.save(fgen,compressed = compressed, overwrite=True) + else: + gen.save(fgen,compressed = compressed, overwrite=True) + + gen2 = hs.load(fgen) + + assert 
np.allclose(gen.data,gen2.data) + +@pytest.mark.parametrize('compressed',[True,False]) +def test_writegeneric_profileseries(tmp_path,compressed): + + size = (9,655) + + gen = hs.signals.Signal1D(np.random.random(size=size)*1444+2550.) + fgen = tmp_path.joinpath('test.pro') + + gen.save(fgen,compressed = compressed, overwrite=True) + + gen2 = hs.load(fgen) + + assert np.allclose(gen.data,gen2.data) + + +@pytest.mark.parametrize("dtype", [(np.uint8,"rgb8"), (np.uint16,"rgb16")]) +@pytest.mark.parametrize('compressed',[True,False]) +def test_writegeneric_rgbseries(tmp_path,dtype,compressed): + """This test establishes the possibility of saving RGBA data while discarding + A channel and warning""" + size = (5,44,24,3) + maxint = np.iinfo(dtype[0]).max + + gen = hs.signals.Signal1D(np.random.randint(low=0,high=maxint,size=size,dtype=dtype[0])) + gen.change_dtype(dtype[1]) + + fgen = tmp_path.joinpath('test.sur') + + gen.save(fgen,compressed = compressed, overwrite=True) + + gen2 = hs.load(fgen) + + for k in ['R','G','B']: + assert np.allclose(gen.data[k],gen2.data[k]) + assert np.allclose(gen.data[k],gen2.data[k]) \ No newline at end of file From 1e6e4b910ae3dfa7aa189dbae5dca2b8155bda9f Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Tue, 25 Jun 2024 09:23:22 +0200 Subject: [PATCH 05/21] increase codecov, fix bugs --- .../supported_formats/digitalsurf.rst | 2 +- rsciio/digitalsurf/_api.py | 118 ++++++++---------- rsciio/tests/test_digitalsurf.py | 89 ++++++++++++- upcoming_changes/280.enhancements.rst | 1 + 4 files changed, 135 insertions(+), 75 deletions(-) create mode 100644 upcoming_changes/280.enhancements.rst diff --git a/doc/user_guide/supported_formats/digitalsurf.rst b/doc/user_guide/supported_formats/digitalsurf.rst index 8b5807abd..6b52cadc0 100644 --- a/doc/user_guide/supported_formats/digitalsurf.rst +++ b/doc/user_guide/supported_formats/digitalsurf.rst @@ -35,7 +35,7 @@ quantity are named. 
The criteria are listed here below: | 0 | 1 | ``.pro``: Spectrum (based on axes name), Profile (default) | +-----------------+---------------+------------------------------------------------------------------------------+ | 0 | 2 | ``.sur``: BinaryImage (based on dtype), RGBImage (based on dtype), | -| | | Surface (default), | +| | | Surface (default) | +-----------------+---------------+------------------------------------------------------------------------------+ | 1 | 0 | ``.pro``: same as (1,0) | +-----------------+---------------+------------------------------------------------------------------------------+ diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index 2b1277552..32a1bd16b 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -49,7 +49,7 @@ # from hyperspy.misc.utils import DictionaryTreeBrowser from rsciio._docstrings import FILENAME_DOC, LAZY_UNSUPPORTED_DOC, RETURNS_DOC, SIGNAL_DOC from rsciio.utils.exceptions import MountainsMapFileError -from rsciio.utils.rgb_tools import is_rgb, is_rgba, rgbx2regular_array +from rsciio.utils.rgb_tools import is_rgb, is_rgba from rsciio.utils.date_time_tools import get_date_time_from_metadata _logger = logging.getLogger(__name__) @@ -480,37 +480,6 @@ def _write_sur_file(self): for key in self._work_dict: self._work_dict[key]['b_pack_fn'](f,self._work_dict[key]['value']) - def _validate_filename(self): - - sur_only = ['_SURFACE', - '_BINARYIMAGE', - '_SURFACESERIE', - '_MULTILAYERSURFACE', - '_INTENSITYIMAGE', - '_INTENSITYSURFACE', - '_RGBIMAGE', - '_RGBSURFACE', - '_RGBINTENSITYSURFACE', - '_SERIESOFRGBIMAGES', - '_HYPCARD'] - - pro_only = ['_PROFILE', - '_PROFILESERIE', - '_MULTILAYERPROFILE', - '_FORCECURVE', - '_SERIEOFFORCECURVE', - '_CONTOURPROFILE', - '_SPECTRUM', - ] - - if self._Object_type in sur_only and not self.filename.lower().endswith('sur'): - raise MountainsMapFileError(f"Attempting save of DigitalSurf {self._Object_type} with\ - 
.{self.filename.split('.')[-1]} extension, which only supports .sur") - - if self._Object_type in pro_only and not self.filename.lower().endswith('pro'): - raise MountainsMapFileError(f"Attempting save of DigitalSurf {self._Object_type} with\ - .{self.filename.split('.')[-1]} extension, which only supports .pro") - def _build_sur_file_contents(self, set_comments:str='auto', is_special:bool=False, @@ -573,7 +542,7 @@ def _build_sur_file_contents(self, #Signal dictionary analysis methods @staticmethod - def _get_n_axes(sig_dict: dict) -> tuple[int,int]: + def _get_n_axes(sig_dict: dict): """Return number of navigation and signal axes in the signal dict (in that order). Could be moved away from the .sur api as other functions probably use this as well @@ -711,11 +680,8 @@ def _split_profileserie(self,): obj_type = 4 # '_PROFILESERIE' self._Object_type = self._mountains_object_types[obj_type] - if (self._n_ax_nav,self._n_ax_sig)==(1,1): - self.Xaxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) - self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) - else: - raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._Object_type} type") + self.Xaxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) self.data_split = self._split_data_alongaxis(self.Taxis) self.objtype_split = [obj_type] + [1]*(len(self.data_split)-1) @@ -727,11 +693,8 @@ def _split_binary_img(self,): obj_type = 3 self._Object_type = self._mountains_object_types[obj_type] - if (self._n_ax_nav,self._n_ax_sig) in [(0,2),(2,0)]: - self.Xaxis = self.signal_dict['axes'][1] - self.Yaxis = self.signal_dict['axes'][0] - else: - raise MountainsMapFileError(f"Invalid ({self._n_ax_nav},{self._n_ax_sig}) for {self._mountains_object_types[obj_type]} type") + self.Xaxis = self.signal_dict['axes'][1] + self.Yaxis = self.signal_dict['axes'][0] self.data_split 
= [self.signal_dict['data']] self.objtype_split = [obj_type] @@ -816,7 +779,7 @@ def _split_hyperspectral(self): self._N_data_objects = 1 self._N_data_channels = 1 - def _split_data_alongaxis(self, axis: dict) -> list[np.ndarray]: + def _split_data_alongaxis(self, axis: dict): """Split the data in a series of lower-dim datasets that can be exported to a surface / profile file""" idx = self.signal_dict['axes'].index(axis) @@ -855,27 +818,27 @@ def _norm_data(self, data: np.ndarray, is_special: bool): elif data_type==np.uint8: warnings.warn("np.uint8 datatype exported as np.int16.") pointsize = 16 - Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data.astype(np.int16), pointsize, is_special) + Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) data_int = data.astype(np.int16) elif data_type==np.uint16: warnings.warn("np.uint16 datatype exported as np.int32") pointsize = 32 #Pointsize has to be 16 or 32 in surf format - Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data.astype(np.int32), pointsize, is_special) + Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) data_int = data.astype(np.int32) elif np.issubdtype(data_type,np.unsignedinteger): raise MountainsMapFileError(f"digitalsurf file formats do not support unsigned int >16bits. 
Convert data to signed integers before export.") elif data_type==np.int8: pointsize = 16 #Pointsize has to be 16 or 32 in surf format - Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, 8, is_special) + Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) data_int = data.astype(np.int16) elif data_type==np.int16: pointsize = 16 - Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, pointsize, is_special) + Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) data_int = data elif data_type==np.int32: pointsize = 32 data_int = data - Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, pointsize, is_special) + Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) elif np.issubdtype(data_type,np.integer): raise MountainsMapFileError(f"digitalsurf file formats do not support export integers larger than 32 bits. Convert data to 32-bit representation before exporting") elif np.issubdtype(data_type,np.floating): @@ -884,12 +847,12 @@ def _norm_data(self, data: np.ndarray, is_special: bool): return pointsize, Zmin, Zmax, Zscale, Zoffset, data_int - def _norm_signed_int(self, data:np.ndarray, intsize: int, is_special: bool = False): - """Normalized data of integer type. No normalization per se, but the Zmin and Zmax threshold are set - if saturation needs to be flagged""" - # There are no NaN values for integers. Special points means considering high/low saturation of integer scale. - data_int_min = - 2**(intsize-1) - data_int_max = 2**(intsize -1) - 1 + def _norm_signed_int(self, data:np.ndarray, is_special: bool = False): + """Normalized data of integer type. No normalization per se, but the Zmin and Zmax + threshold are set if saturation flagging is asked.""" + # There are no NaN values for integers. Special points means saturation of integer scale. 
+ data_int_min = np.iinfo(data.dtype).min + data_int_max = np.iinfo(data.dtype).max is_satlo = (data==data_int_min).sum() >= 1 and is_special is_sathi = (data==data_int_max).sum() >= 1 and is_special @@ -926,7 +889,7 @@ def _norm_float(self, data : np.ndarray, is_special: bool = False,): return Zmin, Zmax, Zscale, Zoffset_f, data_int - def _get_Zname_Zunit(self, metadata: dict) -> tuple[str,str]: + def _get_Zname_Zunit(self, metadata: dict): """Attempt reading Z-axis name and Unit from metadata.Signal.Quantity field. Return empty str if do not exist. @@ -2090,14 +2053,31 @@ def _pack_private(self, file, val, encoding="latin-1"): self._set_str(file, val, privatesize) def _is_data_int(self,): - if self._Object_type in ['_BINARYIMAGE', - '_RGBIMAGE', - '_RGBSURFACE', - '_SERIESOFRGBIMAGES']: - return True + """Determine wether data consists of unscaled int values. + This is not the case for all objects. Surface and surface series can admit + this logic. In theory, hyperspectral studiables as well but it is more convenient + to use them as floats due to typical data treatment in hyperspy (scaling etc)""" + objtype = self._mountains_object_types[self._get_work_dict_key_value("_05_Object_Type")] + if objtype in ['_SURFACESERIE','_SURFACE']: + scale = self._get_work_dict_key_value("_23_Z_Spacing") / self._get_work_dict_key_value("_35_Z_Unit_Ratio") + offset = self._get_work_dict_key_value("_55_Z_Offset") + if float(scale).is_integer() and float(offset).is_integer(): + return True + else: + return False else: return False + def _is_data_scaleint(self,): + """Digitalsurf image formats are not stored as their raw int values, but instead are + scaled and a scale / offset is set so that the data scales down to uint. Why this is + done this way is not clear to me. 
""" + objtype = self._mountains_object_types[self._get_work_dict_key_value("_05_Object_Type")] + if objtype in ['_BINARYIMAGE', '_RGBIMAGE', + '_RGBSURFACE', '_SERIESOFRGBIMAGES', + '_INTENSITYIMAGE']: + return True + def _get_uncompressed_datasize(self) -> int: """Return size of uncompressed data in bytes""" psize = int(self._get_work_dict_key_value("_15_Size_of_Points") / 8) @@ -2168,14 +2148,19 @@ def _unpack_data(self, file, encoding="latin-1"): nm = _points == self._get_work_dict_key_value("_16_Zmin") - 2 Zmin = self._get_work_dict_key_value("_16_Zmin") - _points = (_points.astype(float) - Zmin)*self._get_work_dict_key_value("_23_Z_Spacing") * self._get_work_dict_key_value("_35_Z_Unit_Ratio") + self._get_work_dict_key_value("_55_Z_Offset") + scale = self._get_work_dict_key_value("_23_Z_Spacing") / self._get_work_dict_key_value("_35_Z_Unit_Ratio") + offset = self._get_work_dict_key_value("_55_Z_Offset") - # We set the point in the numeric scale + # Packing data into ints or float, with or without scaling. if self._is_data_int(): + _points = _points + elif self._is_data_scaleint(): + _points = (_points.astype(float) - Zmin)*scale + offset _points = np.round(_points).astype(int) else: - _points[nm] = np.nan - + _points = (_points.astype(float) - Zmin)*scale + offset + _points[nm] = np.nan #Ints have no nans + # Return the points, rescaled return _points @@ -2297,9 +2282,6 @@ def file_writer(filename, client_zone : bytes, default = b'' Set arbitrary byte-content in the client_zone field of exported file metadata. Maximum size is 128B and and content will be cropped if this size is exceeded. - **kwds : dict - Unpacked keywords arguments dictionary. Does not accept other arguments than - those specified above. 
""" ds = DigitalSurfHandler(filename=filename) ds.signal_dict = signal diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index b9ce74d9b..9caba8958 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -606,7 +606,6 @@ def test_writetestobjects(tmp_path,test_object): assert np.allclose(ax.axis,ax2.axis) assert np.allclose(ax.axis,ax3.axis) - @pytest.mark.parametrize("test_tuple ", [("test_profile.pro",'_PROFILE'), ("test_spectra.pro",'_SPECTRUM'), ("test_spectral_map.sur",'_HYPCARD'), @@ -630,6 +629,38 @@ def test_split(test_tuple): assert dh._Object_type == res +@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.uint8, np.uint16]) +@pytest.mark.parametrize('special',[True,False]) +@pytest.mark.parametrize('fullscale',[True,False]) +def test_norm_int_data(dtype,special,fullscale): + dh = DigitalSurfHandler() + + if fullscale: + minint = np.iinfo(dtype).min + maxint = np.iinfo(dtype).max + else: + minint = np.iinfo(dtype).min + 23 + maxint = np.iinfo(dtype).max - 9 + + dat = np.random.randint(low=minint,high=maxint,size=222,dtype=dtype) + #Ensure the maximum and minimum off the int scale is actually present in data + if fullscale: + dat[2] = minint + dat[11] = maxint + + pointsize, Zmin, Zmax, Zscale, Zoffset, data_int = dh._norm_data(dat,special) + + off = minint+1 if special and fullscale else dat.min() + maxval = maxint-1 if special and fullscale else dat.max() + + assert np.isclose(Zscale,1.0) + assert np.isclose(Zoffset,off) + assert np.allclose(data_int,dat) + assert Zmin==off + assert Zmax==maxval + + + def test_writeRGB(tmp_path): # This is just a different test function because the # comparison of rgb data must be done differently @@ -688,9 +719,10 @@ def test_writegeneric_rgba(tmp_path,dtype,compressed,transpose): """This test establishes the possibility of saving RGBA data while discarding A channel and warning""" size = (17,38,4) + minint = np.iinfo(dtype[0]).min maxint = 
np.iinfo(dtype[0]).max - gen = hs.signals.Signal1D(np.random.randint(low=0,high=maxint,size=size,dtype=dtype[0])) + gen = hs.signals.Signal1D(np.random.randint(low=minint,high=maxint,size=size,dtype=dtype[0])) gen.change_dtype(dtype[1]) fgen = tmp_path.joinpath('test.sur') @@ -746,12 +778,12 @@ def test_writegeneric_profileseries(tmp_path,compressed): @pytest.mark.parametrize("dtype", [(np.uint8,"rgb8"), (np.uint16,"rgb16")]) @pytest.mark.parametrize('compressed',[True,False]) def test_writegeneric_rgbseries(tmp_path,dtype,compressed): - """This test establishes the possibility of saving RGBA data while discarding - A channel and warning""" + """This test establishes the possibility of saving RGB surface series""" size = (5,44,24,3) + minint = np.iinfo(dtype[0]).min maxint = np.iinfo(dtype[0]).max - gen = hs.signals.Signal1D(np.random.randint(low=0,high=maxint,size=size,dtype=dtype[0])) + gen = hs.signals.Signal1D(np.random.randint(low=minint,high=maxint,size=size,dtype=dtype[0])) gen.change_dtype(dtype[1]) fgen = tmp_path.joinpath('test.sur') @@ -762,4 +794,49 @@ def test_writegeneric_rgbseries(tmp_path,dtype,compressed): for k in ['R','G','B']: assert np.allclose(gen.data[k],gen2.data[k]) - assert np.allclose(gen.data[k],gen2.data[k]) \ No newline at end of file + + +@pytest.mark.parametrize("dtype", [(np.uint8,"rgba8"), (np.uint16,"rgba16")]) +@pytest.mark.parametrize('compressed',[True,False]) +def test_writegeneric_rgbaseries(tmp_path,dtype,compressed): + """This test establishes the possibility of saving RGBA data while discarding + A channel and warning""" + size = (5,44,24,4) + minint = np.iinfo(dtype[0]).min + maxint = np.iinfo(dtype[0]).max + + gen = hs.signals.Signal1D(np.random.randint(low=minint,high=maxint,size=size,dtype=dtype[0])) + gen.change_dtype(dtype[1]) + + fgen = tmp_path.joinpath('test.sur') + + with pytest.warns(): + gen.save(fgen,compressed = compressed, overwrite=True) + + gen2 = hs.load(fgen) + + for k in ['R','G','B']: + assert 
np.allclose(gen.data[k],gen2.data[k]) + + +@pytest.mark.parametrize("dtype", [np.int16, np.int32, np.float64]) +@pytest.mark.parametrize("compressed",[True,False]) +def test_writegeneric_surfaceseries(tmp_path,dtype,compressed): + """This test establishes the possibility of saving RGBA surface series while discarding + A channel and warning""" + size = (9,44,58) + + if np.issubdtype(dtype,np.integer): + minint = np.iinfo(dtype).min + maxint = np.iinfo(dtype).max + gen = hs.signals.Signal2D(np.random.randint(low=minint,high=maxint,size=size,dtype=dtype)) + else: + gen = hs.signals.Signal2D(np.random.random(size=size).astype(dtype)*1e6) + + fgen = tmp_path.joinpath('test.sur') + + gen.save(fgen,compressed = compressed, overwrite=True) + + gen2 = hs.load(fgen) + + assert np.allclose(gen.data,gen2.data) \ No newline at end of file diff --git a/upcoming_changes/280.enhancements.rst b/upcoming_changes/280.enhancements.rst new file mode 100644 index 000000000..bd637c83b --- /dev/null +++ b/upcoming_changes/280.enhancements.rst @@ -0,0 +1 @@ +:ref:`DigitalSurf surfaces `: Add file_writer support, add series of RGB images / surfaces support. 
\ No newline at end of file From f968fe5d192b2a7fa7b78172ec5489576a3ff520 Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Tue, 25 Jun 2024 09:40:06 +0200 Subject: [PATCH 06/21] Fix bug causing error from untitled metadata --- rsciio/digitalsurf/_api.py | 5 +++++ rsciio/tests/test_digitalsurf.py | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index 32a1bd16b..d3558173c 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -1783,6 +1783,8 @@ def _MS_parse(str_ms, prefix, delimiter): # Title lines start with an underscore titlestart = "{:s}_".format(prefix) + keymain = None + for line in str_ms.splitlines(): # Here we ignore any empty line or line starting with @@ ignore = False @@ -1795,6 +1797,9 @@ def _MS_parse(str_ms, prefix, delimiter): key_main = line[len(titlestart) :].strip() dict_ms[key_main] = {} elif line.startswith(prefix): + if keymain is None: + keymain = 'UNTITLED' + dict_ms[key_main] = {} key, *li_value = line.split(delimiter) # Key is also stripped from beginning or end whitespace key = key[len(prefix) :].strip() diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index 9caba8958..d08f27ba2 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -659,8 +659,6 @@ def test_norm_int_data(dtype,special,fullscale): assert Zmin==off assert Zmax==maxval - - def test_writeRGB(tmp_path): # This is just a different test function because the # comparison of rgb data must be done differently From c6587c92a661e417c87147e2a60e56ef45b41580 Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Tue, 25 Jun 2024 09:42:04 +0200 Subject: [PATCH 07/21] Linting using black --- rsciio/digitalsurf/__init__.py | 5 +- rsciio/digitalsurf/_api.py | 953 ++++++++++++++++++------------- rsciio/tests/test_digitalsurf.py | 381 ++++++------ 3 files changed, 753 insertions(+), 586 deletions(-) diff --git 
a/rsciio/digitalsurf/__init__.py b/rsciio/digitalsurf/__init__.py index 7db9455d9..49230cbba 100644 --- a/rsciio/digitalsurf/__init__.py +++ b/rsciio/digitalsurf/__init__.py @@ -1,9 +1,6 @@ from ._api import file_reader, file_writer -__all__ = [ - "file_reader", - "file_writer" -] +__all__ = ["file_reader", "file_writer"] def __dir__(): diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index d3558173c..0930c9da0 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -47,13 +47,19 @@ # import rsciio.utils.tools # DictionaryTreeBrowser class handles the fancy metadata dictionnaries # from hyperspy.misc.utils import DictionaryTreeBrowser -from rsciio._docstrings import FILENAME_DOC, LAZY_UNSUPPORTED_DOC, RETURNS_DOC, SIGNAL_DOC +from rsciio._docstrings import ( + FILENAME_DOC, + LAZY_UNSUPPORTED_DOC, + RETURNS_DOC, + SIGNAL_DOC, +) from rsciio.utils.exceptions import MountainsMapFileError from rsciio.utils.rgb_tools import is_rgb, is_rgba from rsciio.utils.date_time_tools import get_date_time_from_metadata _logger = logging.getLogger(__name__) + class DigitalSurfHandler(object): """Class to read Digital Surf MountainsMap files. 
@@ -84,21 +90,21 @@ class DigitalSurfHandler(object): 6: "_MERIDIANDISC", 7: "_MULTILAYERPROFILE", 8: "_MULTILAYERSURFACE", - 9: "_PARALLELDISC", #not implemented + 9: "_PARALLELDISC", # not implemented 10: "_INTENSITYIMAGE", 11: "_INTENSITYSURFACE", 12: "_RGBIMAGE", - 13: "_RGBSURFACE", #Deprecated - 14: "_FORCECURVE", #Deprecated - 15: "_SERIEOFFORCECURVE", #Deprecated - 16: "_RGBINTENSITYSURFACE", #Surface + Image + 13: "_RGBSURFACE", # Deprecated + 14: "_FORCECURVE", # Deprecated + 15: "_SERIEOFFORCECURVE", # Deprecated + 16: "_RGBINTENSITYSURFACE", # Surface + Image 17: "_CONTOURPROFILE", 18: "_SERIESOFRGBIMAGES", 20: "_SPECTRUM", 21: "_HYPCARD", } - def __init__(self, filename : str = ''): + def __init__(self, filename: str = ""): # We do not need to check for file existence here because # io module implements it in the load function self.filename = filename @@ -120,7 +126,7 @@ def __init__(self, filename : str = ''): # _work_dict['Field']['b_pack_fn'](f,v): pack value v in file f self._work_dict = { "_01_Signature": { - "value": "DSCOMPRESSED", #Uncompressed key is DIGITAL SURF + "value": "DSCOMPRESSED", # Uncompressed key is DIGITAL SURF "b_unpack_fn": lambda f: self._get_str(f, 12, "DSCOMPRESSED"), "b_pack_fn": lambda f, v: self._set_str(f, v, 12), }, @@ -146,12 +152,12 @@ def __init__(self, filename : str = ''): }, "_06_Object_Name": { "value": "", - "b_unpack_fn": lambda f: self._get_str(f, 30, ''), + "b_unpack_fn": lambda f: self._get_str(f, 30, ""), "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, "_07_Operator_Name": { "value": "ROSETTA", - "b_unpack_fn": lambda f: self._get_str(f, 30, ''), + "b_unpack_fn": lambda f: self._get_str(f, 30, ""), "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, "_08_P_Size": { @@ -310,7 +316,7 @@ def __init__(self, filename : str = ''): "b_pack_fn": self._set_int16, }, "_39_Obsolete": { - "value": b'', + "value": b"", "b_unpack_fn": lambda f: self._get_bytes(f, 12), "b_pack_fn": lambda f, v: self._set_bytes(f, 
v, 12), }, @@ -360,7 +366,7 @@ def __init__(self, filename : str = ''): "b_pack_fn": self._set_uint32, }, "_49_Obsolete": { - "value": b'', + "value": b"", "b_unpack_fn": lambda f: self._get_bytes(f, 6), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 6), }, @@ -375,7 +381,7 @@ def __init__(self, filename : str = ''): "b_pack_fn": self._set_int16, }, "_52_Client_zone": { - "value": b'', + "value": b"", "b_unpack_fn": lambda f: self._get_bytes(f, 128), "b_pack_fn": lambda f, v: self._set_bytes(f, v, 128), }, @@ -420,7 +426,7 @@ def __init__(self, filename : str = ''): "b_pack_fn": self._pack_comment, }, "_61_Private_zone": { - "value": b'', + "value": b"", "b_unpack_fn": self._unpack_private, "b_pack_fn": self._pack_private, }, @@ -454,52 +460,55 @@ def __init__(self, filename : str = ''): self._n_ax_sig: int = 0 # All as a rsciio-convention axis dict or empty - self.Xaxis: dict = {} - self.Yaxis: dict = {} - self.Zaxis: dict = {} + self.Xaxis: dict = {} + self.Yaxis: dict = {} + self.Zaxis: dict = {} self.Taxis: dict = {} # These must be set in the split functions self.data_split = [] self.objtype_split = [] - + # File Writer Inner methods def _write_sur_file(self): - """Write self._list_sur_file_content to a file. This method is + """Write self._list_sur_file_content to a file. This method is start-and-forget. The brainwork is performed in the construction of sur_file_content list of dictionaries.""" with open(self.filename, "wb") as f: for dic in self._list_sur_file_content: - # Extremely important! self._work_dict must access - # other fields to properly encode and decode data, + # Extremely important! self._work_dict must access + # other fields to properly encode and decode data, # comments etc. etc. 
self._move_values_to_workdict(dic) # Then inner consistency is trivial for key in self._work_dict: - self._work_dict[key]['b_pack_fn'](f,self._work_dict[key]['value']) - - def _build_sur_file_contents(self, - set_comments:str='auto', - is_special:bool=False, - compressed:bool=True, - comments: dict = {}, - object_name: str = '', - operator_name: str = '', - absolute: int = 0, - private_zone: bytes = b'', - client_zone: bytes = b'' - ): - """Build the _sur_file_content list necessary to write a signal dictionary to - a ``.sur`` or ``.pro`` file. The signal dictionary's inner consistency is the + self._work_dict[key]["b_pack_fn"](f, self._work_dict[key]["value"]) + + def _build_sur_file_contents( + self, + set_comments: str = "auto", + is_special: bool = False, + compressed: bool = True, + comments: dict = {}, + object_name: str = "", + operator_name: str = "", + absolute: int = 0, + private_zone: bytes = b"", + client_zone: bytes = b"", + ): + """Build the _sur_file_content list necessary to write a signal dictionary to + a ``.sur`` or ``.pro`` file. 
The signal dictionary's inner consistency is the responsibility of hyperspy, and the this function's responsibility is to make a consistent list of _sur_file_content.""" self._list_sur_file_content = [] - #Compute number of navigation / signal axes - self._n_ax_nav, self._n_ax_sig = DigitalSurfHandler._get_n_axes(self.signal_dict) + # Compute number of navigation / signal axes + self._n_ax_nav, self._n_ax_sig = DigitalSurfHandler._get_n_axes( + self.signal_dict + ) # Choose object type based on number of navigation and signal axes # Populate self._Object_type @@ -507,40 +516,42 @@ def _build_sur_file_contents(self, # Populate self.data_split and self.objtype_split (always) self._split_signal_dict() - #Raise error if wrong extension + # Raise error if wrong extension # self._validate_filename() - #Get a dictionary to be saved in the comment fielt of exported file - comment_dict = self._get_comment_dict(self.signal_dict['original_metadata'], - method=set_comments, - custom=comments) - #Convert the dictionary to a string of suitable format. - comment_str = self._stringify_dict(comment_dict) + # Get a dictionary to be saved in the comment fielt of exported file + comment_dict = self._get_comment_dict( + self.signal_dict["original_metadata"], method=set_comments, custom=comments + ) + # Convert the dictionary to a string of suitable format. + comment_str = self._stringify_dict(comment_dict) # A _work_dict is created for each of the data arrays and object # that have splitted from the main object. In most cases, only a # single object is present in the split. 
- for data,objtype in zip(self.data_split,self.objtype_split): - self._build_workdict(data, - objtype, - self.signal_dict['metadata'], - comment=comment_str, - is_special=is_special, - compressed=compressed, - object_name=object_name, - operator_name=operator_name, - absolute=absolute, - private_zone=private_zone, - client_zone=client_zone) - # if the objects are multiple, comment is erased after the first + for data, objtype in zip(self.data_split, self.objtype_split): + self._build_workdict( + data, + objtype, + self.signal_dict["metadata"], + comment=comment_str, + is_special=is_special, + compressed=compressed, + object_name=object_name, + operator_name=operator_name, + absolute=absolute, + private_zone=private_zone, + client_zone=client_zone, + ) + # if the objects are multiple, comment is erased after the first # object. This is not mandatory, but makes marginally smaller files. if comment_str: - comment_str = '' + comment_str = "" # Finally we push it all to the content list. self._append_work_dict_to_content() - - #Signal dictionary analysis methods + + # Signal dictionary analysis methods @staticmethod def _get_n_axes(sig_dict: dict): """Return number of navigation and signal axes in the signal dict (in that order). @@ -554,214 +565,244 @@ def _get_n_axes(sig_dict: dict): """ nax_nav = 0 nax_sig = 0 - for ax in sig_dict['axes']: - if ax['navigate']: + for ax in sig_dict["axes"]: + if ax["navigate"]: nax_nav += 1 else: nax_sig += 1 return nax_nav, nax_sig - + def _is_spectrum(self) -> bool: """Determine if a signal is a spectrum type based on axes naming for export of sur_files. Could be cross-checked with other criteria such as hyperspy subclass etc... For now we keep it simple. If it has - an ax named like a spectral axis, then probably its a spectrum. 
""" + an ax named like a spectral axis, then probably its a spectrum.""" - spectrumlike_axnames = ['Wavelength', 'Energy', 'Energy Loss', 'E'] + spectrumlike_axnames = ["Wavelength", "Energy", "Energy Loss", "E"] is_spec = False - for ax in self.signal_dict['axes']: - if ax['name'] in spectrumlike_axnames: + for ax in self.signal_dict["axes"]: + if ax["name"] in spectrumlike_axnames: is_spec = True return is_spec def _is_binary(self) -> bool: - return self.signal_dict['data'].dtype == bool + return self.signal_dict["data"].dtype == bool - #Splitting /subclassing methods + # Splitting /subclassing methods def _split_signal_dict(self): - """Select the suitable _mountains_object_types """ - + """Select the suitable _mountains_object_types""" + n_nav = self._n_ax_nav n_sig = self._n_ax_sig - #Here, I manually unfold the nested conditions for legibility. - #Since there are a fixed number of dimensions supported by - # digitalsurf .sur/.pro files, I think this is the best way to + # Here, I manually unfold the nested conditions for legibility. + # Since there are a fixed number of dimensions supported by + # digitalsurf .sur/.pro files, I think this is the best way to # proceed. 
- if (n_nav,n_sig) == (0,1): + if (n_nav, n_sig) == (0, 1): if self._is_spectrum(): self._split_spectrum() else: self._split_profile() - elif (n_nav,n_sig) == (0,2): + elif (n_nav, n_sig) == (0, 2): if self._is_binary(): self._split_binary_img() - elif is_rgb(self.signal_dict['data']): #"_RGBIMAGE" + elif is_rgb(self.signal_dict["data"]): # "_RGBIMAGE" self._split_rgb() - elif is_rgba(self.signal_dict['data']): - warnings.warn(f"A channel discarded upon saving \ - RGBA signal in .sur format") + elif is_rgba(self.signal_dict["data"]): + warnings.warn( + f"A channel discarded upon saving \ + RGBA signal in .sur format" + ) self._split_rgb() - else: # _INTENSITYSURFACE + else: # _INTENSITYSURFACE self._split_surface() - elif (n_nav,n_sig) == (1,0): - warnings.warn(f"Exporting surface signal dimension {n_sig} and navigation dimension \ + elif (n_nav, n_sig) == (1, 0): + warnings.warn( + f"Exporting surface signal dimension {n_sig} and navigation dimension \ {n_nav} falls back on profile type but is not good practice. Consider \ - transposing before saving to avoid unexpected behaviour.") + transposing before saving to avoid unexpected behaviour." + ) self._split_profile() - elif (n_nav,n_sig) == (1,1): + elif (n_nav, n_sig) == (1, 1): if self._is_spectrum(): self._split_spectrum() else: self._split_profileserie() - elif (n_nav,n_sig) == (1,2): - if is_rgb(self.signal_dict['data']): + elif (n_nav, n_sig) == (1, 2): + if is_rgb(self.signal_dict["data"]): self._split_rgbserie() - elif is_rgba(self.signal_dict['data']): - warnings.warn(f"Alpha channel discarded upon saving RGBA signal in .sur format") + elif is_rgba(self.signal_dict["data"]): + warnings.warn( + f"Alpha channel discarded upon saving RGBA signal in .sur format" + ) self._split_rgbserie() else: self._split_surfaceserie() - elif (n_nav,n_sig) == (2,0): - warnings.warn(f"Signal dimension {n_sig} and navigation dimension {n_nav} exported as surface type. 
Consider transposing signal object before exporting if this is intentional.") + elif (n_nav, n_sig) == (2, 0): + warnings.warn( + f"Signal dimension {n_sig} and navigation dimension {n_nav} exported as surface type. Consider transposing signal object before exporting if this is intentional." + ) if self._is_binary(): self._split_binary_img() - elif is_rgb(self.signal_dict['data']): #"_RGBIMAGE" + elif is_rgb(self.signal_dict["data"]): # "_RGBIMAGE" self._split_rgb() - elif is_rgba(self.signal_dict['data']): - warnings.warn(f"A channel discarded upon saving \ - RGBA signal in .sur format") + elif is_rgba(self.signal_dict["data"]): + warnings.warn( + f"A channel discarded upon saving \ + RGBA signal in .sur format" + ) self._split_rgb() else: self._split_surface() - elif (n_nav,n_sig) == (2,1): + elif (n_nav, n_sig) == (2, 1): self._split_hyperspectral() else: - raise MountainsMapFileError(msg=f"Object with signal dimension {n_sig} and navigation dimension {n_nav} not supported for .sur export") + raise MountainsMapFileError( + msg=f"Object with signal dimension {n_sig} and navigation dimension {n_nav} not supported for .sur export" + ) - def _split_spectrum(self,): + def _split_spectrum( + self, + ): """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" - #When splitting spectrum, no series axis (T/W), - #X axis is the spectral dimension and Y the series dimension (if series). + # When splitting spectrum, no series axis (T/W), + # X axis is the spectral dimension and Y the series dimension (if series). 
obj_type = 20 self._Object_type = self._mountains_object_types[obj_type] nax_nav = self._n_ax_nav nax_sig = self._n_ax_sig - if (nax_nav,nax_sig)==(0,1) or (nax_nav,nax_sig)==(1,0): - self.Xaxis = self.signal_dict['axes'][0] - elif (nax_nav,nax_sig)==(1,1): - self.Xaxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) - self.Yaxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + if (nax_nav, nax_sig) == (0, 1) or (nax_nav, nax_sig) == (1, 0): + self.Xaxis = self.signal_dict["axes"][0] + elif (nax_nav, nax_sig) == (1, 1): + self.Xaxis = next( + ax for ax in self.signal_dict["axes"] if not ax["navigate"] + ) + self.Yaxis = next(ax for ax in self.signal_dict["axes"] if ax["navigate"]) else: - raise MountainsMapFileError(f"Dimensions ({nax_nav})|{nax_sig}) invalid for export as spectrum type") - - self.data_split = [self.signal_dict['data']] + raise MountainsMapFileError( + f"Dimensions ({nax_nav})|{nax_sig}) invalid for export as spectrum type" + ) + + self.data_split = [self.signal_dict["data"]] self.objtype_split = [obj_type] self._N_data_objects = 1 self._N_data_channels = 1 - - def _split_profile(self,): + + def _split_profile( + self, + ): """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" - + obj_type = 1 self._Object_type = self._mountains_object_types[obj_type] - self.Xaxis = self.signal_dict['axes'][0] - self.data_split = [self.signal_dict['data']] + self.Xaxis = self.signal_dict["axes"][0] + self.data_split = [self.signal_dict["data"]] self.objtype_split = [obj_type] self._N_data_objects = 1 self._N_data_channels = 1 - def _split_profileserie(self,): + def _split_profileserie( + self, + ): """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 4 # '_PROFILESERIE' self._Object_type = self._mountains_object_types[obj_type] - self.Xaxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) - self.Taxis = 
next(ax for ax in self.signal_dict['axes'] if ax['navigate']) - + self.Xaxis = next(ax for ax in self.signal_dict["axes"] if not ax["navigate"]) + self.Taxis = next(ax for ax in self.signal_dict["axes"] if ax["navigate"]) + self.data_split = self._split_data_alongaxis(self.Taxis) - self.objtype_split = [obj_type] + [1]*(len(self.data_split)-1) + self.objtype_split = [obj_type] + [1] * (len(self.data_split) - 1) self._N_data_objects = len(self.objtype_split) self._N_data_channels = 1 - def _split_binary_img(self,): + def _split_binary_img( + self, + ): """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 3 self._Object_type = self._mountains_object_types[obj_type] - self.Xaxis = self.signal_dict['axes'][1] - self.Yaxis = self.signal_dict['axes'][0] + self.Xaxis = self.signal_dict["axes"][1] + self.Yaxis = self.signal_dict["axes"][0] - self.data_split = [self.signal_dict['data']] + self.data_split = [self.signal_dict["data"]] self.objtype_split = [obj_type] self._N_data_objects = 1 self._N_data_channels = 1 - def _split_rgb(self,): + def _split_rgb( + self, + ): """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 12 self._Object_type = self._mountains_object_types[obj_type] - self.Xaxis = self.signal_dict['axes'][1] - self.Yaxis = self.signal_dict['axes'][0] - self.data_split = [np.int32(self.signal_dict['data']['R']), - np.int32(self.signal_dict['data']['G']), - np.int32(self.signal_dict['data']['B']) - ] - self.objtype_split = [obj_type] + [10,10] + self.Xaxis = self.signal_dict["axes"][1] + self.Yaxis = self.signal_dict["axes"][0] + self.data_split = [ + np.int32(self.signal_dict["data"]["R"]), + np.int32(self.signal_dict["data"]["G"]), + np.int32(self.signal_dict["data"]["B"]), + ] + self.objtype_split = [obj_type] + [10, 10] self._N_data_objects = 1 self._N_data_channels = 3 - def _split_surface(self,): + def _split_surface( + self, + ): """Set 
_Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 2 self._Object_type = self._mountains_object_types[obj_type] - self.Xaxis = self.signal_dict['axes'][1] - self.Yaxis = self.signal_dict['axes'][0] - self.data_split = [self.signal_dict['data']] + self.Xaxis = self.signal_dict["axes"][1] + self.Yaxis = self.signal_dict["axes"][0] + self.data_split = [self.signal_dict["data"]] self.objtype_split = [obj_type] self._N_data_objects = 1 self._N_data_channels = 1 def _split_rgbserie(self): """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" - obj_type = 18 #"_SERIESOFRGBIMAGE" + obj_type = 18 # "_SERIESOFRGBIMAGE" self._Object_type = self._mountains_object_types[obj_type] - sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + sigaxes_iter = iter(ax for ax in self.signal_dict["axes"] if not ax["navigate"]) self.Yaxis = next(sigaxes_iter) self.Xaxis = next(sigaxes_iter) - self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + self.Taxis = next(ax for ax in self.signal_dict["axes"] if ax["navigate"]) tmp_data_split = self._split_data_alongaxis(self.Taxis) # self.data_split = [] self.objtype_split = [] for d in tmp_data_split: - self.data_split += [d['R'].astype(np.int16).copy(), - d['G'].astype(np.int16).copy(), - d['B'].astype(np.int16).copy(), - ] + self.data_split += [ + d["R"].astype(np.int16).copy(), + d["G"].astype(np.int16).copy(), + d["B"].astype(np.int16).copy(), + ] # self.objtype_split += [12,10,10] - self.objtype_split = [12,10,10]*self.Taxis['size'] + self.objtype_split = [12, 10, 10] * self.Taxis["size"] self.objtype_split[0] = obj_type # self.data_split = rgbx2regular_array(self.signal_dict['data']) - self._N_data_objects = self.Taxis['size'] + self._N_data_objects = self.Taxis["size"] self._N_data_channels = 3 def _split_surfaceserie(self): """Set _Object_type, axes except Z, data_split, objtype_split 
_N_data_objects, _N_data_channels""" obj_type = 5 self._Object_type = self._mountains_object_types[obj_type] - sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if not ax['navigate']) + sigaxes_iter = iter(ax for ax in self.signal_dict["axes"] if not ax["navigate"]) self.Yaxis = next(sigaxes_iter) self.Xaxis = next(sigaxes_iter) - self.Taxis = next(ax for ax in self.signal_dict['axes'] if ax['navigate']) + self.Taxis = next(ax for ax in self.signal_dict["axes"] if ax["navigate"]) self.data_split = self._split_data_alongaxis(self.Taxis) - self.objtype_split = [2]*len(self.data_split) + self.objtype_split = [2] * len(self.data_split) self.objtype_split[0] = obj_type self._N_data_objects = len(self.data_split) self._N_data_channels = 1 @@ -770,22 +811,22 @@ def _split_hyperspectral(self): """Set _Object_type, axes except Z, data_split, objtype_split _N_data_objects, _N_data_channels""" obj_type = 21 self._Object_type = self._mountains_object_types[obj_type] - sigaxes_iter = iter(ax for ax in self.signal_dict['axes'] if ax['navigate']) + sigaxes_iter = iter(ax for ax in self.signal_dict["axes"] if ax["navigate"]) self.Yaxis = next(sigaxes_iter) self.Xaxis = next(sigaxes_iter) - self.Taxis = next(ax for ax in self.signal_dict['axes'] if not ax['navigate']) - self.data_split = [self.signal_dict['data']] + self.Taxis = next(ax for ax in self.signal_dict["axes"] if not ax["navigate"]) + self.data_split = [self.signal_dict["data"]] self.objtype_split = [obj_type] self._N_data_objects = 1 self._N_data_channels = 1 def _split_data_alongaxis(self, axis: dict): - """Split the data in a series of lower-dim datasets that can be exported to + """Split the data in a series of lower-dim datasets that can be exported to a surface / profile file""" - idx = self.signal_dict['axes'].index(axis) + idx = self.signal_dict["axes"].index(axis) # return idx datasplit = [] - for dslice in np.rollaxis(self.signal_dict['data'],idx): + for dslice in np.rollaxis(self.signal_dict["data"], 
idx): datasplit.append(dslice) return datasplit @@ -805,57 +846,63 @@ def _norm_data(self, data: np.ndarray, is_special: bool): tuple[int,int,int,float,float,np.ndarray[int]]: pointsize, Zmin, Zmax, Zscale, Zoffset, data_int """ data_type = data.dtype - - if np.issubdtype(data_type,np.complexfloating): - raise MountainsMapFileError(f"digitalsurf file formats do not support export of complex data. Convert data to real-value representations before before export") - elif data_type==bool: + + if np.issubdtype(data_type, np.complexfloating): + raise MountainsMapFileError( + f"digitalsurf file formats do not support export of complex data. Convert data to real-value representations before export" + ) + elif data_type == bool: pointsize = 16 Zmin = 0 Zmax = 1 Zscale = 1 Zoffset = 0 data_int = data.astype(np.int16) - elif data_type==np.uint8: + elif data_type == np.uint8: warnings.warn("np.uint8 datatype exported as np.int16.") pointsize = 16 Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) data_int = data.astype(np.int16) - elif data_type==np.uint16: + elif data_type == np.uint16: warnings.warn("np.uint16 datatype exported as np.int32") - pointsize = 32 #Pointsize has to be 16 or 32 in surf format + pointsize = 32 # Pointsize has to be 16 or 32 in surf format Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) data_int = data.astype(np.int32) - elif np.issubdtype(data_type,np.unsignedinteger): - raise MountainsMapFileError(f"digitalsurf file formats do not support unsigned int >16bits. Convert data to signed integers before export.") - elif data_type==np.int8: - pointsize = 16 #Pointsize has to be 16 or 32 in surf format + elif np.issubdtype(data_type, np.unsignedinteger): + raise MountainsMapFileError( + f"digitalsurf file formats do not support unsigned int >16bits. Convert data to signed integers before export." 
+ ) + elif data_type == np.int8: + pointsize = 16 # Pointsize has to be 16 or 32 in surf format Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) data_int = data.astype(np.int16) - elif data_type==np.int16: + elif data_type == np.int16: pointsize = 16 Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) data_int = data - elif data_type==np.int32: + elif data_type == np.int32: pointsize = 32 data_int = data Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) - elif np.issubdtype(data_type,np.integer): - raise MountainsMapFileError(f"digitalsurf file formats do not support export integers larger than 32 bits. Convert data to 32-bit representation before exporting") - elif np.issubdtype(data_type,np.floating): + elif np.issubdtype(data_type, np.integer): + raise MountainsMapFileError( + f"digitalsurf file formats do not support export integers larger than 32 bits. Convert data to 32-bit representation before exporting" + ) + elif np.issubdtype(data_type, np.floating): pointsize = 32 Zmin, Zmax, Zscale, Zoffset, data_int = self._norm_float(data, is_special) return pointsize, Zmin, Zmax, Zscale, Zoffset, data_int - def _norm_signed_int(self, data:np.ndarray, is_special: bool = False): - """Normalized data of integer type. No normalization per se, but the Zmin and Zmax + def _norm_signed_int(self, data: np.ndarray, is_special: bool = False): + """Normalized data of integer type. No normalization per se, but the Zmin and Zmax threshold are set if saturation flagging is asked.""" # There are no NaN values for integers. Special points means saturation of integer scale. 
data_int_min = np.iinfo(data.dtype).min data_int_max = np.iinfo(data.dtype).max - is_satlo = (data==data_int_min).sum() >= 1 and is_special - is_sathi = (data==data_int_max).sum() >= 1 and is_special + is_satlo = (data == data_int_min).sum() >= 1 and is_special + is_sathi = (data == data_int_max).sum() >= 1 and is_special Zmin = data_int_min + 1 if is_satlo else data.min() Zmax = data_int_max - 1 if is_sathi else data.max() @@ -864,24 +911,28 @@ def _norm_signed_int(self, data:np.ndarray, is_special: bool = False): return Zmin, Zmax, Zscale, Zoffset - def _norm_float(self, data : np.ndarray, is_special: bool = False,): + def _norm_float( + self, + data: np.ndarray, + is_special: bool = False, + ): """Normalize float data on a 32 bits int scale. Inherently lossy - but that's how things are with mountainsmap files. """ + but that's how things are with mountainsmap files.""" - Zoffset_f = np.nanmin(data) - Zmax_f = np.nanmax(data) - is_nan = np.any(np.isnan(data)) + Zoffset_f = np.nanmin(data) + Zmax_f = np.nanmax(data) + is_nan = np.any(np.isnan(data)) if is_special and is_nan: - Zmin = - 2**(32-1) + 2 - Zmax = 2**32 + Zmin - 3 + Zmin = -(2 ** (32 - 1)) + 2 + Zmax = 2**32 + Zmin - 3 else: - Zmin = - 2**(32-1) - Zmax = 2**32 + Zmin - 1 - - Zscale = (Zmax_f - Zoffset_f)/(Zmax - Zmin) - data_int = (data - Zoffset_f)/Zscale + Zmin - + Zmin = -(2 ** (32 - 1)) + Zmax = 2**32 + Zmin - 1 + + Zscale = (Zmax_f - Zoffset_f) / (Zmax - Zmin) + data_int = (data - Zoffset_f) / Zscale + Zmin + if is_special and is_nan: data_int[np.isnan(data)] = Zmin - 2 @@ -890,156 +941,187 @@ def _norm_float(self, data : np.ndarray, is_special: bool = False,): return Zmin, Zmax, Zscale, Zoffset_f, data_int def _get_Zname_Zunit(self, metadata: dict): - """Attempt reading Z-axis name and Unit from metadata.Signal.Quantity field. + """Attempt reading Z-axis name and Unit from metadata.Signal.Quantity field. Return empty str if do not exist. 
Returns: tuple[str,str]: Zname,Zunit """ - quantitystr: str = metadata.get('Signal',{}).get('quantity','') + quantitystr: str = metadata.get("Signal", {}).get("quantity", "") quantitystr = quantitystr.strip() - quantity = quantitystr.split(' ') - if len(quantity)>1: + quantity = quantitystr.split(" ") + if len(quantity) > 1: Zunit = quantity.pop() - Zunit = Zunit.strip('()') - Zname = ' '.join(quantity) - elif len(quantity)==1: + Zunit = Zunit.strip("()") + Zname = " ".join(quantity) + elif len(quantity) == 1: Zname = quantity.pop() - Zunit = '' + Zunit = "" else: - Zname = '' - Zunit = '' - - return Zname,Zunit - - def _build_workdict(self, - data: np.ndarray, - obj_type: int, - metadata: dict = {}, - comment: str = "", - is_special: bool = True, - compressed: bool = True, - object_name: str = '', - operator_name: str = '', - absolute: int = 0, - private_zone: bytes = b'', - client_zone: bytes = b'' - ): - """Populate _work_dict with the """ + Zname = "" + Zunit = "" + + return Zname, Zunit + + def _build_workdict( + self, + data: np.ndarray, + obj_type: int, + metadata: dict = {}, + comment: str = "", + is_special: bool = True, + compressed: bool = True, + object_name: str = "", + operator_name: str = "", + absolute: int = 0, + private_zone: bytes = b"", + client_zone: bytes = b"", + ): + """Populate _work_dict with the""" if not compressed: - self._work_dict['_01_Signature']['value'] = 'DIGITAL SURF' # DSCOMPRESSED by default + self._work_dict["_01_Signature"][ + "value" + ] = "DIGITAL SURF" # DSCOMPRESSED by default else: - self._work_dict['_01_Signature']['value'] = 'DSCOMPRESSED' # DSCOMPRESSED by default + self._work_dict["_01_Signature"][ + "value" + ] = "DSCOMPRESSED" # DSCOMPRESSED by default # self._work_dict['_02_Format']['value'] = 0 # Dft. other possible value is 257 for MacintoshII computers with Motorola CPUs. Obv not supported... 
- self._work_dict['_03_Number_of_Objects']['value'] = self._N_data_objects + self._work_dict["_03_Number_of_Objects"]["value"] = self._N_data_objects # self._work_dict['_04_Version']['value'] = 1 # Version number. Always default. - self._work_dict['_05_Object_Type']['value'] = obj_type - self._work_dict['_06_Object_Name']['value'] = object_name #Obsolete, DOS-version only (Not supported) - self._work_dict['_07_Operator_Name']['value'] = operator_name #Should be settable from kwargs - self._work_dict['_08_P_Size']['value'] = self._N_data_channels - - self._work_dict['_09_Acquisition_Type']['value'] = 0 # AFM data only, could be inferred - self._work_dict['_10_Range_Type']['value'] = 0 #Only 1 for high-range (z-stage scanning), AFM data only, could be inferred - - self._work_dict['_11_Special_Points']['value'] = int(is_special) - - self._work_dict['_12_Absolute']['value'] = absolute #Probably irrelevant in most cases. Absolute vs rel heights (for profilometers), can be inferred - self._work_dict['_13_Gauge_Resolution']['value'] = 0.0 #Probably irrelevant. Only for profilometers (maybe AFM), can be inferred + self._work_dict["_05_Object_Type"]["value"] = obj_type + self._work_dict["_06_Object_Name"][ + "value" + ] = object_name # Obsolete, DOS-version only (Not supported) + self._work_dict["_07_Operator_Name"][ + "value" + ] = operator_name # Should be settable from kwargs + self._work_dict["_08_P_Size"]["value"] = self._N_data_channels + + self._work_dict["_09_Acquisition_Type"][ + "value" + ] = 0 # AFM data only, could be inferred + self._work_dict["_10_Range_Type"][ + "value" + ] = 0 # Only 1 for high-range (z-stage scanning), AFM data only, could be inferred + + self._work_dict["_11_Special_Points"]["value"] = int(is_special) + + self._work_dict["_12_Absolute"][ + "value" + ] = absolute # Probably irrelevant in most cases. 
Absolute vs rel heights (for profilometers), can be inferred + self._work_dict["_13_Gauge_Resolution"][ + "value" + ] = 0.0 # Probably irrelevant. Only for profilometers (maybe AFM), can be inferred # T-axis acts as W-axis for spectrum / hyperspectrum surfaces. if obj_type in [21]: - ws = self.Taxis.get('size',0) + ws = self.Taxis.get("size", 0) else: ws = 0 - self._work_dict['_14_W_Size']['value'] = ws + self._work_dict["_14_W_Size"]["value"] = ws - bsize, Zmin, Zmax, Zscale, Zoffset, data_int = self._norm_data(data,is_special) + bsize, Zmin, Zmax, Zscale, Zoffset, data_int = self._norm_data(data, is_special) Zname, Zunit = self._get_Zname_Zunit(metadata) - #Axes element set regardless of object size - self._work_dict['_15_Size_of_Points']['value'] = bsize - self._work_dict['_16_Zmin']['value'] = Zmin - self._work_dict['_17_Zmax']['value'] = Zmax - self._work_dict['_18_Number_of_Points']['value']= self.Xaxis.get('size',1) - self._work_dict['_19_Number_of_Lines']['value'] = self.Yaxis.get('size',1) - #This needs to be this way due to the way we export our hyp maps - self._work_dict['_20_Total_Nb_of_Pts']['value'] = self.Xaxis.get('size',1)*self.Yaxis.get('size',1) - - self._work_dict['_21_X_Spacing']['value'] = self.Xaxis.get('scale',0.0) - self._work_dict['_22_Y_Spacing']['value'] = self.Yaxis.get('scale',0.0) - self._work_dict['_23_Z_Spacing']['value'] = Zscale - self._work_dict['_24_Name_of_X_Axis']['value'] = self.Xaxis.get('name','') - self._work_dict['_25_Name_of_Y_Axis']['value'] = self.Yaxis.get('name','') - self._work_dict['_26_Name_of_Z_Axis']['value'] = Zname - self._work_dict['_27_X_Step_Unit']['value'] = self.Xaxis.get('units','') - self._work_dict['_28_Y_Step_Unit']['value'] = self.Yaxis.get('units','') - self._work_dict['_29_Z_Step_Unit']['value'] = Zunit - self._work_dict['_30_X_Length_Unit']['value'] = self.Xaxis.get('units','') - self._work_dict['_31_Y_Length_Unit']['value'] = self.Yaxis.get('units','') - 
self._work_dict['_32_Z_Length_Unit']['value'] = Zunit - self._work_dict['_33_X_Unit_Ratio']['value'] = 1 - self._work_dict['_34_Y_Unit_Ratio']['value'] = 1 - self._work_dict['_35_Z_Unit_Ratio']['value'] = 1 - + # Axes element set regardless of object size + self._work_dict["_15_Size_of_Points"]["value"] = bsize + self._work_dict["_16_Zmin"]["value"] = Zmin + self._work_dict["_17_Zmax"]["value"] = Zmax + self._work_dict["_18_Number_of_Points"]["value"] = self.Xaxis.get("size", 1) + self._work_dict["_19_Number_of_Lines"]["value"] = self.Yaxis.get("size", 1) + # This needs to be this way due to the way we export our hyp maps + self._work_dict["_20_Total_Nb_of_Pts"]["value"] = self.Xaxis.get( + "size", 1 + ) * self.Yaxis.get("size", 1) + + self._work_dict["_21_X_Spacing"]["value"] = self.Xaxis.get("scale", 0.0) + self._work_dict["_22_Y_Spacing"]["value"] = self.Yaxis.get("scale", 0.0) + self._work_dict["_23_Z_Spacing"]["value"] = Zscale + self._work_dict["_24_Name_of_X_Axis"]["value"] = self.Xaxis.get("name", "") + self._work_dict["_25_Name_of_Y_Axis"]["value"] = self.Yaxis.get("name", "") + self._work_dict["_26_Name_of_Z_Axis"]["value"] = Zname + self._work_dict["_27_X_Step_Unit"]["value"] = self.Xaxis.get("units", "") + self._work_dict["_28_Y_Step_Unit"]["value"] = self.Yaxis.get("units", "") + self._work_dict["_29_Z_Step_Unit"]["value"] = Zunit + self._work_dict["_30_X_Length_Unit"]["value"] = self.Xaxis.get("units", "") + self._work_dict["_31_Y_Length_Unit"]["value"] = self.Yaxis.get("units", "") + self._work_dict["_32_Z_Length_Unit"]["value"] = Zunit + self._work_dict["_33_X_Unit_Ratio"]["value"] = 1 + self._work_dict["_34_Y_Unit_Ratio"]["value"] = 1 + self._work_dict["_35_Z_Unit_Ratio"]["value"] = 1 + # _36_Imprint -> Obsolete # _37_Inverted -> Always No # _38_Levelled -> Always No # _39_Obsolete -> Obsolete - - dt: datetime.datetime = get_date_time_from_metadata(metadata,formatting='datetime') + + dt: datetime.datetime = get_date_time_from_metadata( + metadata, 
formatting="datetime" + ) if dt is not None: - self._work_dict['_40_Seconds']['value'] = dt.second - self._work_dict['_41_Minutes']['value'] = dt.minute - self._work_dict['_42_Hours']['value'] = dt.hour - self._work_dict['_43_Day']['value'] = dt.day - self._work_dict['_44_Month']['value'] = dt.month - self._work_dict['_45_Year']['value'] = dt.year - self._work_dict['_46_Day_of_week']['value'] = dt.weekday() + self._work_dict["_40_Seconds"]["value"] = dt.second + self._work_dict["_41_Minutes"]["value"] = dt.minute + self._work_dict["_42_Hours"]["value"] = dt.hour + self._work_dict["_43_Day"]["value"] = dt.day + self._work_dict["_44_Month"]["value"] = dt.month + self._work_dict["_45_Year"]["value"] = dt.year + self._work_dict["_46_Day_of_week"]["value"] = dt.weekday() # _47_Measurement_duration -> Nonsaved and non-metadata, but float in seconds - + if compressed: - data_bin = self._compress_data(data_int,nstreams=1) #nstreams hard-set to 1. Could be unlocked in the future + data_bin = self._compress_data( + data_int, nstreams=1 + ) # nstreams hard-set to 1. 
Could be unlocked in the future compressed_size = len(data_bin) else: - fmt = " 2**15: - warnings.warn(f"Comment exceeding max length of 32.0 kB and will be cropped") + warnings.warn( + f"Comment exceeding max length of 32.0 kB and will be cropped" + ) comment_len = np.int16(2**15) - self._work_dict['_50_Comment_size']['value'] = comment_len - + self._work_dict["_50_Comment_size"]["value"] = comment_len + privatesize = len(private_zone) if privatesize > 2**15: - warnings.warn(f"Private size exceeding max length of 32.0 kB and will be cropped") + warnings.warn( + f"Private size exceeding max length of 32.0 kB and will be cropped" + ) privatesize = np.int16(2**15) - - self._work_dict['_51_Private_size']['value'] = privatesize - - self._work_dict['_52_Client_zone']['value'] = client_zone - self._work_dict['_53_X_Offset']['value'] = self.Xaxis.get('offset',0.0) - self._work_dict['_54_Y_Offset']['value'] = self.Yaxis.get('offset',0.0) - self._work_dict['_55_Z_Offset']['value'] = Zoffset - self._work_dict['_56_T_Spacing']['value'] = self.Taxis.get('scale',0.0) - self._work_dict['_57_T_Offset']['value'] = self.Taxis.get('offset',0.0) - self._work_dict['_58_T_Axis_Name']['value'] = self.Taxis.get('name','') - self._work_dict['_59_T_Step_Unit']['value'] = self.Taxis.get('units','') + self._work_dict["_51_Private_size"]["value"] = privatesize + + self._work_dict["_52_Client_zone"]["value"] = client_zone + + self._work_dict["_53_X_Offset"]["value"] = self.Xaxis.get("offset", 0.0) + self._work_dict["_54_Y_Offset"]["value"] = self.Yaxis.get("offset", 0.0) + self._work_dict["_55_Z_Offset"]["value"] = Zoffset + self._work_dict["_56_T_Spacing"]["value"] = self.Taxis.get("scale", 0.0) + self._work_dict["_57_T_Offset"]["value"] = self.Taxis.get("offset", 0.0) + self._work_dict["_58_T_Axis_Name"]["value"] = self.Taxis.get("name", "") + self._work_dict["_59_T_Step_Unit"]["value"] = self.Taxis.get("units", "") - self._work_dict['_60_Comment']['value'] = comment + 
self._work_dict["_60_Comment"]["value"] = comment - self._work_dict['_61_Private_zone']['value'] = private_zone - self._work_dict['_62_points']['value'] = data_bin + self._work_dict["_61_Private_zone"]["value"] = private_zone + self._work_dict["_62_points"]["value"] = data_bin # Read methods def _read_sur_file(self): @@ -1054,7 +1136,9 @@ def _read_sur_file(self): # We append the first object to the content list self._append_work_dict_to_content() # Lookup how many objects are stored in the file and save - self._N_data_objects = self._get_work_dict_key_value("_03_Number_of_Objects") + self._N_data_objects = self._get_work_dict_key_value( + "_03_Number_of_Objects" + ) self._N_data_channels = self._get_work_dict_key_value("_08_P_Size") # Determine how many objects we need to read @@ -1091,9 +1175,9 @@ def _append_work_dict_to_content(self): datadict = deepcopy({key: val["value"] for key, val in self._work_dict.items()}) self._list_sur_file_content.append(datadict) - def _move_values_to_workdict(self,dic:dict): + def _move_values_to_workdict(self, dic: dict): for key in self._work_dict: - self._work_dict[key]['value'] = deepcopy(dic[key]) + self._work_dict[key]["value"] = deepcopy(dic[key]) def _get_work_dict_key_value(self, key): return self._work_dict[key]["value"] @@ -1114,7 +1198,7 @@ def _build_sur_dict(self): elif self._Object_type in ["_BINARYIMAGE"]: self._build_surface() self.signal_dict.update({"post_process": [self.post_process_binary]}) - elif self._Object_type in ["_SURFACE","_INTENSITYIMAGE"]: + elif self._Object_type in ["_SURFACE", "_INTENSITYIMAGE"]: self._build_surface() elif self._Object_type in ["_SURFACESERIE"]: self._build_surface_series() @@ -1126,12 +1210,12 @@ def _build_sur_dict(self): self._build_RGB_image() elif self._Object_type in ["_RGBINTENSITYSURFACE"]: self._build_RGB_surface() - elif self._Object_type in ['_SERIESOFRGBIMAGES']: + elif self._Object_type in ["_SERIESOFRGBIMAGES"]: self._build_RGB_image_series() else: raise 
MountainsMapFileError( f"{self._Object_type} is not a supported mountain object." - ) + ) return self.signal_dict @@ -1305,7 +1389,9 @@ def _build_1D_series( self.signal_dict["data"] = np.stack(data) - def _build_surface(self,): + def _build_surface( + self, + ): """Build a surface""" # Check that the object contained only one object. @@ -1326,7 +1412,9 @@ def _build_surface(self,): self._set_metadata_and_original_metadata(hypdic) - def _build_surface_series(self,): + def _build_surface_series( + self, + ): """Build a series of surfaces. The T axis is navigation and set from the first object""" @@ -1385,7 +1473,9 @@ def _build_RGB_surface( # Pushing data into the dictionary self.signal_dict["data"] = np.stack(data) - def _build_RGB_image(self,): + def _build_RGB_image( + self, + ): """Build an RGB image. The T axis is navigation and set from P Size""" @@ -1416,8 +1506,10 @@ def _build_RGB_image(self,): self.signal_dict.update({"post_process": [self.post_process_RGB]}) - def _build_RGB_image_series(self,): - + def _build_RGB_image_series( + self, + ): + # First object dictionary hypdic = self._list_sur_file_content[0] @@ -1438,17 +1530,17 @@ def _build_RGB_image_series(self,): nimg = hypdic["_03_Number_of_Objects"] nchan = hypdic["_08_P_Size"] # We put all the data together - data = np.empty(shape=(nimg,*shape,nchan)) + data = np.empty(shape=(nimg, *shape, nchan)) i = 0 for imgidx in range(nimg): for chanidx in range(nchan): obj = self._list_sur_file_content[i] - data[imgidx,...,chanidx] = obj["_62_points"].reshape(shape) - i+=1 + data[imgidx, ..., chanidx] = obj["_62_points"].reshape(shape) + i += 1 # for obj in self._list_sur_file_content: # data.append(obj["_62_points"].reshape(shape)) - + # data = np.stack(data) # data = data.reshape(nimg,nchan,*shape) @@ -1540,14 +1632,16 @@ def _build_generic_metadata(self, unpacked_dict): return metadict - def _build_original_metadata(self,): + def _build_original_metadata( + self, + ): """Builds a metadata dictionary from 
the header""" original_metadata_dict = {} # Iteration over Number of data objects for i in range(self._N_data_objects): # Iteration over the Number of Data channels - for j in range(max(self._N_data_channels,1)): + for j in range(max(self._N_data_channels, 1)): # Creating a dictionary key for each object k = (i + 1) * (j + 1) key = "Object_{:d}_Channel_{:d}".format(i, j) @@ -1575,7 +1669,9 @@ def _build_original_metadata(self,): return original_metadata_dict - def _build_signal_specific_metadata(self,) -> dict: + def _build_signal_specific_metadata( + self, + ) -> dict: """Build additional metadata specific to signal type. return a dictionary for update in the metadata.""" if self.signal_dict["metadata"]["Signal"]["signal_type"] == "CL": @@ -1798,7 +1894,7 @@ def _MS_parse(str_ms, prefix, delimiter): dict_ms[key_main] = {} elif line.startswith(prefix): if keymain is None: - keymain = 'UNTITLED' + keymain = "UNTITLED" dict_ms[key_main] = {} key, *li_value = line.split(delimiter) # Key is also stripped from beginning or end whitespace @@ -1809,7 +1905,9 @@ def _MS_parse(str_ms, prefix, delimiter): li_value = str_value.split(" ") try: if key == "Grating": - dict_ms[key_main][key] = li_value[0] # we don't want to eval this one + dict_ms[key_main][key] = li_value[ + 0 + ] # we don't want to eval this one else: dict_ms[key_main][key] = ast.literal_eval(li_value[0]) except Exception: @@ -1819,20 +1917,22 @@ def _MS_parse(str_ms, prefix, delimiter): return dict_ms @staticmethod - def _get_comment_dict(original_metadata: dict, method: str = 'auto', custom: dict = {}) -> dict: + def _get_comment_dict( + original_metadata: dict, method: str = "auto", custom: dict = {} + ) -> dict: """Return the dictionary used to set the dataset comments (akA custom parameters) while exporting a file. By default (method='auto'), tries to identify if the object was originally imported by rosettasciio - from a digitalsurf .sur/.pro file with a comment field parsed as original_metadata (i.e. 
- Object_0_Channel_0.Parsed). In that case, digitalsurf ignores non-parsed original metadata - (ie .sur/.pro file headers). If the original metadata contains multiple objects with + from a digitalsurf .sur/.pro file with a comment field parsed as original_metadata (i.e. + Object_0_Channel_0.Parsed). In that case, digitalsurf ignores non-parsed original metadata + (ie .sur/.pro file headers). If the original metadata contains multiple objects with non-empty parsed content (Object_0_Channel_0.Parsed, Object_0_Channel_1.Parsed etc...), only - the first non-empty X.Parsed sub-dictionary is returned. This falls back on returning the + the first non-empty X.Parsed sub-dictionary is returned. This falls back on returning the raw 'original_metadata' Optionally the raw 'original_metadata' dictionary can be exported (method='raw'), a custom dictionary provided by the user (method='custom'), or no comment at all (method='off') - + Args: method (str, optional): method to export. Defaults to 'auto'. custom (dict, optional): custom dictionary. Ignored unless method is set to 'custom', Defaults to {}. @@ -1842,77 +1942,83 @@ def _get_comment_dict(original_metadata: dict, method: str = 'auto', custom: dic Returns: dict: dictionary to be exported as a .sur object - """ - if method == 'raw': + """ + if method == "raw": return original_metadata - elif method == 'custom': + elif method == "custom": return custom - elif method == 'off': + elif method == "off": return {} - elif method == 'auto': + elif method == "auto": pattern = re.compile("Object_\d*_Channel_\d*") omd = original_metadata - #filter original metadata content of dict type and matching pattern. - validfields = [omd[key] for key in omd if pattern.match(key) and isinstance(omd[key],dict)] - #In case none match, give up filtering and return raw + # filter original metadata content of dict type and matching pattern. 
+ validfields = [ + omd[key] + for key in omd + if pattern.match(key) and isinstance(omd[key], dict) + ] + # In case none match, give up filtering and return raw if not validfields: return omd - #In case some match, return first non-empty "Parsed" sub-dict + # In case some match, return first non-empty "Parsed" sub-dict for field in validfields: - #Return none for non-existing "Parsed" key - candidate = field.get('Parsed') - #For non-none, non-empty dict-type candidate - if candidate and isinstance(candidate,dict): + # Return none for non-existing "Parsed" key + candidate = field.get("Parsed") + # For non-none, non-empty dict-type candidate + if candidate and isinstance(candidate, dict): return candidate - #dict casting for non-none but non-dict candidate + # dict casting for non-none but non-dict candidate elif candidate is not None: - return {'Parsed': candidate} - #else none candidate, or empty dict -> do nothing - #Finally, if valid fields are present but no candidate - #did a non-empty return, it is safe to return empty + return {"Parsed": candidate} + # else none candidate, or empty dict -> do nothing + # Finally, if valid fields are present but no candidate + # did a non-empty return, it is safe to return empty return {} else: - raise MountainsMapFileError(f"Non-valid method for setting mountainsmap file comment. Choose one of: 'auto','raw','custom','off' ") - + raise MountainsMapFileError( + f"Non-valid method for setting mountainsmap file comment. Choose one of: 'auto','raw','custom','off' " + ) + @staticmethod def _stringify_dict(omd: dict): """Pack nested dictionary metadata into a string. Pack dictionary-type elements into digitalsurf "Section title" metadata type ('$_ preceding section title). Pack other elements into equal-sign separated key-value pairs. - + Supports the key-units logic {'key': value, 'key_units': 'un'} used in hyperspy. 
""" - #Separate dict into list of keys and list of values to authorize index-based pop/insert + # Separate dict into list of keys and list of values to authorize index-based pop/insert keys_queue = list(omd.keys()) vals_queue = list(omd.values()) - #commentstring to be returned + # commentstring to be returned cmtstr: str = "" - #Loop until queues are empty + # Loop until queues are empty while keys_queue: - #pop first object + # pop first object k = keys_queue.pop(0) v = vals_queue.pop(0) - #if object is header - if isinstance(v,dict): + # if object is header + if isinstance(v, dict): cmtstr += f"$_{k}\n" keys_queue = list(v.keys()) + keys_queue vals_queue = list(v.values()) + vals_queue else: try: - ku_idx = keys_queue.index(k + '_units') + ku_idx = keys_queue.index(k + "_units") has_units = True except ValueError: ku_idx = None has_units = False - + if has_units: _ = keys_queue.pop(ku_idx) vu = vals_queue.pop(ku_idx) cmtstr += f"${k} = {v.__repr__()} {vu}\n" else: cmtstr += f"${k} = {v.__repr__()}\n" - + return cmtstr # Post processing @@ -1928,16 +2034,17 @@ def post_process_RGB(signal): signal.change_dtype("rgb16") else: warnings.warn( - """RGB-announced data could not be converted to + """RGB-announced data could not be converted to uint8 or uint16 datatype""" ) return signal - + @staticmethod def post_process_binary(signal): - signal.change_dtype('bool') + signal.change_dtype("bool") return signal + # pack/unpack binary quantities @staticmethod @@ -2057,14 +2164,20 @@ def _pack_private(self, file, val, encoding="latin-1"): privatesize = self._get_work_dict_key_value("_51_Private_size") self._set_str(file, val, privatesize) - def _is_data_int(self,): + def _is_data_int( + self, + ): """Determine wether data consists of unscaled int values. - This is not the case for all objects. Surface and surface series can admit + This is not the case for all objects. Surface and surface series can admit this logic. 
In theory, hyperspectral studiables as well but it is more convenient to use them as floats due to typical data treatment in hyperspy (scaling etc)""" - objtype = self._mountains_object_types[self._get_work_dict_key_value("_05_Object_Type")] - if objtype in ['_SURFACESERIE','_SURFACE']: - scale = self._get_work_dict_key_value("_23_Z_Spacing") / self._get_work_dict_key_value("_35_Z_Unit_Ratio") + objtype = self._mountains_object_types[ + self._get_work_dict_key_value("_05_Object_Type") + ] + if objtype in ["_SURFACESERIE", "_SURFACE"]: + scale = self._get_work_dict_key_value( + "_23_Z_Spacing" + ) / self._get_work_dict_key_value("_35_Z_Unit_Ratio") offset = self._get_work_dict_key_value("_55_Z_Offset") if float(scale).is_integer() and float(offset).is_integer(): return True @@ -2073,14 +2186,22 @@ def _is_data_int(self,): else: return False - def _is_data_scaleint(self,): + def _is_data_scaleint( + self, + ): """Digitalsurf image formats are not stored as their raw int values, but instead are - scaled and a scale / offset is set so that the data scales down to uint. Why this is - done this way is not clear to me. """ - objtype = self._mountains_object_types[self._get_work_dict_key_value("_05_Object_Type")] - if objtype in ['_BINARYIMAGE', '_RGBIMAGE', - '_RGBSURFACE', '_SERIESOFRGBIMAGES', - '_INTENSITYIMAGE']: + scaled and a scale / offset is set so that the data scales down to uint. Why this is + done this way is not clear to me.""" + objtype = self._mountains_object_types[ + self._get_work_dict_key_value("_05_Object_Type") + ] + if objtype in [ + "_BINARYIMAGE", + "_RGBIMAGE", + "_RGBSURFACE", + "_SERIESOFRGBIMAGES", + "_INTENSITYIMAGE", + ]: return True def _get_uncompressed_datasize(self) -> int: @@ -2089,9 +2210,9 @@ def _get_uncompressed_datasize(self) -> int: # Datapoints in X and Y dimensions Npts_tot = self._get_work_dict_key_value("_20_Total_Nb_of_Pts") # Datasize in WL. 
max between value and 1 as often W_Size saved as 0 - Wsize = max(self._get_work_dict_key_value("_14_W_Size"),1) + Wsize = max(self._get_work_dict_key_value("_14_W_Size"), 1) # Wsize = 1 - + datasize = Npts_tot * Wsize * psize return datasize @@ -2109,7 +2230,7 @@ def _unpack_data(self, file, encoding="latin-1"): # Datapoints in X and Y dimensions Npts_tot = self._get_work_dict_key_value("_20_Total_Nb_of_Pts") # Datasize in WL - Wsize = max(self._get_work_dict_key_value("_14_W_Size"),1) + Wsize = max(self._get_work_dict_key_value("_14_W_Size"), 1) # We need to take into account the fact that Wsize is often # set to 0 instead of 1 in non-spectral data to compute the @@ -2151,64 +2272,68 @@ def _unpack_data(self, file, encoding="latin-1"): if self._get_work_dict_key_value("_11_Special_Points") == 1: # has non-measured points nm = _points == self._get_work_dict_key_value("_16_Zmin") - 2 - + Zmin = self._get_work_dict_key_value("_16_Zmin") - scale = self._get_work_dict_key_value("_23_Z_Spacing") / self._get_work_dict_key_value("_35_Z_Unit_Ratio") + scale = self._get_work_dict_key_value( + "_23_Z_Spacing" + ) / self._get_work_dict_key_value("_35_Z_Unit_Ratio") offset = self._get_work_dict_key_value("_55_Z_Offset") # Packing data into ints or float, with or without scaling. 
if self._is_data_int(): _points = _points elif self._is_data_scaleint(): - _points = (_points.astype(float) - Zmin)*scale + offset + _points = (_points.astype(float) - Zmin) * scale + offset _points = np.round(_points).astype(int) else: - _points = (_points.astype(float) - Zmin)*scale + offset - _points[nm] = np.nan #Ints have no nans + _points = (_points.astype(float) - Zmin) * scale + offset + _points[nm] = np.nan # Ints have no nans # Return the points, rescaled return _points def _pack_data(self, file, val, encoding="latin-1"): """This needs to be special because it writes until the end of file.""" - #Also valid for uncompressed + # Also valid for uncompressed if self._get_work_dict_key_value("_01_Signature") != "DSCOMPRESSED": datasize = self._get_uncompressed_datasize() else: - datasize = self._get_work_dict_key_value('_48_Compressed_data_size') - self._set_bytes(file,val,datasize) + datasize = self._get_work_dict_key_value("_48_Compressed_data_size") + self._set_bytes(file, val, datasize) @staticmethod def _compress_data(data_int, nstreams: int = 1) -> bytes: """Pack the input data using the digitalsurf zip approach and return the result as a - binary string ready to be written onto a file. """ + binary string ready to be written onto a file.""" - if nstreams <= 0 or nstreams >8 : - raise MountainsMapFileError(f"Number of compression streams must be >= 1, <= 8") - - bstr = b'' + if nstreams <= 0 or nstreams > 8: + raise MountainsMapFileError( + f"Number of compression streams must be >= 1, <= 8" + ) + + bstr = b"" bstr += struct.pack(" bytes: return bstr + def file_reader(filename, lazy=False): """ Read a mountainsmap ``.sur`` or ``.pro`` file. 
@@ -2241,18 +2367,20 @@ def file_reader(filename, lazy=False): surdict, ] -def file_writer(filename, - signal: dict, - set_comments: str = 'auto', - is_special: bool = False, - compressed: bool = True, - comments: dict = {}, - object_name: str = '', - operator_name: str = '', - absolute: int = 0, - private_zone: bytes = b'', - client_zone: bytes = b'' - ): + +def file_writer( + filename, + signal: dict, + set_comments: str = "auto", + is_special: bool = False, + compressed: bool = True, + comments: dict = {}, + object_name: str = "", + operator_name: str = "", + absolute: int = 0, + private_zone: bytes = b"", + client_zone: bytes = b"", +): """ Write a mountainsmap ``.sur`` or ``.pro`` file. @@ -2265,13 +2393,13 @@ def file_writer(filename, exported as the raw original_metadata dictionary ('raw'), skipped ('off'), or supplied by the user as an additional kwarg ('custom'). is_special : bool , default = False - If True, NaN values in the dataset or integers reaching boundary values are + If True, NaN values in the dataset or integers reaching boundary values are flagged in the export as non-measured and saturating, respectively. If False, those values are kept as-is. compressed : bool, default =True If True, compress the data in the export file using zlib. comments : dict, default = {} - Set a custom dictionnary in the comments field of the exported file. + Set a custom dictionnary in the comments field of the exported file. Ignored if set_comments is not set to 'custom'. object_name : str, default = '' Set the object name field in the output file. @@ -2279,10 +2407,10 @@ def file_writer(filename, Set the operator name field in the exported file. absolute : int, default = 0, Unsigned int capable of flagging whether surface heights are relative (0) or - absolute (1). Higher unsigned int values can be used to distinguish several + absolute (1). Higher unsigned int values can be used to distinguish several data series sharing internal reference. 
private_zone : bytes, default = b'', - Set arbitrary byte-content in the private_zone field of exported file metadata. + Set arbitrary byte-content in the private_zone field of exported file metadata. Maximum size is 32.0 kB and content will be cropped if this size is exceeded. client_zone : bytes, default = b'' Set arbitrary byte-content in the client_zone field of exported file metadata. @@ -2291,16 +2419,19 @@ def file_writer(filename, ds = DigitalSurfHandler(filename=filename) ds.signal_dict = signal - ds._build_sur_file_contents(set_comments, - is_special, - compressed, - comments, - object_name, - operator_name, - absolute, - private_zone, - client_zone) + ds._build_sur_file_contents( + set_comments, + is_special, + compressed, + comments, + object_name, + operator_name, + absolute, + private_zone, + client_zone, + ) ds._write_sur_file() -file_reader.__doc__ %= (FILENAME_DOC,LAZY_UNSUPPORTED_DOC,RETURNS_DOC) -file_writer.__doc__ %= (FILENAME_DOC,SIGNAL_DOC) + +file_reader.__doc__ %= (FILENAME_DOC, LAZY_UNSUPPORTED_DOC, RETURNS_DOC) +file_writer.__doc__ %= (FILENAME_DOC, SIGNAL_DOC) diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index d08f27ba2..9fdafcdd2 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -495,132 +495,147 @@ def test_metadata_mapping(): "exit_slit_width" ] == 7000 - ) + ) def test_compressdata(): testdat = np.arange(120, dtype=np.int32) - #Refuse too many / neg streams + # Refuse too many / neg streams with pytest.raises(MountainsMapFileError): - DigitalSurfHandler._compress_data(testdat,nstreams=9) + DigitalSurfHandler._compress_data(testdat, nstreams=9) with pytest.raises(MountainsMapFileError): - DigitalSurfHandler._compress_data(testdat,nstreams=-1) - + DigitalSurfHandler._compress_data(testdat, nstreams=-1) + # Accept 1 (dft) or several streams bcomp = DigitalSurfHandler._compress_data(testdat) - assert bcomp.startswith(b'\x01\x00\x00\x00\xe0\x01\x00\x00') - bcomp = 
DigitalSurfHandler._compress_data(testdat,nstreams=2) - assert bcomp.startswith(b'\x02\x00\x00\x00\xf0\x00\x00\x00_\x00\x00\x00') + assert bcomp.startswith(b"\x01\x00\x00\x00\xe0\x01\x00\x00") + bcomp = DigitalSurfHandler._compress_data(testdat, nstreams=2) + assert bcomp.startswith(b"\x02\x00\x00\x00\xf0\x00\x00\x00_\x00\x00\x00") # Accept 16-bits int as well as 32 testdat = np.arange(120, dtype=np.int16) bcomp = DigitalSurfHandler._compress_data(testdat) - assert bcomp.startswith(b'\x01\x00\x00\x00\xf0\x00\x00\x00') - + assert bcomp.startswith(b"\x01\x00\x00\x00\xf0\x00\x00\x00") # Also streams non-perfectly divided data testdat = np.arange(120, dtype=np.int16) bcomp = DigitalSurfHandler._compress_data(testdat) - assert bcomp.startswith(b'\x01\x00\x00\x00\xf0\x00\x00\x00') + assert bcomp.startswith(b"\x01\x00\x00\x00\xf0\x00\x00\x00") testdat = np.arange(127, dtype=np.int16) - bcomp = DigitalSurfHandler._compress_data(testdat,nstreams=3) - assert bcomp.startswith(b'\x03\x00\x00\x00V\x00\x00\x00C\x00\x00\x00'+ - b'V\x00\x00\x00F\x00\x00\x00'+ - b'R\x00\x00\x00B\x00\x00\x00') + bcomp = DigitalSurfHandler._compress_data(testdat, nstreams=3) + assert bcomp.startswith( + b"\x03\x00\x00\x00V\x00\x00\x00C\x00\x00\x00" + + b"V\x00\x00\x00F\x00\x00\x00" + + b"R\x00\x00\x00B\x00\x00\x00" + ) def test_get_comment_dict(): - omd={'Object_0_Channel_0':{ - 'Parsed':{ - 'key_1': 1, - 'key_2':'2' - } - } - } + omd = {"Object_0_Channel_0": {"Parsed": {"key_1": 1, "key_2": "2"}}} - assert DigitalSurfHandler._get_comment_dict(omd,'auto')=={'key_1': 1,'key_2':'2'} - assert DigitalSurfHandler._get_comment_dict(omd,'off')=={} - assert DigitalSurfHandler._get_comment_dict(omd,'raw')=={'Object_0_Channel_0':{'Parsed':{'key_1': 1,'key_2':'2'}}} - assert DigitalSurfHandler._get_comment_dict(omd,'custom',custom={'a':0}) == {'a':0} + assert DigitalSurfHandler._get_comment_dict(omd, "auto") == { + "key_1": 1, + "key_2": "2", + } + assert DigitalSurfHandler._get_comment_dict(omd, "off") == {} + 
assert DigitalSurfHandler._get_comment_dict(omd, "raw") == { + "Object_0_Channel_0": {"Parsed": {"key_1": 1, "key_2": "2"}} + } + assert DigitalSurfHandler._get_comment_dict(omd, "custom", custom={"a": 0}) == { + "a": 0 + } - #Goes to second dict if only this one's valid - omd={ - 'Object_0_Channel_0':{'Header':{}}, - 'Object_0_Channel_1':{'Header':'ObjHead','Parsed':{'key_1': '0'}}, + # Goes to second dict if only this one's valid + omd = { + "Object_0_Channel_0": {"Header": {}}, + "Object_0_Channel_1": {"Header": "ObjHead", "Parsed": {"key_1": "0"}}, } - assert DigitalSurfHandler._get_comment_dict(omd, 'auto') == {'key_1': '0'} + assert DigitalSurfHandler._get_comment_dict(omd, "auto") == {"key_1": "0"} - #Return empty if none valid - omd={ - 'Object_0_Channel_0':{'Header':{}}, - 'Object_0_Channel_1':{'Header':'ObjHead'}, + # Return empty if none valid + omd = { + "Object_0_Channel_0": {"Header": {}}, + "Object_0_Channel_1": {"Header": "ObjHead"}, } - assert DigitalSurfHandler._get_comment_dict(omd,'auto') == {} + assert DigitalSurfHandler._get_comment_dict(omd, "auto") == {} - #Return dict-cast if a single field is named 'Parsed' (weird case) - omd={ - 'Object_0_Channel_0':{'Header':{}}, - 'Object_0_Channel_1':{'Header':'ObjHead','Parsed':'SomeContent'}, + # Return dict-cast if a single field is named 'Parsed' (weird case) + omd = { + "Object_0_Channel_0": {"Header": {}}, + "Object_0_Channel_1": {"Header": "ObjHead", "Parsed": "SomeContent"}, + } + assert DigitalSurfHandler._get_comment_dict(omd, "auto") == { + "Parsed": "SomeContent" } - assert DigitalSurfHandler._get_comment_dict(omd,'auto') == {'Parsed':'SomeContent'} -@pytest.mark.parametrize("test_object", ["test_profile.pro", - "test_spectra.pro", - "test_spectral_map.sur", - "test_spectral_map_compressed.sur", - "test_spectrum.pro", - "test_spectrum_compressed.pro", - "test_isurface.sur"]) -def test_writetestobjects(tmp_path,test_object): +@pytest.mark.parametrize( + "test_object", + [ + 
"test_profile.pro", + "test_spectra.pro", + "test_spectral_map.sur", + "test_spectral_map_compressed.sur", + "test_spectrum.pro", + "test_spectrum_compressed.pro", + "test_isurface.sur", + ], +) +def test_writetestobjects(tmp_path, test_object): """Test data integrity of load/save functions. Starting from externally-generated data (i.e. not from hyperspy)""" df = TEST_DATA_PATH.joinpath(test_object) d = hs.load(df) fn = tmp_path.joinpath(test_object) - d.save(fn,is_special=False) + d.save(fn, is_special=False) d2 = hs.load(fn) - d2.save(fn,is_special=False) + d2.save(fn, is_special=False) d3 = hs.load(fn) - assert np.allclose(d2.data,d.data) - assert np.allclose(d2.data,d3.data) - + assert np.allclose(d2.data, d.data) + assert np.allclose(d2.data, d3.data) + a = d.axes_manager.navigation_axes b = d2.axes_manager.navigation_axes c = d3.axes_manager.navigation_axes - for ax,ax2,ax3 in zip(a,b,c): - assert np.allclose(ax.axis,ax2.axis) - assert np.allclose(ax.axis,ax3.axis) + for ax, ax2, ax3 in zip(a, b, c): + assert np.allclose(ax.axis, ax2.axis) + assert np.allclose(ax.axis, ax3.axis) a = d.axes_manager.signal_axes b = d2.axes_manager.signal_axes c = d3.axes_manager.signal_axes - for ax,ax2,ax3 in zip(a,b,c): - assert np.allclose(ax.axis,ax2.axis) - assert np.allclose(ax.axis,ax3.axis) - -@pytest.mark.parametrize("test_tuple ", [("test_profile.pro",'_PROFILE'), - ("test_spectra.pro",'_SPECTRUM'), - ("test_spectral_map.sur",'_HYPCARD'), - ("test_spectral_map_compressed.sur",'_HYPCARD'), - ("test_spectrum.pro",'_SPECTRUM'), - ("test_spectrum_compressed.pro",'_SPECTRUM'), - ("test_surface.sur",'_SURFACE'), - ('test_RGB.sur','_RGBIMAGE')]) + for ax, ax2, ax3 in zip(a, b, c): + assert np.allclose(ax.axis, ax2.axis) + assert np.allclose(ax.axis, ax3.axis) + + +@pytest.mark.parametrize( + "test_tuple ", + [ + ("test_profile.pro", "_PROFILE"), + ("test_spectra.pro", "_SPECTRUM"), + ("test_spectral_map.sur", "_HYPCARD"), + ("test_spectral_map_compressed.sur", "_HYPCARD"), + 
("test_spectrum.pro", "_SPECTRUM"), + ("test_spectrum_compressed.pro", "_SPECTRUM"), + ("test_surface.sur", "_SURFACE"), + ("test_RGB.sur", "_RGBIMAGE"), + ], +) def test_split(test_tuple): """Test for expected object type in the reference dataset""" obj = test_tuple[0] res = test_tuple[1] df = TEST_DATA_PATH.joinpath(obj) - dh= DigitalSurfHandler(obj) + dh = DigitalSurfHandler(obj) d = hs.load(df) dh.signal_dict = d._to_dictionary() @@ -629,12 +644,13 @@ def test_split(test_tuple): assert dh._Object_type == res + @pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.uint8, np.uint16]) -@pytest.mark.parametrize('special',[True,False]) -@pytest.mark.parametrize('fullscale',[True,False]) -def test_norm_int_data(dtype,special,fullscale): +@pytest.mark.parametrize("special", [True, False]) +@pytest.mark.parametrize("fullscale", [True, False]) +def test_norm_int_data(dtype, special, fullscale): dh = DigitalSurfHandler() - + if fullscale: minint = np.iinfo(dtype).min maxint = np.iinfo(dtype).max @@ -642,199 +658,222 @@ def test_norm_int_data(dtype,special,fullscale): minint = np.iinfo(dtype).min + 23 maxint = np.iinfo(dtype).max - 9 - dat = np.random.randint(low=minint,high=maxint,size=222,dtype=dtype) - #Ensure the maximum and minimum off the int scale is actually present in data + dat = np.random.randint(low=minint, high=maxint, size=222, dtype=dtype) + # Ensure the maximum and minimum off the int scale is actually present in data if fullscale: dat[2] = minint dat[11] = maxint - pointsize, Zmin, Zmax, Zscale, Zoffset, data_int = dh._norm_data(dat,special) + pointsize, Zmin, Zmax, Zscale, Zoffset, data_int = dh._norm_data(dat, special) + + off = minint + 1 if special and fullscale else dat.min() + maxval = maxint - 1 if special and fullscale else dat.max() - off = minint+1 if special and fullscale else dat.min() - maxval = maxint-1 if special and fullscale else dat.max() + assert np.isclose(Zscale, 1.0) + assert np.isclose(Zoffset, off) + assert 
np.allclose(data_int, dat) + assert Zmin == off + assert Zmax == maxval - assert np.isclose(Zscale,1.0) - assert np.isclose(Zoffset,off) - assert np.allclose(data_int,dat) - assert Zmin==off - assert Zmax==maxval def test_writeRGB(tmp_path): # This is just a different test function because the - # comparison of rgb data must be done differently + # comparison of rgb data must be done differently # (due to hyperspy underlying structure) df = TEST_DATA_PATH.joinpath("test_RGB.sur") d = hs.load(df) fn = tmp_path.joinpath("test_RGB.sur") - d.save(fn,is_special=False) + d.save(fn, is_special=False) d2 = hs.load(fn) - d2.save(fn,is_special=False) + d2.save(fn, is_special=False) d3 = hs.load(fn) - for k in ['R','G','B']: - assert np.allclose(d2.data[k],d.data[k]) - assert np.allclose(d3.data[k],d.data[k]) + for k in ["R", "G", "B"]: + assert np.allclose(d2.data[k], d.data[k]) + assert np.allclose(d3.data[k], d.data[k]) a = d.axes_manager.navigation_axes b = d2.axes_manager.navigation_axes c = d3.axes_manager.navigation_axes - for ax,ax2,ax3 in zip(a,b,c): - assert np.allclose(ax.axis,ax2.axis) - assert np.allclose(ax.axis,ax3.axis) + for ax, ax2, ax3 in zip(a, b, c): + assert np.allclose(ax.axis, ax2.axis) + assert np.allclose(ax.axis, ax3.axis) a = d.axes_manager.signal_axes b = d2.axes_manager.signal_axes c = d3.axes_manager.signal_axes - for ax,ax2,ax3 in zip(a,b,c): - assert np.allclose(ax.axis,ax2.axis) - assert np.allclose(ax.axis,ax3.axis) + for ax, ax2, ax3 in zip(a, b, c): + assert np.allclose(ax.axis, ax2.axis) + assert np.allclose(ax.axis, ax3.axis) -@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.float64, np.uint8, np.uint16]) -@pytest.mark.parametrize('compressed',[True,False]) -def test_writegeneric_validtypes(tmp_path,dtype,compressed): - """This test establishes the capability of saving a generic hyperspy signals + +@pytest.mark.parametrize( + "dtype", [np.int8, np.int16, np.int32, np.float64, np.uint8, np.uint16] +) 
+@pytest.mark.parametrize("compressed", [True, False]) +def test_writegeneric_validtypes(tmp_path, dtype, compressed): + """This test establishes the capability of saving a generic hyperspy signals generated from numpy array""" - gen = hs.signals.Signal1D(np.arange(24,dtype=dtype))+25 - fgen = tmp_path.joinpath('test.pro') - gen.save(fgen,compressed = compressed, overwrite=True) + gen = hs.signals.Signal1D(np.arange(24, dtype=dtype)) + 25 + fgen = tmp_path.joinpath("test.pro") + gen.save(fgen, compressed=compressed, overwrite=True) gen2 = hs.load(fgen) - assert np.allclose(gen2.data,gen.data) - -@pytest.mark.parametrize("dtype", [np.int64, np.complex64, np.uint64, ]) -def test_writegeneric_failingtypes(tmp_path,dtype): - gen = hs.signals.Signal1D(np.arange(24,dtype=dtype))+25 - fgen = tmp_path.joinpath('test.pro') + assert np.allclose(gen2.data, gen.data) + + +@pytest.mark.parametrize( + "dtype", + [ + np.int64, + np.complex64, + np.uint64, + ], +) +def test_writegeneric_failingtypes(tmp_path, dtype): + gen = hs.signals.Signal1D(np.arange(24, dtype=dtype)) + 25 + fgen = tmp_path.joinpath("test.pro") with pytest.raises(MountainsMapFileError): - gen.save(fgen,overwrite= True) + gen.save(fgen, overwrite=True) -@pytest.mark.parametrize("dtype", [(np.uint8,"rgba8"), (np.uint16,"rgba16")]) -@pytest.mark.parametrize('compressed',[True,False]) -@pytest.mark.parametrize('transpose',[True,False]) -def test_writegeneric_rgba(tmp_path,dtype,compressed,transpose): - """This test establishes the possibility of saving RGBA data while discarding + +@pytest.mark.parametrize("dtype", [(np.uint8, "rgba8"), (np.uint16, "rgba16")]) +@pytest.mark.parametrize("compressed", [True, False]) +@pytest.mark.parametrize("transpose", [True, False]) +def test_writegeneric_rgba(tmp_path, dtype, compressed, transpose): + """This test establishes the possibility of saving RGBA data while discarding A channel and warning""" - size = (17,38,4) + size = (17, 38, 4) minint = np.iinfo(dtype[0]).min 
maxint = np.iinfo(dtype[0]).max - gen = hs.signals.Signal1D(np.random.randint(low=minint,high=maxint,size=size,dtype=dtype[0])) + gen = hs.signals.Signal1D( + np.random.randint(low=minint, high=maxint, size=size, dtype=dtype[0]) + ) gen.change_dtype(dtype[1]) - fgen = tmp_path.joinpath('test.sur') - + fgen = tmp_path.joinpath("test.sur") + if transpose: gen = gen.T with pytest.warns(): - gen.save(fgen,compressed = compressed, overwrite=True) + gen.save(fgen, compressed=compressed, overwrite=True) gen2 = hs.load(fgen) - for k in ['R','G','B']: - assert np.allclose(gen.data[k],gen2.data[k]) - assert np.allclose(gen.data[k],gen2.data[k]) + for k in ["R", "G", "B"]: + assert np.allclose(gen.data[k], gen2.data[k]) + assert np.allclose(gen.data[k], gen2.data[k]) + + +@pytest.mark.parametrize("compressed", [True, False]) +@pytest.mark.parametrize("transpose", [True, False]) +def test_writegeneric_binaryimg(tmp_path, compressed, transpose): -@pytest.mark.parametrize('compressed',[True,False]) -@pytest.mark.parametrize('transpose',[True,False]) -def test_writegeneric_binaryimg(tmp_path,compressed,transpose): - - size = (76,3) + size = (76, 3) - gen = hs.signals.Signal2D(np.random.randint(low=0,high=1,size=size,dtype=bool)) + gen = hs.signals.Signal2D(np.random.randint(low=0, high=1, size=size, dtype=bool)) + + fgen = tmp_path.joinpath("test.sur") - fgen = tmp_path.joinpath('test.sur') - if transpose: gen = gen.T with pytest.warns(): - gen.save(fgen,compressed = compressed, overwrite=True) + gen.save(fgen, compressed=compressed, overwrite=True) else: - gen.save(fgen,compressed = compressed, overwrite=True) + gen.save(fgen, compressed=compressed, overwrite=True) gen2 = hs.load(fgen) - assert np.allclose(gen.data,gen2.data) + assert np.allclose(gen.data, gen2.data) + + +@pytest.mark.parametrize("compressed", [True, False]) +def test_writegeneric_profileseries(tmp_path, compressed): -@pytest.mark.parametrize('compressed',[True,False]) -def 
test_writegeneric_profileseries(tmp_path,compressed): + size = (9, 655) - size = (9,655) + gen = hs.signals.Signal1D(np.random.random(size=size) * 1444 + 2550.0) + fgen = tmp_path.joinpath("test.pro") - gen = hs.signals.Signal1D(np.random.random(size=size)*1444+2550.) - fgen = tmp_path.joinpath('test.pro') - - gen.save(fgen,compressed = compressed, overwrite=True) + gen.save(fgen, compressed=compressed, overwrite=True) gen2 = hs.load(fgen) - assert np.allclose(gen.data,gen2.data) + assert np.allclose(gen.data, gen2.data) -@pytest.mark.parametrize("dtype", [(np.uint8,"rgb8"), (np.uint16,"rgb16")]) -@pytest.mark.parametrize('compressed',[True,False]) -def test_writegeneric_rgbseries(tmp_path,dtype,compressed): +@pytest.mark.parametrize("dtype", [(np.uint8, "rgb8"), (np.uint16, "rgb16")]) +@pytest.mark.parametrize("compressed", [True, False]) +def test_writegeneric_rgbseries(tmp_path, dtype, compressed): """This test establishes the possibility of saving RGB surface series""" - size = (5,44,24,3) + size = (5, 44, 24, 3) minint = np.iinfo(dtype[0]).min maxint = np.iinfo(dtype[0]).max - gen = hs.signals.Signal1D(np.random.randint(low=minint,high=maxint,size=size,dtype=dtype[0])) + gen = hs.signals.Signal1D( + np.random.randint(low=minint, high=maxint, size=size, dtype=dtype[0]) + ) gen.change_dtype(dtype[1]) - fgen = tmp_path.joinpath('test.sur') + fgen = tmp_path.joinpath("test.sur") - gen.save(fgen,compressed = compressed, overwrite=True) + gen.save(fgen, compressed=compressed, overwrite=True) gen2 = hs.load(fgen) - for k in ['R','G','B']: - assert np.allclose(gen.data[k],gen2.data[k]) + for k in ["R", "G", "B"]: + assert np.allclose(gen.data[k], gen2.data[k]) -@pytest.mark.parametrize("dtype", [(np.uint8,"rgba8"), (np.uint16,"rgba16")]) -@pytest.mark.parametrize('compressed',[True,False]) -def test_writegeneric_rgbaseries(tmp_path,dtype,compressed): - """This test establishes the possibility of saving RGBA data while discarding +@pytest.mark.parametrize("dtype", 
[(np.uint8, "rgba8"), (np.uint16, "rgba16")]) +@pytest.mark.parametrize("compressed", [True, False]) +def test_writegeneric_rgbaseries(tmp_path, dtype, compressed): + """This test establishes the possibility of saving RGBA data while discarding A channel and warning""" - size = (5,44,24,4) + size = (5, 44, 24, 4) minint = np.iinfo(dtype[0]).min maxint = np.iinfo(dtype[0]).max - gen = hs.signals.Signal1D(np.random.randint(low=minint,high=maxint,size=size,dtype=dtype[0])) + gen = hs.signals.Signal1D( + np.random.randint(low=minint, high=maxint, size=size, dtype=dtype[0]) + ) gen.change_dtype(dtype[1]) - fgen = tmp_path.joinpath('test.sur') + fgen = tmp_path.joinpath("test.sur") with pytest.warns(): - gen.save(fgen,compressed = compressed, overwrite=True) + gen.save(fgen, compressed=compressed, overwrite=True) gen2 = hs.load(fgen) - for k in ['R','G','B']: - assert np.allclose(gen.data[k],gen2.data[k]) + for k in ["R", "G", "B"]: + assert np.allclose(gen.data[k], gen2.data[k]) @pytest.mark.parametrize("dtype", [np.int16, np.int32, np.float64]) -@pytest.mark.parametrize("compressed",[True,False]) -def test_writegeneric_surfaceseries(tmp_path,dtype,compressed): - """This test establishes the possibility of saving RGBA surface series while discarding +@pytest.mark.parametrize("compressed", [True, False]) +def test_writegeneric_surfaceseries(tmp_path, dtype, compressed): + """This test establishes the possibility of saving RGBA surface series while discarding A channel and warning""" - size = (9,44,58) + size = (9, 44, 58) - if np.issubdtype(dtype,np.integer): + if np.issubdtype(dtype, np.integer): minint = np.iinfo(dtype).min maxint = np.iinfo(dtype).max - gen = hs.signals.Signal2D(np.random.randint(low=minint,high=maxint,size=size,dtype=dtype)) + gen = hs.signals.Signal2D( + np.random.randint(low=minint, high=maxint, size=size, dtype=dtype) + ) else: - gen = hs.signals.Signal2D(np.random.random(size=size).astype(dtype)*1e6) + gen = 
hs.signals.Signal2D(np.random.random(size=size).astype(dtype) * 1e6) - fgen = tmp_path.joinpath('test.sur') + fgen = tmp_path.joinpath("test.sur") - gen.save(fgen,compressed = compressed, overwrite=True) + gen.save(fgen, compressed=compressed, overwrite=True) gen2 = hs.load(fgen) - assert np.allclose(gen.data,gen2.data) \ No newline at end of file + assert np.allclose(gen.data, gen2.data) From d914fda926ef0a7e1f7374b08d2d4a88b88d0827 Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Tue, 25 Jun 2024 11:06:44 +0200 Subject: [PATCH 08/21] Increase codecov --- rsciio/digitalsurf/_api.py | 7 ++--- rsciio/tests/test_digitalsurf.py | 49 +++++++++++++++++++++++++++++--- 2 files changed, 47 insertions(+), 9 deletions(-) diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index 0930c9da0..718c5db96 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -676,6 +676,7 @@ def _split_spectrum( nax_nav = self._n_ax_nav nax_sig = self._n_ax_sig + # _split_signal_dict ensures that the correct dims are sent here. if (nax_nav, nax_sig) == (0, 1) or (nax_nav, nax_sig) == (1, 0): self.Xaxis = self.signal_dict["axes"][0] elif (nax_nav, nax_sig) == (1, 1): @@ -683,10 +684,6 @@ def _split_spectrum( ax for ax in self.signal_dict["axes"] if not ax["navigate"] ) self.Yaxis = next(ax for ax in self.signal_dict["axes"] if ax["navigate"]) - else: - raise MountainsMapFileError( - f"Dimensions ({nax_nav})|{nax_sig}) invalid for export as spectrum type" - ) self.data_split = [self.signal_dict["data"]] self.objtype_split = [obj_type] @@ -2281,7 +2278,7 @@ def _unpack_data(self, file, encoding="latin-1"): # Packing data into ints or float, with or without scaling. 
if self._is_data_int(): - _points = _points + pass #Case left here for future modification elif self._is_data_scaleint(): _points = (_points.astype(float) - Zmin) * scale + offset _points = np.round(_points).astype(int) diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index 9fdafcdd2..43655966a 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -675,17 +675,24 @@ def test_norm_int_data(dtype, special, fullscale): assert Zmin == off assert Zmax == maxval - -def test_writeRGB(tmp_path): +@pytest.mark.parametrize("transpose", [True, False]) +def test_writetestobjects_rgb(tmp_path,transpose): # This is just a different test function because the # comparison of rgb data must be done differently # (due to hyperspy underlying structure) df = TEST_DATA_PATH.joinpath("test_RGB.sur") d = hs.load(df) fn = tmp_path.joinpath("test_RGB.sur") - d.save(fn, is_special=False) + + if transpose: + d = d.T + with pytest.warns(): + d.save(fn) + else: + d.save(fn) + d2 = hs.load(fn) - d2.save(fn, is_special=False) + d2.save(fn) d3 = hs.load(fn) for k in ["R", "G", "B"]: @@ -723,6 +730,35 @@ def test_writegeneric_validtypes(tmp_path, dtype, compressed): gen2 = hs.load(fgen) assert np.allclose(gen2.data, gen.data) +@pytest.mark.parametrize("compressed", [True, False]) +def test_writegeneric_nans(tmp_path, compressed): + """This test establishes the capability of saving a generic signal + generated from numpy array containing floats""" + gen = hs.signals.Signal1D(np.random.random(size=301)) + + gen.data[66] = np.nan + gen.data[111] = np.nan + + fgen = tmp_path.joinpath("test.pro") + + gen.save(fgen, compressed=compressed, is_special=True, overwrite=True) + + gen2 = hs.load(fgen) + assert np.allclose(gen2.data, gen.data, equal_nan=True) + +def test_writegeneric_transposedprofile(tmp_path): + """This test checks the expected behaviour that a transposed profile gets + correctly saved but a warning is raised.""" + gen = 
hs.signals.Signal1D(np.random.random(size=99)) + gen = gen.T + + fgen = tmp_path.joinpath("test.pro") + + with pytest.warns(): + gen.save(fgen, overwrite=True) + + gen2 = hs.load(fgen) + assert np.allclose(gen2.data, gen.data) @pytest.mark.parametrize( "dtype", @@ -738,6 +774,11 @@ def test_writegeneric_failingtypes(tmp_path, dtype): with pytest.raises(MountainsMapFileError): gen.save(fgen, overwrite=True) +def test_writegeneric_failingformat(tmp_path): + gen = hs.signals.Signal1D(np.zeros((3,4,5,6))) + fgen = tmp_path.joinpath("test.sur") + with pytest.raises(MountainsMapFileError): + gen.save(fgen, overwrite=True) @pytest.mark.parametrize("dtype", [(np.uint8, "rgba8"), (np.uint16, "rgba16")]) @pytest.mark.parametrize("compressed", [True, False]) From 94f0ae1002e0657fab0dc5dc99c19a0d1429e1db Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Tue, 25 Jun 2024 16:17:14 +0200 Subject: [PATCH 09/21] Suppress implicit return in _is_data_scaleint, also codestyle --- rsciio/digitalsurf/_api.py | 2 ++ rsciio/tests/test_digitalsurf.py | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index 718c5db96..bf42e033c 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -2200,6 +2200,8 @@ def _is_data_scaleint( "_INTENSITYIMAGE", ]: return True + else: + return False def _get_uncompressed_datasize(self) -> int: """Return size of uncompressed data in bytes""" diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index 43655966a..c5c2f9e86 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -664,6 +664,8 @@ def test_norm_int_data(dtype, special, fullscale): dat[2] = minint dat[11] = maxint + Zscale = 0.0 #to avoid CodeQL error: pot. non-initialized var + Zoffset = -np.inf #to avoid CodeQL error: pot. 
non-initialized var pointsize, Zmin, Zmax, Zscale, Zoffset, data_int = dh._norm_data(dat, special) off = minint + 1 if special and fullscale else dat.min() @@ -690,7 +692,7 @@ def test_writetestobjects_rgb(tmp_path,transpose): d.save(fn) else: d.save(fn) - + d2 = hs.load(fn) d2.save(fn) d3 = hs.load(fn) From acfb7f852f25636c70090cc38378c0238296a13b Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Thu, 27 Jun 2024 10:13:02 +0200 Subject: [PATCH 10/21] refactor n_objects_to_read, remove useless binary get / set defaults and fix endianness --- rsciio/digitalsurf/_api.py | 83 +++++++++++++------------------------- 1 file changed, 28 insertions(+), 55 deletions(-) diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index bf42e033c..63252744b 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -127,7 +127,7 @@ def __init__(self, filename: str = ""): self._work_dict = { "_01_Signature": { "value": "DSCOMPRESSED", # Uncompressed key is DIGITAL SURF - "b_unpack_fn": lambda f: self._get_str(f, 12, "DSCOMPRESSED"), + "b_unpack_fn": lambda f: self._get_str(f, 12), "b_pack_fn": lambda f, v: self._set_str(f, v, 12), }, "_02_Format": { @@ -152,12 +152,12 @@ def __init__(self, filename: str = ""): }, "_06_Object_Name": { "value": "", - "b_unpack_fn": lambda f: self._get_str(f, 30, ""), + "b_unpack_fn": lambda f: self._get_str(f, 30, ), "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, "_07_Operator_Name": { "value": "ROSETTA", - "b_unpack_fn": lambda f: self._get_str(f, 30, ""), + "b_unpack_fn": lambda f: self._get_str(f, 30, ), "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, "_08_P_Size": { @@ -197,7 +197,7 @@ def __init__(self, filename: str = ""): }, "_15_Size_of_Points": { "value": 16, - "b_unpack_fn": lambda f: self._get_int16(f, 32), + "b_unpack_fn": self._get_int16, "b_pack_fn": self._set_int16, }, "_16_Zmin": { @@ -242,47 +242,47 @@ def __init__(self, filename: str = ""): }, "_24_Name_of_X_Axis": { "value": "X", - 
"b_unpack_fn": lambda f: self._get_str(f, 16, "X"), + "b_unpack_fn": lambda f: self._get_str(f, 16 ), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_25_Name_of_Y_Axis": { "value": "Y", - "b_unpack_fn": lambda f: self._get_str(f, 16, "Y"), + "b_unpack_fn": lambda f: self._get_str(f, 16 ), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_26_Name_of_Z_Axis": { "value": "Z", - "b_unpack_fn": lambda f: self._get_str(f, 16, "Z"), + "b_unpack_fn": lambda f: self._get_str(f, 16 ), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_27_X_Step_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16, "um"), + "b_unpack_fn": lambda f: self._get_str(f, 16 ), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_28_Y_Step_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16, "um"), + "b_unpack_fn": lambda f: self._get_str(f, 16 ), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_29_Z_Step_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16, "um"), + "b_unpack_fn": lambda f: self._get_str(f, 16 ), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_30_X_Length_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16, "um"), + "b_unpack_fn": lambda f: self._get_str(f, 16 ), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_31_Y_Length_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16, "um"), + "b_unpack_fn": lambda f: self._get_str(f, 16 ), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_32_Z_Length_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16, "um"), + "b_unpack_fn": lambda f: self._get_str(f, 16 ), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_33_X_Unit_Ratio": { @@ -412,12 +412,12 @@ def __init__(self, filename: str = ""): }, "_58_T_Axis_Name": { "value": "T", - "b_unpack_fn": lambda f: self._get_str(f, 13, "Wavelength"), + "b_unpack_fn": lambda f: self._get_str(f, 13 ), "b_pack_fn": lambda f, v: 
self._set_str(f, v, 13), }, "_59_T_Step_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 13, "nm"), + "b_unpack_fn": lambda f: self._get_str(f, 13 ), "b_pack_fn": lambda f, v: self._set_str(f, v, 13), }, "_60_Comment": { @@ -1138,15 +1138,9 @@ def _read_sur_file(self): ) self._N_data_channels = self._get_work_dict_key_value("_08_P_Size") - # Determine how many objects we need to read - if self._N_data_channels > 0 and self._N_data_objects > 0: - n_objects_to_read = self._N_data_channels * self._N_data_objects - elif self._N_data_channels > 0: - n_objects_to_read = self._N_data_channels - elif self._N_data_objects > 0: - n_objects_to_read = self._N_data_objects - else: - n_objects_to_read = 1 + # Determine how many objects we need to read, at least 1 object and 1 channel + # even if metadata is set to 0 (happens sometimes) + n_objects_to_read = max(self._N_data_channels,1) * max(self._N_data_objects,1) # Lookup what object type we are dealing with and save self._Object_type = DigitalSurfHandler._mountains_object_types[ @@ -2045,11 +2039,9 @@ def post_process_binary(signal): # pack/unpack binary quantities @staticmethod - def _get_uint16(file, default=None): + def _get_uint16(file): """Read a 16-bits int with a user-definable default value if no file is given""" - if file is None: - return default b = file.read(2) return struct.unpack("i", b)[0] - else: - return struct.unpack("I", b)[0] - else: - return struct.unpack(" Date: Thu, 27 Jun 2024 11:24:38 +0200 Subject: [PATCH 11/21] Fixed typos in doc --- .../supported_formats/digitalsurf.rst | 58 +++++++++---------- rsciio/digitalsurf/_api.py | 17 +++--- 2 files changed, 37 insertions(+), 38 deletions(-) diff --git a/doc/user_guide/supported_formats/digitalsurf.rst b/doc/user_guide/supported_formats/digitalsurf.rst index 6b52cadc0..57a35395e 100644 --- a/doc/user_guide/supported_formats/digitalsurf.rst +++ b/doc/user_guide/supported_formats/digitalsurf.rst @@ -3,31 +3,30 @@ DigitalSurf format (SUR & 
PRO) ------------------------------ -``.sur`` and ``.pro`` is format developed by digitalsurf to import/export data in their MountainsMap scientific -analysis software. Target datasets originally result from (micro)-topography and imaging instruments: SEM, AFM, -profilometer. RGB(A) images, multilayer surfaces and profiles are also supported. Even though it is essentially -a surfaces format, 1D signals are supported for spectra and spectral maps. Specifically, this is the fileformat -used by Attolight SA for its scanning electron microscope cathodoluminescence (SEM-CL) hyperspectral maps. This -plugin was developed based on the MountainsMap software documentation. - -Support for loading ``.sur`` and ``.pro`` datasets is complete, including parsing of user/customer-specific -metadata, and opening of files containing multiple objects. Some rare specific objects (e.g. force curves) -are not supported, due to no example data being available. Those can be added upon request and providing of -example datasets. Heterogeneous data can be represented in ``.sur`` and ``.pro`` objects, for instance -floating-point/topography and rgb data can coexist along the same navigation dimension. Those are casted to -a homogeneous floating-point representation upon loading. - -Support for data saving is partial as ``.sur`` and ``.pro`` do not support all features of hyperspy signals. -First, they have limited dimensionality. Up to 3d data arrays with either 1d (series of images) or 2d -(hyperspectral studiable) navigation space can be saved. Also, ``.sur`` and ``.pro`` do not support non-uniform -axes and saving of models. Finally, ``.sur`` / ``.pro`` linearize intensities along a uniform axis to enforce -an integer-representation of the data (with scaling and offset). This means that export from float-type hyperspy -signals is inherently lossy. 
- -Within these limitations, all features from ``.sur`` and ``.pro`` fileformats are supported, notably data -compression and setting of custom metadata. The file writer splits a signal into the suitable digitalsurf -dataobject primarily by inspecting its dimensions and its datatype, ultimately how various axes and signal -quantity are named. The criteria are listed here below: +``.sur`` and ``.pro`` is a format developed by digitalsurf to import/export data in the MountainsMap scientific +analysis software. Target datasets are originally (micro)-topography maps and profile from imaging instruments: +SEM, AFM, profilometery etc. RGB(A) images, multilayer surfaces and profiles are also supported. Even though it +is essentially a surfaces format, 1D signals are supported for spectra and spectral maps. Specifically, this is +the format used by Attolight for saving SEM-cathodoluminescence (SEM-CL) hyperspectral maps. This plugin was +developed based on the MountainsMap software documentation. + +Support for loading ``.sur`` and ``.pro`` files is complete, including parsing of custom metadata, and opening of +files containing multiple objects. Some rare, deprecated object types (e.g. force curves) are not supported, due +to no example data being available. Those can be added upon request to the module, if provided with example data +and a explanations. Unlike hyperspy.signal, ``.sur`` and ``.pro`` objects can be used to represent heterogeneous +data. For instance, float (topography) and int (rgb data) data can coexist along the same navigation dimension. +Those are casted to a homogeneous floating-point representation upon loading. + +Support for data saving is partial, as ``.sur`` and ``.pro`` do not support all features of hyperspy signals. Up +to 3d data arrays with either 1d (series of images) or 2d (spectral maps) navigation space can be saved. ``.sur`` +and ``.pro`` also do not support non-uniform axes and fitted models. 
Finally, MountainsMap maps intensities along +an axis with constant spacing between numbers by enforcing an integer-representation of the data with scaling and +offset. This means that export from float data is inherently lossy. + +Within these limitations, all features from ``.sur`` and ``.pro`` fileformats are supported. Data compression and +custom metadata allows a good interoperability of hyperspy and Mountainsmap. The file writer splits a signal into +the suitable digitalsurf dataobject. Primarily by inspecting its dimension and datatype. If ambiguity remains, it +inspects the names of signal axes and ``metadata.Signal.quantity``. The criteria are listed here below: +-----------------+---------------+------------------------------------------------------------------------------+ | Nav. dimension | Sig dimension | Extension and MountainsMap subclass | @@ -48,11 +47,10 @@ quantity are named. The criteria are listed here below: | 2 | 1 | ``.sur``: hyperspectralMap (default) | +-----------------+---------------+------------------------------------------------------------------------------+ -Axes named one of ``Wavelength``, ``Energy``, ``Energy Loss``, ``E``, are considered spectral, and quantities -named one of ``Height``, ``Altitude``, ``Elevation``, ``Depth``, ``Z`` are considered surface. The difference -between Surface and IntensitySurface stems from the AFM / profilometry origin of MountainsMap. "Surface" has -the proper meaning of an open boundary of 3d space, whereas "IntensitySurface" is a mere 2D mapping of an arbitrary -quantity. +Axes named one of ``Wavelength``, ``Energy``, ``Energy Loss`` or ``E`` are considered spectral. A quantity named +one of ``Height``, ``Altitude``, ``Elevation``, ``Depth`` or ``Z`` is considered a surface. The difference between +Surface and IntensitySurface stems from the AFM / profilometry origin of MountainsMap. 
"Surface" has its proper +meaning of being a 2d-subset of 3d space, whereas "IntensitySurface" is a mere 2D mapping of an arbitrary quantity. API functions ^^^^^^^^^^^^^ diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index 63252744b..e27bb546b 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -2361,15 +2361,16 @@ def file_writer( %s %s set_comments : str , default = 'auto' - Whether comments should be a simplified original_metadata ('auto'), - exported as the raw original_metadata dictionary ('raw'), skipped ('off'), - or supplied by the user as an additional kwarg ('custom'). + Whether comments should be a simplified version original_metadata ('auto'), + the raw original_metadata dictionary ('raw'), skipped ('off'), or supplied + by the user as an additional kwarg ('custom'). is_special : bool , default = False - If True, NaN values in the dataset or integers reaching boundary values are - flagged in the export as non-measured and saturating, respectively. If False, - those values are kept as-is. + If True, NaN values in the dataset or integers reaching the boundary of the + signed int-representation are flagged as non-measured or saturating, + respectively. If False, those values are not flagged (converted to valid points). compressed : bool, default =True - If True, compress the data in the export file using zlib. + If True, compress the data in the export file using zlib. Can help dramatically + reduce the file size. comments : dict, default = {} Set a custom dictionnary in the comments field of the exported file. Ignored if set_comments is not set to 'custom'. @@ -2386,7 +2387,7 @@ def file_writer( Maximum size is 32.0 kB and content will be cropped if this size is exceeded. client_zone : bytes, default = b'' Set arbitrary byte-content in the client_zone field of exported file metadata. - Maximum size is 128B and and content will be cropped if this size is exceeded. 
+ Maximum size is 128 B and and content will be cropped if this size is exceeded. """ ds = DigitalSurfHandler(filename=filename) ds.signal_dict = signal From 41f43058f925d8d7194ab87cf0d42296108cb566 Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Fri, 28 Jun 2024 10:41:02 +0200 Subject: [PATCH 12/21] codecov, fix binary images, add RGBSurface tests --- rsciio/digitalsurf/_api.py | 16 ++++++++++++++-- .../tests/data/digitalsurf/test_RGBSURFACE.sur | Bin 0 -> 3357 bytes rsciio/tests/test_digitalsurf.py | 2 +- 3 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 rsciio/tests/data/digitalsurf/test_RGBSURFACE.sur diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index e27bb546b..74dff77c7 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -2166,9 +2166,7 @@ def _is_data_scaleint( self._get_work_dict_key_value("_05_Object_Type") ] if objtype in [ - "_BINARYIMAGE", "_RGBIMAGE", - "_RGBSURFACE", "_SERIESOFRGBIMAGES", "_INTENSITYIMAGE", ]: @@ -2176,6 +2174,18 @@ def _is_data_scaleint( else: return False + def _is_data_bin(self): + """Digitalsurf image formats can be binary sometimes""" + objtype = self._mountains_object_types[ + self._get_work_dict_key_value("_05_Object_Type") + ] + if objtype in [ + "_BINARYIMAGE", + ]: + return True + else: + return False + def _get_uncompressed_datasize(self) -> int: """Return size of uncompressed data in bytes""" psize = int(self._get_work_dict_key_value("_15_Size_of_Points") / 8) @@ -2257,6 +2267,8 @@ def _unpack_data(self, file, encoding="latin-1"): elif self._is_data_scaleint(): _points = (_points.astype(float) - Zmin) * scale + offset _points = np.round(_points).astype(int) + elif self._is_data_bin(): + pass else: _points = (_points.astype(float) - Zmin) * scale + offset _points[nm] = np.nan # Ints have no nans diff --git a/rsciio/tests/data/digitalsurf/test_RGBSURFACE.sur b/rsciio/tests/data/digitalsurf/test_RGBSURFACE.sur new file mode 100644 index 
0000000000000000000000000000000000000000..a3a8b7da165b3ac581ad8647eea19189d6fca962 GIT binary patch literal 3357 zcmZ<>cJ}uT2yzV$c6DK3U<5*51_cTL3j+i<LsD0fvU3|NkqnF)(le#XvX(h#x1# z*nn{G=_M8sX!b>-@uSfAxw)tU)aElVG}t3B+#@&{_u(=kKo+Xo=+84)Dek~4Zs{;3 zrC>kIVYpr4;Na^R;u_@X=o9Se;##9?=-|Kwin|j)d>M!<<}jXMF#o@iHJiC&^$??SP`=@(>sQf|5N|H|I_#T_fJ-~Lo5!=cNiZqy!=1!-~T_}znA{} z%(|9Eh+P&Ai_4lcJLn+4#;mW=yS}FW76BvbKt7NlfHx7Z!X9W z{J~Ij#VNM?V5G>sme+y)nuZ>JUd}gKOs3m@TF_PMT*LBj+mcF#tNu~Lw)#ij-wlQ= zj(@kmJrKM6T~342!@1@G2Hcsaj+-&0Jv@J1@h0QUOPjQov;Wi&4wF1(sDF2X)BE?I zo)tJfHZjSx3(*VxBxTzu{_#iBA??#;nNz*Luk_wNo97?nqWC0VdgL)SUj~NrF~c{H z1rF0Z<_gVYzMwpI-2Q)CbMwO|PJ9~ek)Dr?bTqxZtTT2_JiJFn!Y%I8wCj7$E?RN* zkwS~lqm}}Rs7%G_vePC9d0kT4*S`Gz<B( z8ss-C^L%8e;d7j-btmJ&#XU=;CuJoXU4Br?ckgb&e(uiLf=7+qs}DY2^DM65+Ts4o z50d3S{W@6Bu>2_7&gKWNTi&>ykZOO;_%@SqcZ8+1fS|qw+eM2#p8iXJZBp0~zQ8Z) zZS938&3~jLUNrLDweOf-#1ZfP!J6x8=%wX$Pr~)%S7uL;VZ6;P;@KT}>-25K1#{=g z-pHQ1jjbTp>8z|xXXZ=+@BDp7H?MQfG!{Aac;gPfxfeBWOq#c)q`50GRpHvySCN&i zRf-QiZf3okwr<9!zPX1x{@sZAq|CqlR%TG&|CKvaMd^{r=G!tbC<9|`xK?Do!!(mQ zLNl4S#*sjgWA*j7)8Umz6OOaf4{e$zY~wBYkD?q4|tYrTpa zilvjT#9mSfY4-hh(peyy!+h#0#t+^9*M)3eRlh{yPIkG!=m!12Gr!!wm43|T&o^g} cT2*;3@hR)~g#NXjU>EzQdj1FgxS46a0G Date: Fri, 28 Jun 2024 15:50:02 +0000 Subject: [PATCH 13/21] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- rsciio/digitalsurf/_api.py | 149 ++++++++++++++++--------------- rsciio/tests/registry.txt | 2 + rsciio/tests/test_digitalsurf.py | 18 ++-- 3 files changed, 92 insertions(+), 77 deletions(-) diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index 74dff77c7..752d3bf64 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -23,16 +23,15 @@ # comments can be systematically parsed into metadata and write a support for # original_metadata or other +import ast import datetime -from copy import deepcopy import logging import os -import struct -import sys import re +import struct import 
warnings import zlib -import ast +from copy import deepcopy # Commented for now because I don't know what purpose it serves # import traits.api as t @@ -53,9 +52,9 @@ RETURNS_DOC, SIGNAL_DOC, ) +from rsciio.utils.date_time_tools import get_date_time_from_metadata from rsciio.utils.exceptions import MountainsMapFileError from rsciio.utils.rgb_tools import is_rgb, is_rgba -from rsciio.utils.date_time_tools import get_date_time_from_metadata _logger = logging.getLogger(__name__) @@ -152,12 +151,18 @@ def __init__(self, filename: str = ""): }, "_06_Object_Name": { "value": "", - "b_unpack_fn": lambda f: self._get_str(f, 30, ), + "b_unpack_fn": lambda f: self._get_str( + f, + 30, + ), "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, "_07_Operator_Name": { "value": "ROSETTA", - "b_unpack_fn": lambda f: self._get_str(f, 30, ), + "b_unpack_fn": lambda f: self._get_str( + f, + 30, + ), "b_pack_fn": lambda f, v: self._set_str(f, v, 30), }, "_08_P_Size": { @@ -242,47 +247,47 @@ def __init__(self, filename: str = ""): }, "_24_Name_of_X_Axis": { "value": "X", - "b_unpack_fn": lambda f: self._get_str(f, 16 ), + "b_unpack_fn": lambda f: self._get_str(f, 16), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_25_Name_of_Y_Axis": { "value": "Y", - "b_unpack_fn": lambda f: self._get_str(f, 16 ), + "b_unpack_fn": lambda f: self._get_str(f, 16), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_26_Name_of_Z_Axis": { "value": "Z", - "b_unpack_fn": lambda f: self._get_str(f, 16 ), + "b_unpack_fn": lambda f: self._get_str(f, 16), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_27_X_Step_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16 ), + "b_unpack_fn": lambda f: self._get_str(f, 16), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_28_Y_Step_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16 ), + "b_unpack_fn": lambda f: self._get_str(f, 16), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_29_Z_Step_Unit": 
{ "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16 ), + "b_unpack_fn": lambda f: self._get_str(f, 16), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_30_X_Length_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16 ), + "b_unpack_fn": lambda f: self._get_str(f, 16), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_31_Y_Length_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16 ), + "b_unpack_fn": lambda f: self._get_str(f, 16), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_32_Z_Length_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 16 ), + "b_unpack_fn": lambda f: self._get_str(f, 16), "b_pack_fn": lambda f, v: self._set_str(f, v, 16), }, "_33_X_Unit_Ratio": { @@ -412,12 +417,12 @@ def __init__(self, filename: str = ""): }, "_58_T_Axis_Name": { "value": "T", - "b_unpack_fn": lambda f: self._get_str(f, 13 ), + "b_unpack_fn": lambda f: self._get_str(f, 13), "b_pack_fn": lambda f, v: self._set_str(f, v, 13), }, "_59_T_Step_Unit": { "value": "um", - "b_unpack_fn": lambda f: self._get_str(f, 13 ), + "b_unpack_fn": lambda f: self._get_str(f, 13), "b_pack_fn": lambda f, v: self._set_str(f, v, 13), }, "_60_Comment": { @@ -613,7 +618,7 @@ def _split_signal_dict(self): self._split_rgb() elif is_rgba(self.signal_dict["data"]): warnings.warn( - f"A channel discarded upon saving \ + "A channel discarded upon saving \ RGBA signal in .sur format" ) self._split_rgb() @@ -636,7 +641,7 @@ def _split_signal_dict(self): self._split_rgbserie() elif is_rgba(self.signal_dict["data"]): warnings.warn( - f"Alpha channel discarded upon saving RGBA signal in .sur format" + "Alpha channel discarded upon saving RGBA signal in .sur format" ) self._split_rgbserie() else: @@ -651,7 +656,7 @@ def _split_signal_dict(self): self._split_rgb() elif is_rgba(self.signal_dict["data"]): warnings.warn( - f"A channel discarded upon saving \ + "A channel discarded upon saving \ RGBA signal in .sur format" ) 
self._split_rgb() @@ -846,7 +851,7 @@ def _norm_data(self, data: np.ndarray, is_special: bool): if np.issubdtype(data_type, np.complexfloating): raise MountainsMapFileError( - f"digitalsurf file formats do not support export of complex data. Convert data to real-value representations before before export" + "digitalsurf file formats do not support export of complex data. Convert data to real-value representations before before export" ) elif data_type == bool: pointsize = 16 @@ -867,7 +872,7 @@ def _norm_data(self, data: np.ndarray, is_special: bool): data_int = data.astype(np.int32) elif np.issubdtype(data_type, np.unsignedinteger): raise MountainsMapFileError( - f"digitalsurf file formats do not support unsigned int >16bits. Convert data to signed integers before export." + "digitalsurf file formats do not support unsigned int >16bits. Convert data to signed integers before export." ) elif data_type == np.int8: pointsize = 16 # Pointsize has to be 16 or 32 in surf format @@ -883,7 +888,7 @@ def _norm_data(self, data: np.ndarray, is_special: bool): Zmin, Zmax, Zscale, Zoffset = self._norm_signed_int(data, is_special) elif np.issubdtype(data_type, np.integer): raise MountainsMapFileError( - f"digitalsurf file formats do not support export integers larger than 32 bits. Convert data to 32-bit representation before exporting" + "digitalsurf file formats do not support export integers larger than 32 bits. 
Convert data to 32-bit representation before exporting" ) elif np.issubdtype(data_type, np.floating): pointsize = 32 @@ -977,41 +982,41 @@ def _build_workdict( """Populate _work_dict with the""" if not compressed: - self._work_dict["_01_Signature"][ - "value" - ] = "DIGITAL SURF" # DSCOMPRESSED by default + self._work_dict["_01_Signature"]["value"] = ( + "DIGITAL SURF" # DSCOMPRESSED by default + ) else: - self._work_dict["_01_Signature"][ - "value" - ] = "DSCOMPRESSED" # DSCOMPRESSED by default + self._work_dict["_01_Signature"]["value"] = ( + "DSCOMPRESSED" # DSCOMPRESSED by default + ) # self._work_dict['_02_Format']['value'] = 0 # Dft. other possible value is 257 for MacintoshII computers with Motorola CPUs. Obv not supported... self._work_dict["_03_Number_of_Objects"]["value"] = self._N_data_objects # self._work_dict['_04_Version']['value'] = 1 # Version number. Always default. self._work_dict["_05_Object_Type"]["value"] = obj_type - self._work_dict["_06_Object_Name"][ - "value" - ] = object_name # Obsolete, DOS-version only (Not supported) - self._work_dict["_07_Operator_Name"][ - "value" - ] = operator_name # Should be settable from kwargs + self._work_dict["_06_Object_Name"]["value"] = ( + object_name # Obsolete, DOS-version only (Not supported) + ) + self._work_dict["_07_Operator_Name"]["value"] = ( + operator_name # Should be settable from kwargs + ) self._work_dict["_08_P_Size"]["value"] = self._N_data_channels - self._work_dict["_09_Acquisition_Type"][ - "value" - ] = 0 # AFM data only, could be inferred - self._work_dict["_10_Range_Type"][ - "value" - ] = 0 # Only 1 for high-range (z-stage scanning), AFM data only, could be inferred + self._work_dict["_09_Acquisition_Type"]["value"] = ( + 0 # AFM data only, could be inferred + ) + self._work_dict["_10_Range_Type"]["value"] = ( + 0 # Only 1 for high-range (z-stage scanning), AFM data only, could be inferred + ) self._work_dict["_11_Special_Points"]["value"] = int(is_special) - 
self._work_dict["_12_Absolute"][ - "value" - ] = absolute # Probably irrelevant in most cases. Absolute vs rel heights (for profilometers), can be inferred - self._work_dict["_13_Gauge_Resolution"][ - "value" - ] = 0.0 # Probably irrelevant. Only for profilometers (maybe AFM), can be inferred + self._work_dict["_12_Absolute"]["value"] = ( + absolute # Probably irrelevant in most cases. Absolute vs rel heights (for profilometers), can be inferred + ) + self._work_dict["_13_Gauge_Resolution"]["value"] = ( + 0.0 # Probably irrelevant. Only for profilometers (maybe AFM), can be inferred + ) # T-axis acts as W-axis for spectrum / hyperspectrum surfaces. if obj_type in [21]: @@ -1081,17 +1086,15 @@ def _build_workdict( data_bin = data_int.ravel().astype(fmt).tobytes() compressed_size = 0 - self._work_dict["_48_Compressed_data_size"][ - "value" - ] = compressed_size # Obsolete in case of non-compressed + self._work_dict["_48_Compressed_data_size"]["value"] = ( + compressed_size # Obsolete in case of non-compressed + ) # _49_Obsolete comment_len = len(f"{comment}".encode("latin-1")) if comment_len > 2**15: - warnings.warn( - f"Comment exceeding max length of 32.0 kB and will be cropped" - ) + warnings.warn("Comment exceeding max length of 32.0 kB and will be cropped") comment_len = np.int16(2**15) self._work_dict["_50_Comment_size"]["value"] = comment_len @@ -1099,7 +1102,7 @@ def _build_workdict( privatesize = len(private_zone) if privatesize > 2**15: warnings.warn( - f"Private size exceeding max length of 32.0 kB and will be cropped" + "Private size exceeding max length of 32.0 kB and will be cropped" ) privatesize = np.int16(2**15) @@ -1138,9 +1141,11 @@ def _read_sur_file(self): ) self._N_data_channels = self._get_work_dict_key_value("_08_P_Size") - # Determine how many objects we need to read, at least 1 object and 1 channel + # Determine how many objects we need to read, at least 1 object and 1 channel # even if metadata is set to 0 (happens sometimes) - 
n_objects_to_read = max(self._N_data_channels,1) * max(self._N_data_objects,1) + n_objects_to_read = max(self._N_data_channels, 1) * max( + self._N_data_objects, 1 + ) # Lookup what object type we are dealing with and save self._Object_type = DigitalSurfHandler._mountains_object_types[ @@ -1500,7 +1505,6 @@ def _build_RGB_image( def _build_RGB_image_series( self, ): - # First object dictionary hypdic = self._list_sur_file_content[0] @@ -1941,7 +1945,7 @@ def _get_comment_dict( elif method == "off": return {} elif method == "auto": - pattern = re.compile("Object_\d*_Channel_\d*") + pattern = re.compile(r"Object_\d*_Channel_\d*") omd = original_metadata # filter original metadata content of dict type and matching pattern. validfields = [ @@ -1968,7 +1972,7 @@ def _get_comment_dict( return {} else: raise MountainsMapFileError( - f"Non-valid method for setting mountainsmap file comment. Choose one of: 'auto','raw','custom','off' " + "Non-valid method for setting mountainsmap file comment. Choose one of: 'auto','raw','custom','off' " ) @staticmethod @@ -2050,7 +2054,9 @@ def _set_uint16(file, val): file.write(struct.pack(" int: return datasize def _unpack_data(self, file, encoding="latin-1"): - # Size of datapoints in bytes. Always int16 (==2) or 32 (==4) psize = int(self._get_work_dict_key_value("_15_Size_of_Points") / 8) dtype = np.int16 if psize == 2 else np.int32 @@ -2263,7 +2272,7 @@ def _unpack_data(self, file, encoding="latin-1"): # Packing data into ints or float, with or without scaling. 
if self._is_data_int(): - pass #Case left here for future modification + pass # Case left here for future modification elif self._is_data_scaleint(): _points = (_points.astype(float) - Zmin) * scale + offset _points = np.round(_points).astype(int) @@ -2292,7 +2301,7 @@ def _compress_data(data_int, nstreams: int = 1) -> bytes: if nstreams <= 0 or nstreams > 8: raise MountainsMapFileError( - f"Number of compression streams must be >= 1, <= 8" + "Number of compression streams must be >= 1, <= 8" ) bstr = b"" @@ -2374,14 +2383,14 @@ def file_writer( %s set_comments : str , default = 'auto' Whether comments should be a simplified version original_metadata ('auto'), - the raw original_metadata dictionary ('raw'), skipped ('off'), or supplied + the raw original_metadata dictionary ('raw'), skipped ('off'), or supplied by the user as an additional kwarg ('custom'). is_special : bool , default = False - If True, NaN values in the dataset or integers reaching the boundary of the - signed int-representation are flagged as non-measured or saturating, + If True, NaN values in the dataset or integers reaching the boundary of the + signed int-representation are flagged as non-measured or saturating, respectively. If False, those values are not flagged (converted to valid points). compressed : bool, default =True - If True, compress the data in the export file using zlib. Can help dramatically + If True, compress the data in the export file using zlib. Can help dramatically reduce the file size. comments : dict, default = {} Set a custom dictionnary in the comments field of the exported file. 
diff --git a/rsciio/tests/registry.txt b/rsciio/tests/registry.txt index b8b9f1f36..593391ed2 100644 --- a/rsciio/tests/registry.txt +++ b/rsciio/tests/registry.txt @@ -130,6 +130,8 @@ 'digitalmicrograph/Fei HAADF-UK_location.dm3' 3264325b6f79457737f6ff71e3979ebe508971a592c24e15d9ee4ba876244e56 'digitalmicrograph/test_stackbuilder_imagestack.dm3' 41070d0fd25a838a504f705e1431735192b7a97ca7dd15d9328af5e939fe74a2 'digitalsurf/test_RGB.sur' 802f3d915bf9feb7c264ef3f1242df35033da7227e5a7a5924fd37f8f49f4778 +'digitalsurf/test_RGBSURFACE.sur' 15e8b345cc5d67e7399831c881c63362fd92bc075fad8d763f3ff0d26dfe29a2 +'digitalsurf/test_isurface.sur' 6ed59a9a235c0b6dc7e15f155d0e738c5841cfc0fe78f1861b7e145f9dcaadf4 'digitalsurf/test_profile.pro' fdd9936a4b5e205b819b1d82813bb21045b702b4610e8ef8d1d0932d63344f6d 'digitalsurf/test_spectra.pro' ea1602de193b73046beb5e700fcac727fb088bf459edeec3494b0362a41bdcb1 'digitalsurf/test_spectral_map.sur' f9c863e3fd61be89c3b68cef6fa2434ffedc7e486efe2263c2241109fa58c3f7 diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index 71cf75873..e604cda63 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -499,7 +499,6 @@ def test_metadata_mapping(): def test_compressdata(): - testdat = np.arange(120, dtype=np.int32) # Refuse too many / neg streams @@ -665,8 +664,8 @@ def test_norm_int_data(dtype, special, fullscale): dat[2] = minint dat[11] = maxint - Zscale = 0.0 #to avoid CodeQL error: pot. non-initialized var - Zoffset = -np.inf #to avoid CodeQL error: pot. non-initialized var + Zscale = 0.0 # to avoid CodeQL error: pot. non-initialized var + Zoffset = -np.inf # to avoid CodeQL error: pot. 
non-initialized var pointsize, Zmin, Zmax, Zscale, Zoffset, data_int = dh._norm_data(dat, special) off = minint + 1 if special and fullscale else dat.min() @@ -678,8 +677,9 @@ def test_norm_int_data(dtype, special, fullscale): assert Zmin == off assert Zmax == maxval + @pytest.mark.parametrize("transpose", [True, False]) -def test_writetestobjects_rgb(tmp_path,transpose): +def test_writetestobjects_rgb(tmp_path, transpose): # This is just a different test function because the # comparison of rgb data must be done differently # (due to hyperspy underlying structure) @@ -718,6 +718,7 @@ def test_writetestobjects_rgb(tmp_path,transpose): assert np.allclose(ax.axis, ax2.axis) assert np.allclose(ax.axis, ax3.axis) + @pytest.mark.parametrize( "dtype", [np.int8, np.int16, np.int32, np.float64, np.uint8, np.uint16] ) @@ -732,6 +733,7 @@ def test_writegeneric_validtypes(tmp_path, dtype, compressed): gen2 = hs.load(fgen) assert np.allclose(gen2.data, gen.data) + @pytest.mark.parametrize("compressed", [True, False]) def test_writegeneric_nans(tmp_path, compressed): """This test establishes the capability of saving a generic signal @@ -748,6 +750,7 @@ def test_writegeneric_nans(tmp_path, compressed): gen2 = hs.load(fgen) assert np.allclose(gen2.data, gen.data, equal_nan=True) + def test_writegeneric_transposedprofile(tmp_path): """This test checks the expected behaviour that a transposed profile gets correctly saved but a warning is raised.""" @@ -762,6 +765,7 @@ def test_writegeneric_transposedprofile(tmp_path): gen2 = hs.load(fgen) assert np.allclose(gen2.data, gen.data) + @pytest.mark.parametrize( "dtype", [ @@ -776,12 +780,14 @@ def test_writegeneric_failingtypes(tmp_path, dtype): with pytest.raises(MountainsMapFileError): gen.save(fgen, overwrite=True) + def test_writegeneric_failingformat(tmp_path): - gen = hs.signals.Signal1D(np.zeros((3,4,5,6))) + gen = hs.signals.Signal1D(np.zeros((3, 4, 5, 6))) fgen = tmp_path.joinpath("test.sur") with 
pytest.raises(MountainsMapFileError): gen.save(fgen, overwrite=True) + @pytest.mark.parametrize("dtype", [(np.uint8, "rgba8"), (np.uint16, "rgba16")]) @pytest.mark.parametrize("compressed", [True, False]) @pytest.mark.parametrize("transpose", [True, False]) @@ -815,7 +821,6 @@ def test_writegeneric_rgba(tmp_path, dtype, compressed, transpose): @pytest.mark.parametrize("compressed", [True, False]) @pytest.mark.parametrize("transpose", [True, False]) def test_writegeneric_binaryimg(tmp_path, compressed, transpose): - size = (76, 3) gen = hs.signals.Signal2D(np.random.randint(low=0, high=1, size=size, dtype=bool)) @@ -836,7 +841,6 @@ def test_writegeneric_binaryimg(tmp_path, compressed, transpose): @pytest.mark.parametrize("compressed", [True, False]) def test_writegeneric_profileseries(tmp_path, compressed): - size = (9, 655) gen = hs.signals.Signal1D(np.random.random(size=size) * 1444 + 2550.0) From 7cfd261d0b59316025fc0ac0ae16a18c5c61c519 Mon Sep 17 00:00:00 2001 From: Attolight-NTappy <123734179+Attolight-NTappy@users.noreply.github.com> Date: Sun, 30 Jun 2024 10:29:09 +0200 Subject: [PATCH 14/21] Update doc/user_guide/supported_formats/digitalsurf.rst fixed type Co-authored-by: Eric Prestat --- doc/user_guide/supported_formats/digitalsurf.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/user_guide/supported_formats/digitalsurf.rst b/doc/user_guide/supported_formats/digitalsurf.rst index 57a35395e..08f2705b9 100644 --- a/doc/user_guide/supported_formats/digitalsurf.rst +++ b/doc/user_guide/supported_formats/digitalsurf.rst @@ -36,7 +36,7 @@ inspects the names of signal axes and ``metadata.Signal.quantity``. 
The criteria | 0 | 2 | ``.sur``: BinaryImage (based on dtype), RGBImage (based on dtype), | | | | Surface (default) | +-----------------+---------------+------------------------------------------------------------------------------+ -| 1 | 0 | ``.pro``: same as (1,0) | +| 1 | 0 | ``.pro``: same as (0,1) | +-----------------+---------------+------------------------------------------------------------------------------+ | 1 | 1 | ``.pro``: Spectrum Serie (based on axes name), Profile Serie (default) | +-----------------+---------------+------------------------------------------------------------------------------+ From 61b932b03acc3e4419954c4b5236bc0ea6af2958 Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Mon, 1 Jul 2024 14:18:06 +0200 Subject: [PATCH 15/21] Increase codecov, code cleanup --- .../supported_formats/digitalsurf.rst | 2 +- rsciio/digitalsurf/Untitled-1.ipynb | 673 ------------------ rsciio/digitalsurf/_api.py | 40 +- rsciio/tests/test_digitalsurf.py | 84 ++- 4 files changed, 100 insertions(+), 699 deletions(-) delete mode 100644 rsciio/digitalsurf/Untitled-1.ipynb diff --git a/doc/user_guide/supported_formats/digitalsurf.rst b/doc/user_guide/supported_formats/digitalsurf.rst index 57a35395e..08f2705b9 100644 --- a/doc/user_guide/supported_formats/digitalsurf.rst +++ b/doc/user_guide/supported_formats/digitalsurf.rst @@ -36,7 +36,7 @@ inspects the names of signal axes and ``metadata.Signal.quantity``. 
The criteria | 0 | 2 | ``.sur``: BinaryImage (based on dtype), RGBImage (based on dtype), | | | | Surface (default) | +-----------------+---------------+------------------------------------------------------------------------------+ -| 1 | 0 | ``.pro``: same as (1,0) | +| 1 | 0 | ``.pro``: same as (0,1) | +-----------------+---------------+------------------------------------------------------------------------------+ | 1 | 1 | ``.pro``: Spectrum Serie (based on axes name), Profile Serie (default) | +-----------------+---------------+------------------------------------------------------------------------------+ diff --git a/rsciio/digitalsurf/Untitled-1.ipynb b/rsciio/digitalsurf/Untitled-1.ipynb deleted file mode 100644 index c35673f23..000000000 --- a/rsciio/digitalsurf/Untitled-1.ipynb +++ /dev/null @@ -1,673 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from rsciio.digitalsurf._api import DigitalSurfHandler\n", - "import hyperspy.api as hs\n", - "import numpy as np\n", - "import pathlib\n", - "import matplotlib.pyplot as plt\n", - "%matplotlib qt\n", - "\n", - "savedir = pathlib.Path().home().joinpath(\"OneDrive - Attolight/Desktop/\")" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "ddd = np.loadtxt(r\"C:\\Users\\NicolasTappy\\Attolight Dropbox\\ATT_RnD\\INJECT\\BEAMFOUR\\BeamFour-end-users_Windows\\histo2dim_500mmoffset.txt\",delimiter=',')" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "255" - ] - }, - "execution_count": 26, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "np.iinfo(np.uint8).max" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 35, - "metadata": {}, - "output_type": 
"execute_result" - } - ], - "source": [ - "dt = np.uint8\n", - "maxint = np.iinfo(dt).max\n", - "np.random.randint(low=0,high=maxint,size=(17,38,3),dtype=dt)\n", - "size = (5,17,38,3)\n", - "maxint = np.iinfo(dt).max\n", - "\n", - "gen = hs.signals.Signal1D(np.random.randint(low=0,high=maxint,size=size,dtype=dt))\n", - "gen\n", - "# gen.change_dtype('rgb8')" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "52c591c4ee4f44d6a582c5958ecffd12", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "VBox(children=(HBox(children=(Label(value='Unnamed 0th axis', layout=Layout(width='15%')), IntSlider(value=0, …" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "gen.change_dtype('rgb8')\n", - "gen.plot()" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Text(0, 0.5, 'Y (um)')" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "plt.matshow(ddd)\n", - "plt.xlabel('X (um)')\n", - "plt.ylabel('Y (um)')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def diffdic(a:dict,b:dict):\n", - " set1 = set(a.items())\n", - " set2 = set(b.items())\n", - " return set1^set2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import struct\n", - "def _pack_str(val, size, encoding=\"latin-1\"):\n", - " \"\"\"Write a str of defined size in bytes to a file. 
struct.pack\n", - " will automatically trim the string if it is too long\"\"\"\n", - " return struct.pack(\"<{:d}s\".format(size), f\"{val}\".ljust(size).encode(encoding))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "testdir = pathlib.Path(r'C:\\Program Files\\Attolight\\AttoMap Advanced 7.4\\Example Data')\n", - "testfiles = list(testdir.glob('*.sur'))+list(testdir.glob('*pro'))\n", - "list(testfiles)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "testdir = pathlib.Path(r'C:\\Program Files\\Attolight\\AttoMap Advanced 7.4\\Example Data')\n", - "testfiles = list(testdir.glob('*.sur'))+list(testdir.glob('*pro'))\n", - "savedir = pathlib.Path(r'C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Desktop\\ds_testfiles')\n", - "for tf in testfiles:\n", - " d = hs.load(tf)\n", - " comp = d.original_metadata.Object_0_Channel_0.Header.H01_Signature == 'DSCOMPRESSED'\n", - " nam = d.original_metadata.Object_0_Channel_0.Header.H06_Object_Name\n", - " abso = d.original_metadata.Object_0_Channel_0.Header.H12_Absolute\n", - " # print(tf.name)\n", - " # if d.original_metadata.Object_0_Channel_0.Header.H05_Object_Type == 12:\n", - " # print(d.original_metadata.Object_0_Channel_0.Header.H23_Z_Spacing)\n", - " nn = savedir.joinpath(f\"EXPORTED_{tf.name}\")\n", - " print(f\"{nn.name}: {comp}, {abso}\")\n", - " d.save(nn,object_name=nam,compressed=comp,absolute=abso,overwrite=True)\n", - " tmp = hs.load(nn)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a = d.axes_manager[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a.get_axis_dictionary()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "exptf = 
list(pathlib.Path(r'C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Desktop\\ds_testfiles').glob('*.sur'))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "i = 26\n", - "print(testfiles[i].name)\n", - "d = hs.load(testfiles[i])\n", - "print(exptf[i].name)\n", - "ed = hs.load(exptf[i])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "diffdic(d.original_metadata.Object_0_Channel_0.Header.as_dictionary(),\n", - " ed.original_metadata.Object_0_Channel_0.Header.as_dictionary())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "d.plot(),ed.plot(),(d-ed).plot()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pathlib\n", - "d = pathlib.Path(r\"C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Documents\\GIT\\rosettasciio\\rsciio\\tests\\data\\digitalsurf\")\n", - "fl = list(d.iterdir())\n", - "fl" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a = hs.load(r\"C:\\Users\\NicolasTappy\\Attolight Dropbox\\ATT_RnD\\INJECT\\hyperspectral tests\\HYP-TEST-NOLASER\\HYPCard.sur\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "savedir = pathlib.Path(r'C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Desktop\\ds_testfiles')\n", - "for tf in fl:\n", - " d = hs.load(tf)\n", - " try:\n", - " comp = d.original_metadata.Object_0_Channel_0.Header.H01_Signature == 'DSCOMPRESSED'\n", - " nam = d.original_metadata.Object_0_Channel_0.Header.H06_Object_Name\n", - " abso = d.original_metadata.Object_0_Channel_0.Header.H12_Absolute\n", - " except:\n", - " comp=False\n", - " nam= 'test'\n", - " abso = 0\n", - " # print(tf.name)\n", - " # if 
d.original_metadata.Object_0_Channel_0.Header.H05_Object_Type == 12:\n", - " # print(d.original_metadata.Object_0_Channel_0.Header.H23_Z_Spacing)\n", - " nn = savedir.joinpath(f\"EXPORTED_{tf.name}\")\n", - " print(f\"{nn.name}: {comp}, {abso}\")\n", - " d.save(nn,object_name=nam,compressed=comp,absolute=abso,overwrite=True)\n", - " tmp = hs.load(nn)\n", - "exptf = list(pathlib.Path(r'C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Desktop\\ds_testfiles').glob('*'))\n", - "exptf" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t = hs.load(r\"C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Desktop\\ds_testfiles\\EXPORTED_test_spectral_map.sur\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "b'\\x19\\x00\\x00\\x00\\x1a\\x00\\x00\\x00\\x1b\\x00\\x00\\x00\\x1c\\x00\\x00\\x00\\x1d\\x00\\x00\\x00\\x1e\\x00\\x00\\x00\\x1f\\x00\\x00\\x00 \\x00\\x00\\x00!\\x00\\x00\\x00\"\\x00\\x00\\x00#\\x00\\x00\\x00$\\x00\\x00\\x00'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "i = 4\n", - "print(fl[i].name)\n", - "# d = hs.load(fl[i])\n", - "print(exptf[i].name)\n", - "ed = hs.load(exptf[i])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "a = d.metadata\n", - "a.as_dictionary()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "d = hs.load(r\"C:\\Users\\NicolasTappy\\OneDrive - Attolight\\Pictures\\Untitled.jpg\")\n", - "n = savedir.joinpath(f\"EXPORTED_Untitled.sur\")\n", - "d.save(n)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "i = 1\n", - "d1 = hs.load(fl[i])\n", - "d1.save(savedir.joinpath(fl[i].name),overwrite=True)\n", - "d2 = hs.load(savedir.joinpath(fl[i].name))\n", - "for k in 
['R','G','B']:\n", - " plt.figure()\n", - " plt.imshow(d1.data[k].astype(np.int16)-d2.data[k].astype(np.int16))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "aa[0].axis" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "k.plot()" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([111., 112., 113., 114., 115., 116., 117., 118., 119., 120., 121.,\n", - " 122., 123., 124., 125., 126., 127., 128., 129., 130., 131., 132.,\n", - " 133., 134.])" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import numpy as np\n", - "from rsciio.digitalsurf import file_writer,file_reader\n", - "md = { 'General': {},\n", - " 'Signal': {}}\n", - "\n", - "ax = {'name': 'X',\n", - " 'navigate': False,\n", - " }\n", - "\n", - "sd = {\"data\": np.arange(24)+111,\n", - " \"axes\": [ax],\n", - " \"metadata\": md,\n", - " \"original_metadata\": {}}\n", - "\n", - "file_writer(\"test.pro\",sd)\n", - "file_reader('test.pro')[0]['data']" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for f in fl:\n", - " print(f.name)\n", - " d = hs.load(f)\n", - " # d.plot()\n", - " # d.save(savedir.joinpath(f.name),overwrite=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for f in fl:\n", - " print(f.name)\n", - " d = hs.load(savedir.joinpath(f.name))\n", - " # d.plot()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - 
"d.plot()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "k = hs.load(savedir.joinpath('test_RGB.sur'))\n", - "k.original_metadata" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for f in testrgbfiles:\n", - " print(pathlib.Path(f).name)\n", - " d = hs.load(f)\n", - " d.plot()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ds = DigitalSurfHandler(savedir.joinpath('test_spectra.pro'))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "d.save(savedir.joinpath('test_spectra.pro'),comment='off')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "gen = hs.signals.Signal1D(np.arange(24,dtype=np.float32))\n", - "fgen = savedir.joinpath('test.pro')\n", - "gen.save(fgen,overwrite=True,is_special=False)\n", - "gen.data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "hs.load(fgen).original_metadata" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "11.5+11.5" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [], - "source": [ - "from rsciio.utils import rgb_tools" - ] - }, - { - "cell_type": "code", - "execution_count": 74, - "metadata": {}, - "outputs": [], - "source": [ - "# a = np.random.randint(0,65535,size=(8,3,12,14),dtype=np.uint16)\n", - "a = np.random.randint(0,65535,size=(24,12,14),dtype=np.uint16)\n", - "a = a.reshape(8,3,12,14)" - ] - }, - { - 
"cell_type": "code", - "execution_count": 78, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(8, 12, 14, 3)" - ] - }, - "execution_count": 78, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "np.rollaxis(a,1,4).shape" - ] - }, - { - "cell_type": "code", - "execution_count": 53, - "metadata": {}, - "outputs": [], - "source": [ - "b = rgb_tools.regular_array2rgbx(a)" - ] - }, - { - "cell_type": "code", - "execution_count": 80, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,\n", - " 42, 43, 44, 45, 46, 47, 48], dtype=int8)" - ] - }, - "execution_count": 80, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "d=hs.signals.Signal1D(np.arange(24,dtype=np.int8))+25\n", - "d.data" - ] - }, - { - "cell_type": "code", - "execution_count": 58, - "metadata": {}, - "outputs": [], - "source": [ - "c = b[:8]" - ] - }, - { - "cell_type": "code", - "execution_count": 81, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "-128" - ] - }, - "execution_count": 81, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "- 2**(8-1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "hsdev", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index 74dff77c7..a8d56b7af 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -104,7 +104,7 @@ class DigitalSurfHandler(object): 
21: "_HYPCARD", } - def __init__(self, filename: str = ""): + def __init__(self, filename: str): # We do not need to check for file existence here because # io module implements it in the load function self.filename = filename @@ -954,9 +954,6 @@ def _get_Zname_Zunit(self, metadata: dict): elif len(quantity) == 1: Zname = quantity.pop() Zunit = "" - else: - Zname = "" - Zunit = "" return Zname, Zunit @@ -1088,20 +1085,20 @@ def _build_workdict( # _49_Obsolete comment_len = len(f"{comment}".encode("latin-1")) - if comment_len > 2**15: + if comment_len >= 2**15: warnings.warn( f"Comment exceeding max length of 32.0 kB and will be cropped" ) - comment_len = np.int16(2**15) + comment_len = np.int16(2**15-1) self._work_dict["_50_Comment_size"]["value"] = comment_len privatesize = len(private_zone) - if privatesize > 2**15: + if privatesize >= 2**15: warnings.warn( f"Private size exceeding max length of 32.0 kB and will be cropped" ) - privatesize = np.int16(2**15) + privatesize = np.uint16(2**15-1) self._work_dict["_51_Private_size"]["value"] = privatesize @@ -1870,7 +1867,7 @@ def _MS_parse(str_ms, prefix, delimiter): # Title lines start with an underscore titlestart = "{:s}_".format(prefix) - keymain = None + key_main = None for line in str_ms.splitlines(): # Here we ignore any empty line or line starting with @@ @@ -1884,8 +1881,8 @@ def _MS_parse(str_ms, prefix, delimiter): key_main = line[len(titlestart) :].strip() dict_ms[key_main] = {} elif line.startswith(prefix): - if keymain is None: - keymain = "UNTITLED" + if key_main is None: + key_main = "UNTITLED" dict_ms[key_main] = {} key, *li_value = line.split(delimiter) # Key is also stripped from beginning or end whitespace @@ -2006,9 +2003,9 @@ def _stringify_dict(omd: dict): if has_units: _ = keys_queue.pop(ku_idx) vu = vals_queue.pop(ku_idx) - cmtstr += f"${k} = {v.__repr__()} {vu}\n" + cmtstr += f"${k} = {v.__str__()} {vu}\n" else: - cmtstr += f"${k} = {v.__repr__()}\n" + cmtstr += f"${k} = {v.__str__()}\n" 
return cmtstr @@ -2218,8 +2215,7 @@ def _unpack_data(self, file, encoding="latin-1"): # set to 0 instead of 1 in non-spectral data to compute the # space occupied by data in the file readsize = Npts_tot * psize * Wsize - # if Wsize != 0: - # readsize *= Wsize + buf = file.read(readsize) # Read the exact size of the data _points = np.frombuffer(buf, dtype=dtype) @@ -2374,15 +2370,15 @@ def file_writer( %s set_comments : str , default = 'auto' Whether comments should be a simplified version original_metadata ('auto'), - the raw original_metadata dictionary ('raw'), skipped ('off'), or supplied - by the user as an additional kwarg ('custom'). + the raw original_metadata dictionary ('raw'), skipped ('off'), or supplied + by the user as an additional kwarg ('custom'). is_special : bool , default = False - If True, NaN values in the dataset or integers reaching the boundary of the - signed int-representation are flagged as non-measured or saturating, - respectively. If False, those values are not flagged (converted to valid points). + If True, NaN values in the dataset or integers reaching the boundary of the + signed int-representation are flagged as non-measured or saturating, + respectively. If False, those values are not flagged (converted to valid points). compressed : bool, default =True - If True, compress the data in the export file using zlib. Can help dramatically - reduce the file size. + If True, compress the data in the export file using zlib. Can help dramatically + reduce the file size. comments : dict, default = {} Set a custom dictionnary in the comments field of the exported file. Ignored if set_comments is not set to 'custom'. 
diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index 71cf75873..5dcd59780 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -141,7 +141,7 @@ def test_invalid_data(): - dsh = DigitalSurfHandler() + dsh = DigitalSurfHandler('untitled.sur') with pytest.raises(MountainsMapFileError): dsh._Object_type = "INVALID" @@ -435,7 +435,7 @@ def test_load_surface(): def test_choose_signal_type(): - reader = DigitalSurfHandler() + reader = DigitalSurfHandler('untitled.sur') # Empty dict should not raise error but return empty string mock_dict = {} @@ -599,6 +599,8 @@ def test_writetestobjects(tmp_path, test_object): assert np.allclose(d2.data, d.data) assert np.allclose(d2.data, d3.data) + assert d.metadata.Signal.quantity == d2.metadata.Signal.quantity + assert d.metadata.Signal.quantity == d3.metadata.Signal.quantity a = d.axes_manager.navigation_axes b = d2.axes_manager.navigation_axes @@ -607,6 +609,10 @@ def test_writetestobjects(tmp_path, test_object): for ax, ax2, ax3 in zip(a, b, c): assert np.allclose(ax.axis, ax2.axis) assert np.allclose(ax.axis, ax3.axis) + assert ax.name == ax2.name + assert ax.name == ax3.name + assert ax.units == ax2.units + assert ax.units == ax3.units a = d.axes_manager.signal_axes b = d2.axes_manager.signal_axes @@ -615,6 +621,10 @@ def test_writetestobjects(tmp_path, test_object): for ax, ax2, ax3 in zip(a, b, c): assert np.allclose(ax.axis, ax2.axis) assert np.allclose(ax.axis, ax3.axis) + assert ax.name == ax2.name + assert ax.name == ax3.name + assert ax.units == ax2.units + assert ax.units == ax3.units @pytest.mark.parametrize( @@ -650,7 +660,7 @@ def test_split(test_tuple): @pytest.mark.parametrize("special", [True, False]) @pytest.mark.parametrize("fullscale", [True, False]) def test_norm_int_data(dtype, special, fullscale): - dh = DigitalSurfHandler() + dh = DigitalSurfHandler('untitled.sur') if fullscale: minint = np.iinfo(dtype).min @@ -678,6 +688,7 @@ def 
test_norm_int_data(dtype, special, fullscale): assert Zmin == off assert Zmax == maxval + @pytest.mark.parametrize("transpose", [True, False]) def test_writetestobjects_rgb(tmp_path,transpose): # This is just a different test function because the @@ -718,6 +729,7 @@ def test_writetestobjects_rgb(tmp_path,transpose): assert np.allclose(ax.axis, ax2.axis) assert np.allclose(ax.axis, ax3.axis) + @pytest.mark.parametrize( "dtype", [np.int8, np.int16, np.int32, np.float64, np.uint8, np.uint16] ) @@ -732,6 +744,7 @@ def test_writegeneric_validtypes(tmp_path, dtype, compressed): gen2 = hs.load(fgen) assert np.allclose(gen2.data, gen.data) + @pytest.mark.parametrize("compressed", [True, False]) def test_writegeneric_nans(tmp_path, compressed): """This test establishes the capability of saving a generic signal @@ -748,6 +761,7 @@ def test_writegeneric_nans(tmp_path, compressed): gen2 = hs.load(fgen) assert np.allclose(gen2.data, gen.data, equal_nan=True) + def test_writegeneric_transposedprofile(tmp_path): """This test checks the expected behaviour that a transposed profile gets correctly saved but a warning is raised.""" @@ -762,6 +776,26 @@ def test_writegeneric_transposedprofile(tmp_path): gen2 = hs.load(fgen) assert np.allclose(gen2.data, gen.data) + +def test_writegeneric_transposedsurface(tmp_path,): + """This test establishes the possibility of saving RGBA surface series while discarding + A channel and warning""" + size = (44, 58) + + gen = hs.signals.Signal2D( + np.random.random(size=size)*1e4 + ) + gen = gen.T + + fgen = tmp_path.joinpath("test.sur") + + gen.save(fgen, overwrite=True) + + gen2 = hs.load(fgen) + + assert np.allclose(gen.data, gen2.data) + + @pytest.mark.parametrize( "dtype", [ @@ -776,6 +810,7 @@ def test_writegeneric_failingtypes(tmp_path, dtype): with pytest.raises(MountainsMapFileError): gen.save(fgen, overwrite=True) + def test_writegeneric_failingformat(tmp_path): gen = hs.signals.Signal1D(np.zeros((3,4,5,6))) fgen = 
tmp_path.joinpath("test.sur") @@ -920,3 +955,46 @@ def test_writegeneric_surfaceseries(tmp_path, dtype, compressed): gen2 = hs.load(fgen) assert np.allclose(gen.data, gen2.data) + + +def test_writegeneric_datetime(tmp_path): + + gen = hs.signals.Signal1D(np.random.rand(87)) + gen.metadata.General.date = '2024-06-30' + gen.metadata.General.time = '13:29:10' + + fgen = tmp_path.joinpath("test.pro") + gen.save(fgen) + + gen2 = hs.load(fgen) + assert gen2.original_metadata.Object_0_Channel_0.Header.H40_Seconds == 10 + assert gen2.original_metadata.Object_0_Channel_0.Header.H41_Minutes == 29 + assert gen2.original_metadata.Object_0_Channel_0.Header.H42_Hours == 13 + assert gen2.original_metadata.Object_0_Channel_0.Header.H43_Day == 30 + assert gen2.original_metadata.Object_0_Channel_0.Header.H44_Month == 6 + assert gen2.original_metadata.Object_0_Channel_0.Header.H45_Year == 2024 + assert gen2.original_metadata.Object_0_Channel_0.Header.H46_Day_of_week == 6 + + +def test_writegeneric_comments(tmp_path): + + gen = hs.signals.Signal1D(np.random.rand(87)) + fgen = tmp_path.joinpath("test.pro") + + res = "".join(["a" for i in range(2**15+2)]) + cmt = {'comment': res} + + with pytest.raises(MountainsMapFileError): + gen.save(fgen,set_comments='somethinginvalid') + + with pytest.warns(): + gen.save(fgen,set_comments='custom',comments=cmt) + + gen2 = hs.load(fgen) + assert gen2.original_metadata.Object_0_Channel_0.Parsed.UNTITLED.comment.startswith('a') + assert len(gen2.original_metadata.Object_0_Channel_0.Parsed.UNTITLED.comment) < 2**15-1 + + priv = res.encode('latin-1') + with pytest.warns(): + gen.save(fgen,private_zone=priv,overwrite=True) + From e6744334d47c69efaaf3b3bc852ab3fce3bb632d Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Mon, 1 Jul 2024 15:07:08 +0200 Subject: [PATCH 16/21] Cleanup obsolete testfile --- rsciio/digitalsurf/_api.py | 8 ++++---- rsciio/tests/data/digitalsurf/test_isurface.sur | Bin 56141 -> 0 bytes rsciio/tests/registry.txt | 1 - 
rsciio/tests/test_digitalsurf.py | 4 ++-- 4 files changed, 6 insertions(+), 7 deletions(-) delete mode 100644 rsciio/tests/data/digitalsurf/test_isurface.sur diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index a8880a25b..b33f331ab 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -2382,14 +2382,14 @@ def file_writer( set_comments : str , default = 'auto' Whether comments should be a simplified version original_metadata ('auto'), the raw original_metadata dictionary ('raw'), skipped ('off'), or supplied - by the user as an additional kwarg ('custom'). + by the user as an additional kwarg ('custom'). is_special : bool , default = False If True, NaN values in the dataset or integers reaching the boundary of the - signed int-representation are flagged as non-measured or saturating, - respectively. If False, those values are not flagged (converted to valid points). + signed int-representation are flagged as non-measured or saturating, + respectively. If False, those values are not flagged (converted to valid points). compressed : bool, default =True If True, compress the data in the export file using zlib. Can help dramatically - reduce the file size. + reduce the file size. comments : dict, default = {} Set a custom dictionnary in the comments field of the exported file. Ignored if set_comments is not set to 'custom'. 
diff --git a/rsciio/tests/data/digitalsurf/test_isurface.sur b/rsciio/tests/data/digitalsurf/test_isurface.sur deleted file mode 100644 index 2719726e8197eef500378a04998f2d7935ebf709..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 56141 zcmc$FbxbBOu;&7cyE`nfxVyvR%i`|t&WF3Zv$(svyMDO4`-i)`+`e31-uvV3a`)Fw zGij&Ow9_={{5q40Dv8L+$SaB|DT#@G`v&%}fPbU;;bh|Iq~~aFV&vptV6A6kVE=>W z$NwFHefyvEpY^wIKfZmgv3!vSFMRX+r}_{6_UqfXV&p2W|KQ3&N=`KkV<+?fHvMlC z3o~=4|LPEt{vm1WWMb=R;pF}wy*4)grTbsW1`N&&|9!;&>-c|G3jP=Whr$16l3D%l z`v1Q2|Bv>6+7cCIEApT1|Ggdm^PIrG3H{rHTPB^B$*;uiWrB7WgA|jKWLwPh%gf8^ zoK1QcS6BTOKOCz3{L>lF*U>~8x0$U~lh5z$9wh(Kv)NH2 z{%$07e^z8{1NRw2X>0AU@2l44M5#f0_@LPeOVY>3^q9u0J3!+F+IxgXOv%@h@F0ND zL7+P1YCz?hRy|zuG2;j4O2nvI%deg@C<5Jjq9*+29q)Hj(F{A0&kGx_+zxmb9$wT& zb9hy|jWG2GnP-f8une?iVT@X=8&iB{?`3hX)HWn6D~>(w6-{$S?MRs_wSERCfpjd{ zej`w~e3ljb8!_4dVjNjxtbpprcNe*oop4orHWOqK?-`83vMG_7@~>W6TKRh;#Cvf& zAs1i0%(q&2Z=OqGZDPIma1ZJ65Zj&1&R{Gms+Nbz2 z{B)Us6S_cY&A@$2q8$SP!c=>+ z7MBdoWoNi~6UMy_0(YEq*CozgR<8td7Trqi{UA_YL%? z(M1#fPUuWuu@O&c>59vAa@>$R*@zXE(Zg#XbX`X`h!z!-PX6D6<IhI4Gqb7G7t!0LI(8yNTpQz^MRH{+&k;@{bUdsS3|bo)za+=8TH1_w{>?Sdz>@Gx=Q}T-@Ihk?wlB#mr+u_U3D&*l%&I}p!jf% z3gF%G{2MC3mN<|hQ(ve>AE|uAY*(0>!EL?QArITZwh?#>tpoTlWGpRz3_ZR3d~T`X zMA&-j-JR62FB!t0D$)&ebn1N-#y?*>46=wPI>w_sHv*^`=;|Vaht!4HmgCJ-1Habt%)= zIVVC!^=KNta^PQALcD%CiH@n6r6Rcwe6SyO6kE$NC_n}rq>7aNl5LUz5h7q1c zgi|0rI}f2YIA$?B<-;G^y7Xx&VP|q(1gP9)-ApW_Vxc0XJ{DrCMx0+id(#fIatU<< zG5KrScFtKn`C83`wSdv~$4TFnvd(EdrlciE!E$aA`KP>eg3aw}-e4 z((>qul-Kjzqv)IQ8Us{0dEeP-?mE0TvW5BQc?H+^2C! 
z%E;+?)6!O=UyXwfT9gP3&cO4FDXtRh$P_91Yfv^q3j?S8kq(=b8sYBsiIm%~&VWgs z8vDM^o^z%=|8Ndcz8r&y=nLyQvPHxa8~!@G4(!_+51@^?*2t8*VOAh7L9kovM%6?jo#9wvOEiP>G9l?E%uAl zEC7h@K~8cc*M1nqI9r%79J^v%K%hrqLUuNp2YYVo~98_Jy{Kv`}RlY0&86weM zA@670ZzJqNZ_^-YVl+oDB{DkO zg1W&T4DWgCeo&Xe)HBgR*rAH*ih@~im8t5=j$zDJXw)vcS9i;?<$mQlL-Q)2MH~%6yn=ISD~WpGZDe8_`xcl* zuP?X-Hv5#B$2N#-h1@~LH%(Ym)0#WA4Cy`9Y7jF)ctPB1FX?dvpAD)R6?#^e1FE4Q6XU4mGnfEKy8qkD3gK#f z2*+m(M#>}Ia8}>wy~`?K$!>zVpo=`DtI2Q5Oe$)cNuoo{^m@ZGh;>XnHH~x_v={A71QA;%pnG@@-QIZBgSoJ0BE*ophzF6*UFuFMmKA`4#ya(CF0&gK2?;O ze_jVWr@m?|b(pBY7XrX2LTLQ1|S?}5a$`a-w%&Mh-Xx8LTR#7ZF2C$20W}M!@u0+uK z^(LD5DU{h|zz*tRG;cJXbgn&&WKxBoRT;hFas<RI7uuy9(b}O{eZFowxd= zqu@@Ildt8`uOXDS5v>ztZwC^~t)FiR-ABdt_sU*Kh2F-_(+Do*5syJ+4R3MuanLw# zF-WmzeZ-z1_%f}^es}@p=x7umy^izZGqm!x07G+Ns;*@oCI(gfY-1Ru@)osAY!UEq z6g5jbgusdRosO4IY)FzBufrmr`xkUcqL}>&t(XKax4*ji=Z8UrE+=UPG=qRN2Z-;%;k&+im{aA(B@hokGu-%;AhTEoQ&}Zr zw#g8fnbc!!P0&WLqzs;f@aMM^GAb=`<8+1yO03H-rUFBk-S}p2Vs!Un{Edp@Z?rr+WRg`XoZ*CcAevB z87kWKe@Dk{yK2w4twUtvuNx+rBB#ox2BOzf&hYYLpckv}ZmN^f*~RUCy5iP%;~H+* zKv=UdiL3jA<=^eMQgRAk>X6-Y8HGWMLtX2b1opX6qF_KBC`$`QX?hAO{tbl~j)gFuZZXpmuEm~s z^HUie>qs>9Q#J}#fCooebsSPEpW9@FjDo9kHzT@iW2NJ75gFdgY+eXblUU~)U5Mio z)ZIYlw~*H!qk(YqDK1oCH8NRgJnj&9!}qBZwQ+ViXVjiCoWLXezN=g?EVhVUXCVk& z#|(2Kb@8)E3ycoR`XTBB$EzhkX9Nt#(yRIUFpP6vh#P$hS^wrYqojNN6*cs)emrg; zPwuLwzpH#Ksl`j2Toor|-n4&O-(xte6ZemCjH9ThOjpn>!LuMjYVG597I_HYJ&M9N zv@zc)3-(r_VlhuOM%0M-J))sw`HQx*#wj>x08{|<; zvGh*svYcYr@ZU0)FQ}U>P@X*;I$4Bo0pj|L#r^2i6Y|uS$oD| z7@E{*=SnMsKX^0zPNXV!9b?HLHKjkd$(DXjZ{*C){ak|=G3jhSA_5GpL6G6Vba;<3}xy0&iFc@NK{>eR-%wMJ5e8Cd+Y|DrZOC6n3Lf5#UHH{kq*-V&W&*4Y%4Vfty%5!iRHLooXK zB&NMk86Azi&t+|hhz$?Dq2d@iZ=t6B*K@Bn9=ck^Z7(|oJD{hXOn4F(=+b7?E5Z3( z%A2=31FLotun^4IpAebnD1$6c<2&srO4{XsRXWfv>*%36C3i-R`Ela z+&e|I$ym8^b5JtiBB*UL}{-&(_J1^Ft*q@Ic-(~uQ3rpvtJaxU+YX7bPV%unaR#k?IbjPaap@YY2PbT z1$fA&oH*yZxzFY`+-f!4k;LZXa2>k(l)ZMcS1?cOPRYt`90gf9@QNCm*=z0TCm24^IL z4IwOq&a)G029a2gQ=ihq6iCR*sNY7n&EMA;f0Ifi&S))2 
z<4fplSTGHtS_yqka7u_K<|c8Z8h$~c-(Rzd{tCeGpk1i_Q5r8vLLOC5YgqmVH^-Cn zcJi0=F#-0Y4iI5;y|JH+UIOw*)_v1S!!RdLxENV5}q<2>! z1d@4M{+EhpRQw95>@?AVurxyV4BqC_V}JCFNh%RR3{6`vZm8;Pkqr`~;KyJ_ilGKQ z?QT5n*t0UF@U0m&O(gluW1``gCBHN=_23nfL-6M$4GPn%5PQx2h_S*e@@aeU?MQcq zwTDv}XSA(DhX|7tFW-JvfDuB^UDPA%JlC-^KQf|G{)g{|!4Hm8_DuNCldQ zi_{)djXDv3$^_Zg%S0t@$xEXpMXb%bWegzr%zVz_{QmK>>&;SAxXf&sMqz0)c&!?r%PpPX>yKGtEI%K& zW1~h`Dq2P++~W_@OJ;Eu;YmXLRLsPlpoB%%@OWVtq(?IPCC_9;`KH9r@%O8XA@>v{ zP9p-pjATht9nC}V3Fn@uC0;`d>DAns>bazPH^ooBP&Frfk43lu9lpyW8s$mrJbRRc zqd@$nXJ?)G15n7oF+DRhje;08s@OM_>v8yAwNn*g33i9SI!ERam*?L;XsBHg-*=nM zjgV1QYZ96y3^G0#MR^o~KT} ztVagr$K_O-M;L58yKPx!jGA(M@Ld#Vx+$1D%Qs(HU} z(3qc3KBm?$!5)$#n=iQSZrIChe4U2NV9MH8If@&%<9}W{`=8RPNgp4CxH_mQyShLv z^MX`c$quIgLq|xL>dh0-;yRxCZ&BG|~&up9icGc3~xJI^X`R{oT0l1&QTYKRBp%3#qR0t_@IQ||;mr|8pqRX(hy zhk8g3Qp)Oj9IlWhti1VQ4I=60 zM@9aC0w;`~w|*gH+C*~o;=EA)>~N0lf=ZPFCgG}tbak#--1~RIu6HQZd&b9B>~xpK zQCfuT@r^2wS@8Jkf{ZCfC0>S!i_{@Ie7^3_tj3{&Io}0$1M%ZUy@+V9Qf}%iQ|ENS zV-UP0o9fNzydP7xD$?C;i@viG;Tb9=<@z(K=T6G7I`L5q=x0*9O@34T>=%F|cwL?U zK1Jk}@E724Nxw|<1Wo3-V~0&apa2Emt%;ok&W{tyPba@0%6lDNt}57gqP;5_VO0joF7@<0HO zL!uG~VX9Dpu|0_OKiyY4zQh!1>)#1TaNacZ5)m^#XbS(6S_TMG3(pKcV_@rfH@ zO?hPDv)A5hb`}Kz-=;E=7RLiRT8(0 zT+_g;v6fTnMgDP4Ox$W-ZZ8;{XtbM!P6h^%TTK5N$5ML9e$j%%HhJT3m=#j`ezqXT~sv~oyZ8AHH5s!%Qep@e2SU^Zo zEyfnSL{B?8AR4t&CFU>i@-;o+Ca4b92D@s@ml@6bd&FQbIx4;K@X*Sx63z==XY+RT$a*t5?(WzeXt8->=mIqLp{7PA1Q9YvMh2{XV3~ z`Iwal4Qbhb%BV?5(jun1EDcqEIn|cSLnlG4smtfW9$oomIUvg6{8W1C$4~zrbURz7 z%&@LAm?nOT^CBK4LFlAlL@~%tylef}37}t#RVk9BprbKVd1|d1=1`WXBizI7H|ByU zUA7=Ld}l~-Ss40Z^ian)iV|t7)q53=t1YX(XVFz;xTJ7BfEI}tJ$uoDpcd$il3pGl zZLsMLQUTn?@MP|nmoi%HZMEVJUD6pBJMi!}M;|4m3TofoBIod7vw?E8%LkR~&VSmQ zr_gwutMpO&uXnbU1vcEEtxRzYv}-yZUo;21Ink~lHV-weCabS+6Yv}st1~2k#<}=0 zF6W2VDm2{$|KJ<24#a8C4IF8#9oH*jF8cg>R=Bm&gm5k6oYfSAfc`*4q3#4mbK!ga zOCTT#@_s+z2D&DqQ!Mf3?bX10t+(&py83^qx%C=EU?WMb^}6V288h0&URZqWf|>~+ z{d4N9qqw2yr4DQ(rCmJYBpNE=AfKdl%fhXE){e2uLgk>z;gD*NvcLNZgrO8xhU>oh 
z_cJcupK3@RtFp?qgmbba&G^K@B~PN-Pb;9~3fr(^E}gvU!23)OpwOf2E>SZNonulv zuEIb%4+_)Gq*Xsu>8pHju{0>wbC+Wa26NN zP}9aQYb(B}l=)Fcl|LUDpvT%MOIWcw-=jrh5Uf7L2>jv9AhoI_??$RsnFt=nTr0TS zQw@9iC#)>Ar-H(=QTcerQLrl_XLZEvzeoeh%3*lN*#Lle8V)v2z*`2(7GCDynoIab zR(7xK*BH*C4k4kyh(4iFFnM$pQ;#F1?J@tp@14j& z#pU(-S+H}1*Um#8r2vC^FfaFZU4B<@xZ3+c^sU@^`yp@S#_h>heLSA*8WkLhkp?F- zcp+NU=FbK3-7d{;)hAWp{9V%hYsN_BV!ZY#&amFlfs-5MfEgvb7k5!`V>O;R2PXCN z`WOjX;#p+fXJK3qiAO|^>%xrqH8=aj4A|V$vch;zI_H(jotO=43yzh3WKOjq(##xL zwlbyo1(rAD<$y$~fknliDbKIS=3=#0vJ0Xb*XQwvIhyjd(qD%~!6Gq++RO<%12k>< z!yWGF_FD-DrSH^%%Ss~ZjB%h-GF*=8MX~jJk2QW7lZCD2s7ZIRP2N$q$Ev;4IH>ds zoBa@mriF>IqrdAn2#qi$t7-ByS7C=&ppD<0Oyfb{F5EyHsH=5B;)lRcr0*MO!9^o>#|EG-DrTYbBrq^(<+)hNm=DR_>LB z3uMr@trD4Z&NzY4U57Y8Pwl2x5@O z&ur#%Qx>@kTjLFvf5?`mA9TaBa>6jUB@EGdOd+ISjU}L#aFP85>dCfSy}uzPUh66H zmPT6Evn+J!q8Xt0io~Z(>%~nd{7{obMb zhw_hQ8g5B|!ULW<^|Hi`0Jrt~K`OUI$jvwc;L;CcwWhGv#>x+Z(U=NhewYH8uyG<- zm`DPUbcjf?;Wn0`0f^YT_#kW|NYR~4coai$lF}!hMA7x~jM-Mp(jSRL5l0?Xw%?N5 zRA#zJ$cD~X3UghVEi{X`u0=%90(bs<) zB-=$**;MVeFJP(BoL~D$(*9T_0F@j6#(X^caD(uwtoIv&=Cyt!Kkh*#nnAi1|5EZs zt!M;i91|qloX3bYByF|cG`eI$JedEg=eiUjJP2glsS}FQh&ADQ{~hx%$fAu$BGxAr zpHKAA`FB`<@oD44AqRrd+vFt`TwuW}Usor0nV9TbgKh)q;3>%i5 zl%NI8;1QpYtEiOvHJgTd(hH4YVSNkl`9%UfvR`b4sHh^fXX}ItxFxCR#qd{HMlC1= z3e#;}A#Zv5{%ttWyTRO0XUIYvabi?K;se2ZkniSCJ;9S=)a+p)$%Y-wRh)Iu)A&9P z38$!yt5NFl7{DbJ3l5pBUYjgG3t8XG`*L8`GAmJ=HFOicwf#m1`d;F3`O4u!JS&%DeMU?K!c1 z8s=zozEfxu3SU5fv*SnYF#yxXCQDed&JKwF!>v{GFN-{UAJ)Uh^S95;?DNJ?iH{bX znCasTS2x>NZI{)aoAkO-ZtR&!2Ao)eY+{Tvuy6%VcGwz)YXJv@PU-~+>{4g296Z|*ovguGsOEw{xe&_*B;+9>?mWxHcV zhBz2?cEaX;AB%qNkPapQuep{5?wTX6xuC3IXU2owEtj8Wz%ZB<^bZfjd}9v zE{6CbKs6M8s3jBLx%7Qda94>i&g-7k`LWZk4j)Lcpg-HJ!Av`e&2<-VV;NUuwd+v! 
zyYFSF?ZoI=*89HdZ-Gi70=lD9ZigNr<=$DFuNEw&?d0K<+)I>((rBE*zNROlc?){M zP51T0a`aA|ewdA-2*N$9pDMD5w4IUPy@5E+JrE`#6pzF=_PWwFDUQAEFXe}$n=EgR zW>Jw>M@7JJj@tFkxK~Wn^B^)=%}q8?H3o)9_yPaZ=@fTTu|e_E`5RR8^3WMKl$-rD z%)Dw^-VqqyGo#+odlNdUc^idN+QG8$nQas2UARH%!{te6SB!A?bxIv5C+{%L}HTgDB{l} zQ}w<2KrVmC!XnE9ZPx7 z2zZ+KX1j_y2)xmFcWF{&-j32Pi>~5+mG82Pvh-9UKw=KIz;~QTQm5@KtI-cU(QSs= zu#4UkGQ|Or>YqRgsTFZLgz1uQ3SZXUOsdHKZXYMHHPbM0jjVX|HYxIhZt+RBw^qQV zZLDZxEe_XFVejkqm~o?$=tfz`jzR!X^uqYwO9OgBsk43NhV6uW$(ITpHpzUU#QE~l zyOCo|FEQ8mvS|_L%@mCDtaUiX<3VZ+7Or9J~!Uc4is{{48-xkZUqh%x}nx zHyNeAmmV>g_eqT%UmisQJH~~C67ZU9yW*o)2bxW)o!eu#sf*_nWHvN+5{?uFeyg@7 z{F(l;B7yW3WwQQOaeh%OIOZgZg~Ca5w$+HtX3S@b8T!{86Zcl zRx7ZK^V07=VTQ3g^J6~FpI7|N67&3{kS)ngeokYPD zV9V4ubhUqx=x*HR3l@pt>?L(P8~+A84miOTdRlI*q6lX5XqwEe#QO9ew`L%OZByIi zeHSCQ)d9Xsclq+q!74i(a5i+LEoGADvaSJ z{GEyk6k^%4d_raY#`Yj$I-}K9GJmr5B?osqvitW9xPs`-v0rmLPm42d)%1@f@l&E>njKA4RY-~^^+MhA$5KV@e^t`wQ6 z&Yv7~2nXBj&|%rV!(o!i8f)G28M>VpfZ3Y-Uf$Gwhv3x7VNM2XQiLu1lAwwM#3@bs&unc`oN8M175V`tV3 z^=^ZkyteXBayUJR47o#&m>J*AJ(K=EZ<`S{`TPTJdw1w`VBM8 z(voYvISQx-voXk<G03O9JaTb)`fN@WGW-+PHabD* zA`L8`==XJ73(mCEf_&PVXlVSOnSU4fHPG;Pu;!VdIDE%6n5W*N5~+&f{n>!tg#E}M z(IbVe+iDjf#Rb0KPSIKI^_!fNGV9RI#4i^OKqCLZXD4R5Xhxxf%d6t(`?;koeP!NL zI8-&&_i|M#630*!XmBjvtYszheh|=^*)_M<=we{SM;9F3)thdcl5gCHT9HWVy5?7a znE+|0MM>zKD5f>O&S<;tiB}G0KhV2ow!?=~3d!zhg}7zq98)s((ZcIgexjF%{an91 z=#5c%m*HI}IuDAcsC4I)1M@pu(p`T)MtalNIHhKqa&y_iFmwN>>w?NcM9?3RpC9Z& zhk8bd?)V0~kYr8(_F*q=Lz{ku9yFW>G=InE^}3w=bb#Ex7%-a=KmK&} zQp&ztj#n;62&DV*>Yaob-AX^ysBESAKu3Y3u5~q(HkC*NP$cG%E`+ zdNMniMaJL!!hT&t+eeH8%lS^XW>|F{%fEJx)PzJRKkv!Z&Slq2FwpDUi8%EbZLF=x zgb&xvyhIf!cy;7hG$t4vyL2gqNBllg(%^wRjLpvSLmHP6oQ;H^Lj0@EgG!5f@bptL zINQTh7F>Z5w^-~1*d`R9r&M_CClD9*<)7Jk#wrpkQRlktG_;S+@*)hWh0if}WW#zOk+CwGq3zw2#>u;U_fgIhLT{aG zB=PGljsl8r7PvzSM*KF0VqpY+#6J|#-L?fzmU_t=u!xNwyzS)T2TAvuVKDDZ-4AV% z2MJ2q_cByB`3Aok*h>1w&50v$kkhUo6aF%EH#YmeK^;+W6?}JH|9fdt+5p%kjMvEs zq6XyU+Z}SeBge|TP2NqOr zOW4mZIhnE<*8wzm#Kr)f&d(5mNTDn{S_DcE<0~hn<%Y!TNXfh5{sKd~Gh#)x#8;oi 
zch|8;UZy@FS(o1dG73=9lap{v=i5#8Og&A1LM)&Mg($F`>^oySHK8 zG7m&RK0={fuRBsTT9Q4tKTmFZ?LP37r8|E$;Au(Z2J~xTbnAU$-PVaZYM6FEe=Uy# zOSp9}Py>@9lRCMn$X+A!W8Y51*aC(c{!7Ht`ili)h$N3ObCPoNGILyhHFjbD5Dq_PFyhoR zJKA96`>4ufkUfRTpc1Q&#n6g$!fdY&=G=_>5FI9jJ~69{k47iDy=dYR=usP$%1WGN z(vCWXe|;{q3~-4~zUBmoPO7@zy8=SRDTy&`PyI-OA@m!q2sh|_497;BkJ;MeFS=;m zXPTmn+7s&nxuV+yh}};6aWyx1{&-epROUQnf@pm1X1cW>6BcRs_`GW(nV7Y)$;73u z%~Axix$%COHs|^q?O$im?OfC0UJk>o$*D!qqBqT6g`!ClE!-w*5fQe3272Oe=Hy)^C#Dn~ zE0B_TV$#HPg)pwRYbG9Q1-HWsmT8vvKz~!&hK1rNu|MWOEdm2lwDGT=tMgfU|7F2q zc{2lo0_2}}$wEl9b|0Y0OX^I@m%)rA;^(CsRA%ltU)`T~O_GGurIrQ}zXNPJ3*nHm zC{%&K$2Mv2yNH&H<~xH2qR-RM07rtdf^)|hf=NA@-G))TDkSc`Gi}!a z>8B7aN3HMpE<**FA`06E^yzA%xn>HU!Lc-#vaBM=ORUX>=~h=qfZ-$<53%hc;H4wq zK|`!bJ;ZtGTR=hunwx=lTD}^4uL>q(1G_$tCNzJ_Z>Ep*@90;50FB}?tmyY7;R!#a zwTSP`&h`w{m_y!Ghr5SNp^_JCl{#2tJ#vQC%5XdDsN;8q3SNH} zo!Rl+E7sE|{Nd7qJp-J#TtfZwIsYC%c5~!GsYaRC`~@R4qu1o!CK8}!kto?**W~1C z`n-WL16_KYo43>N=*{;4h^zKu_^QMa@@ab82#&NuecXm?vaqT$fGwrIT4j>B^18a(>V0qIDg2%A4c39|Y6B zKXBuf!k5mwbgSA;fkpqfVBQNo7`pPHd-1PbHrGozJCVh6?uoqFIWZ5+-BDHF^=J;$ zLOc4Q1)MsA{_qP;FJ+m(`(HE$+<4);Vi+ia`=g}+rONvDk=K0AOuNsN9EyYbi(Qt% z3A=<~o=zb9b-xa@!I?Aamo%{!!wK!BzU~EOU}&%24}JBT9L3a&rJ+s}O*hlZpb{|| zFTUJl$405Lq_`;^*M6>reLUyCF%BQqp`9HRf2U@BTNln)zj*EaONOdIXB{Ocn?fT+ z=`7hNMFF-JJUC+1<(#9auA^uwch>qS7ST=om9xtO;7Usw!oHEY@nlh`M*0Dh64J~j zA+X@hsfHneu)>Sc0Hbc4yW%q8182nTtgm7Bn;y)%mc?sUN~9Qly^n3DS5#`MCcA|# z+`cZt0(T~XY$f&tOy^mW6U?U*{nlN~^8+OyH88^6PL)m!JNw4^-SaeN zV&`rR&Sl(uZ*dH24P$X;k|~r^XM4V3TrAuA_Gks7j^MgIXqgZXhglBO&==LZ{VWdz zOHwivW^jG(tLypo*_J85nY6Bg);iA+vR=5$XG}aAy$qMiqlyk19^YM<$0A$r*fR>I z?O$^zaku4-;#~wga3QQU)c4g^(+rJQ68te$4^ixz_=8Ugot|H?M$zOr$4=ichqU@C zwK!}Vj33CQ_yDn7BIfY6n)AMnGud1&xxrr^#O!Qbna-oheU`gT&KxuOm813=T_Qc( zVUZThPm6gkm-{_q@BX^+7j$GaBoeJ+pj$dzbyeBX?;Nio>sRsU)f9`md#-UL{-c;3 zL9Zp+;PG_dI#*Q`#wEVPetJ8LO|p0!;GgFm4)^;CJO<^YR!-$h#>V^=HA0!xD2mjwGA}_{oieQs8tE`1onlOY5piW zaG806HyQV|Nf3>aZT8GMtY9O>AcBt2?b0+JVvJt0$S;gu=D1^Y-~QsvFeLbJ!gB*# 
zlI=P;nQ7ohu4YF}--OE-%x(c90&?4xpp@k?b<6S?Bt#k<456VXnuDu;bC(+=)y}W@ zFNakl1^OOyYT$bzOXoZLF5sV!X&IYwV07}-Lp*;Gj}Sg{yOV-MuXH@_+M!f&**d~_ zLBAwU9Id(3ELCn2|C>&B%d$~d*v+2oVgp?S5(r~EhbfxMikqoMF2BL|5Go?m5W;*d1C z>M_i3oLcU4EkmGg-UCvk8^TSm`{*tKQ=q`;lL*gvGW`6qEc|H_?4a~=%@O2c69U)# z5T%rgAFfdhA^T$rYs=a<3g~GxUdd4`$`fGClPRPp`PKO?nRSvuaa}y>rsNC7*=S-p z{rzgt)!_edzJ;NMDsIOsc|#$|-;$%CYnKOJO<#|3J3XQq!5f$_dhUVdyDR3mS5GLX zFp~^4Axd@(!2BHEC3o5P9zc3Tw*6a+rn*H3e^yvX7U z()VaZl?j$<7n=RwT`c7;)rTv`rXht(^~P9W+m0X44GRpr4E9N&BcA+#<+)pt#7Af zub(_40|_Z*0=Z(tD)!1D?ft|ATtnZY!Br7Y4Rc3Yh_j+=Z9)iM;~kXKaDHs4SC_&& zPMz$IBKc3Ro%T+MA^LzR9}`ZD?++j0!U_5=J(3bhQXk%Jim_?O4U;r9s56mlpNa@v-(Jn@0e+G{C;bvWTa6vjn?dI|#}&URC)cx+c}p z{X%SzQDzdO5I**i8K}nIl`qFS+*;~I)A>PH6rlR1xH5DO=}WGYE76mo{N&z;@0m<4!IoObAC>=c`3Xt;RqX@-RYshB_-TnBSss9;lrBuPiArHG6 zsWR(m!y0Ru0AEQ4t{y2Jbb9seywgB+}r$!V?P3c4D7t7TZgtd z5h!)P2%IMFnl%%~A24?oMNR;O>AJy0zIxIgcF=d9CXhL!Y>BTVsAD#|slBlu-0o%x zX0rVVQi%S1mp=uO9hl9;*J~P}yXSUs#`!#;P(^!JG(Xo7;;1HN^#@*x?-T!7U zuRIFbR+KrbKEg=CJ&_qF>b%T&KhrVUB=T{ba1+__wqS4ZH)d}t{l-A#o+{DMH4VZ0 z_TGM337=JvquxQfsb7zer!^C|<5VNHDhPS1TpnItVqQQd$fK0%lBR9DI`44RxVyo7 zqIN)pPt`Mx-21Zj>PcjRpD)nA3+hg3c(gOyElMJPyAE`rY^$L=IzA;6AgesQ<|s_$ z&HU>M`m2N0wY-bDdL572!Qeqm$ykm#c+;8nXIK>GQ+0b=hQ5E3_AI63X-!D7C(Bp6 z#I?oM+r1}NKf91d=R;?FQ&RN#&xM3h8>1Ib{qI1RlAEq|Z5vgG@BAH;&YF%16%w+j!_)i9^SuoFBlGB7^K5JFsBN7* zi@~m9^{tT4MHt@a?^?@CdCaqWAzqol(jA2zd_ z_S6RK{$Qa>(bi+i71plgkxvkKPRfNXSin*%AOn``xEx!02T_FYP_ z`LI*_IODk&QB+SnBYCpB`Up7~!F74{$Q6xo~&kbQ^@bDImoEd`T zFUKMup~~k;BZx=Dil2|197T)sqFS&%NqDsmGpOk*T@)%@(aORNLJ)S|G-%wnL272^ zA3IQY2|c&lb-|g;SDZaim^0Pgf3cr9l*Demb0Ei!lqAu&l~Vw|J`6T$?>kFbIb1hD z$8^Gu{wq)9y6J$A^JE8&g-VduHejUoiH|O0vcxK*6%@dN_wIBvIEbSwF7CFZhRlzF zprBJu*_a#xcgUq-L3{sA5lP4|5JFd{eh42z>OPudXebSCB)aUdz8staf!@fz#Y1&r zv7yBo_Myjjuc%1UX|#K=#8=YtaCwD&qMp9Ernj26gMh)myr@ zdkg$i=^1xGbM&i3}gGJa}ZIM=OHoD)Wp%i2Xp?f=kia0 zYb##$v`Ucj@GeoDL{hWFUq|ocBm9I_d$vf`;x=t2w!oZAfqkWhCnr4Bz8n0Jzb3b> z9O6XdmZLL1lTn(hy*>`){bcIyx6Db-jCCJt)G=FLSvNkh&&+!QpbUmvU%00-@18K| 
zEgFMKPH$})HnCgD(YGsozf>_EH+K=}64^81ize#5{{ccky}xyB1#2WK#0eXtow4Gc z6JiVOu};rYX#J&q{KHFC@bZch)Tg$y-(R)-lA6b^GhgsQdNS)3-_Ueo0iPx*Aic9b zYED|hx6vN|TyTR%k`E?^dc#E32gj3rF!VoPXv+FxsG28+U3b9C8D_{`Ad9c5<*Zb) zg?g4QoE|HQ_}j3gndLIod~K0WOYJxOTy=_m&)2c{_x-}dvw_Ln6n2w}$_F^`{st#t3_j>v&3$SLLSN-}+z|gN7>Qn+_HI*H?KH#N4~DRuWDFG>6EML9=qSk#@9K)) zkWHl)GL4!xNTKBHM`zm*u3jJy7UQGCp_Y$F=sicaTQPW z+eG!-SLo6tsZXoQDD5d|-&DcKA=>!Rpn)UX)i8XU9OB>Aa)f0m2bzB4xFMfela$TH zp~ZCQp2Kt9k~!S&2pxvs<=9un?E66uKKC@?EUC3Ld#WR5xFR-xk%e-`LPJO((f3G?{YKC0}Eb5@0L21D_fr_N56tyGYJrp*#|` zfg3GAP?HWAX@OOn98o0WhOWcBgqP>^12+t1?OZ@6k$CGBp>Lyek9JZJ#-!$uezYXRk0 za||psMV+oOdbFFMbDl9qUN8}L;jkl;Gm)i@FY($i+NchlbDFr|Dv5vnWnmcJPTAxl zetq|V!}{!@LBU}jdU>1gU)`nCs)Ibbd{DF6zcKaR8>)&#x#!s4L zHB+itm)>zFS}maDXe@ly(%R79Cy8TV3o?B)ZGQai(s1Lkw`y!q@o z@FEpUve+%Jj+2}e#VlAl|FfhKUP$sj#ad&L`{{O-#J%B0PyAI? z;h~vy*DB^ew=4Kut(FC~?Od`~4Nikh#JL}Q%>qV|hS+>U9zBD8@{UmwH@tq%zQ-E) za+@*gyEvfS#T^-UJkfuyANF7M6VGnx2RE#ibHV(LcGz&%5=q%=2p;pH8e?s~Gmh=_!~k_agf8;OxPU+`4ex-W&VJBZ z>y7U&p3s`)AZjM%d957i|CWnaY+&A(i45zpo{DFaIpJd-H|k4jmqYqcjIqVDFV0AM z?}qE$U4&Ou{f-^_&v(F}32u0-=!q3Rp2(i&i{h~zaeM6;Ec$c}%3VW{I8p=p!zJ%v zF9mEEA%}h=+UZ?g%YoIEoKarDBfYY@G3^siy?G&gI<8%|xWE z@b5}JMcX>bz`eYgCwkS2Is4;>h1}Zv{>`$`6;w*D49ITHxTV0&rtcBro|9GKBtTe=! 
zA!gW`XbIV#_9(M(MAIh+Xwx36@-1+?N&~mMkW!C*+`(2px6%>cPwLzCTJ44A>3%rk z=nwsWf}lM#2-*4maBb~?9j@LeOmxNGcsqnPs-yi_IWON!=2P8$e6VmkN7|j`8HM*k zf8v&{jq+M6kxQN%XD>cSZ>kk8bukirHYsCGW}YjGHGD8V%O6|5g~R9Q42<6L1arbd z@w%HXdPi%(oa#8NuY#>d6wsv-=w;W)jc>~7@g|p3PNlQ-{%v-fafQ1o?yz}51}{AN z#pO>c`0iO9cU)@bf6M!i9Tx0yf%!K#OgrX|Ze!g9CsD7vH*PDqVY!2|I3G)=*x*IH zh0xXK+F9ZJY8%8_+TlW|Gq(NXis=7bFmAmwerDKW^ha~}<{IGnB~|=YqAL88-KLtr zvC;tWQiQ{LpiE00y@N;ndcS=s7q9bxA>Z9u$PiTi$3kaTT87w&Ri<&Aga; zT4|Kue1?4!&+|*z7fSoV?luO(Q?#Xz4bl!;!mrKKT;BtjwnI@ssYY_b3|*M z2du(*Kj{r~1yU<8RY4uFd(*g@->eW!5M1>K8F{o8ptk(>iZ> zw(om3pZUNhjgK@o{4V@|uY1;Wk(QkBix1zTBQz5az9?Zxjyw*YXy7HoCO*=ULwlOK z=*jGuZ46HvYXsePz#$I@q(60ml7b7?MtCAa#S>3`e313U2Zq~ypxVh7-Ryi|s^bBN zGYVFxd5GyIh1xGhRZKcNJf zb^7S>%nW@VISCHWSla;Xc@l`*ejzw)+zD$pgdyfmDE?j>iXx+qxUn-3b5wny*25Lv zbIgUN+QPGzdWzpUWBd=^99_#e9SwByHO8keX0Vg7hFh#LddErXUYUGGZhgU4{cjBK zry%sF4nZ!sz9InsosYzSM`lAe@g=&<4HuljQz?4Fry-pM%0x)c=@w-~6v^YzMj11I6N`V%h zx~b!D3so#WqYb@@1~`)-`TIng!Bo!*o~HJQ>F0esK}ce|#rDOg?9BSRhUl?1Y%s%KHw$DAGDFuS4@hP2WAxE$h%Tmf)I*bn zCcJbt5I060Q|HQIP(U5cU5dFaIg_T}()qu?K8fD2m7XLn$L7%2=siszCNWC=By%M- zal79yT3a-8=wvc}nhcHyO78WQGC1tn$`7IS%=uq2m-We^P4+Lw&MM)_D|Iwhtf7u} zJ|`%qu^j(9-jUtiQ|`olm&a!1SfqDb=oq+RvEies**Etb^Traq#lDtFxr zb-g5K#>x}D<-8F4+zA;UY#{Y$75pyYLA8e*@bEdCWb3%V&H(FsI3atB7ydEqfHz-5 zFmX^AHmwYWW?(2fUh9ImvymuN>4MMQLb3BeARhMeheN8j;6J3#u@>{}Q@4x|bk|7m zB~xCTLuZ2(aM=`BHyA_e!@1I>-b^d#I8tLI1Bqz zUSQy~E-g<{s9{l$JU zKe<`<2hEmbQNjKd2diG8`m_^ty!C)3+kXg-K;KVwOxV`M={K4M7a%USncE5*xPN9N zzsuBe?wT4g#~RhAiQ`|@F>`DgcYD9)XW0vU?6pslCof^*v^D(r&knxtwTCt_SD5$V zoyZy1EN4_yaJ-ur<8sEYB&3SA#$WO(8A z5_i-FyW{Hua zJmECJ37W1J2&^^`zC5YUXI`-h4px~%%|MdRWLaTuu!+!`>&%tK<&J>iC1uQAD7gy_ ztkL%!%<0TvvUyc^Su z@kUZ_CY&%7z8~EMrdY7g4E<)A36^#Dt`>-%X^f6KmclzH&Gn@^IqBTwz+`Wsm7Fu% z7rpoTLfOR|x5oRT+}0bTmwSo4ATrt;!<+r!U*!kuWxmLI;En59k~;gfJLax;6wgBH zF_ZQo%rDpR@1z!X?4bz*EeDi_d86ODAW4E9hIQj3G44Sq&d7wKR52X=4Lc$HSty(@ zcEtPRA&_4cf`=0VaB02|PK4QtS%S3A>UKc^w@g*gahoo>`AK5GwFRcDnhTBd7gaO( 
zxf_ak-1PsMpkaz7I`($Nl{Pp0Gsqi8id|qicpPdh6R^`Y1SR%HLa!^;?dkQ_M}K)8 ztoyBj&H8Hid#(b~vRb*zxmj@Tx)_)8gijVPAG^=S)n~cx{4FNmNfW%Asdm}CQ&PyI zkIVTewvvsTfARO&kJR7vn%!1q3V+r0=z6|*-pu8vffiF4{8ul7AMUbZ_AcdljGtY} z)Jb_<(&qz5U3kg&MNe7fFS$E=zU7*US&ZIOLS4;TsyMXq%1v49NRStPv3pZA5qjs3 zS5T_$b$F&P^x&fsBr)I34BeNTVUDAb=t=bJrH>j#$ytw9hl7PS;+7j>fsGw@9Fx?{ z(QXLy_Q9tDPy90Q#km|mteod7czYJc{&<<=B^cakv;9Q9{(QS1Zbb!PMY9*y$9rRm zxfe3DU9rQ*9n;NhP~FW0SKal5=IrS&J50)U6K5&1M@JZc3d5`sp|Bkpj&WL{NM6)Q z@ZQ}Y1VbyS6MPSbV2f`j(Z|)E71nE72@SB69XBG(6?2>1;cpNk$(M#AM(+faUj@OeN*`C2>WUhD zqOLCf>82(02%8%yST5&UD`n`#`Vg~gV=ZSiI z(4cBo=-0B>u|M;}{v)okyx&Rw{UDXgy5-SwjO5POXrYg18|!Rk@p(SE_FQWA%n;{vtHjwkn169bV1-LCwr*mW7dlB9^u*!|s1- zcyd7l1~W7fJylb9CVnl@5I*pKFX$k~)==;sQm*OXUn^C?)hMX1q+DDhr7SC_Ek^kA z${I(_-O!@pg*$KDF>YD_Jo|bh@z)>DUYoKvGK#%LZBVko58F5UWB(X`#Psopv*ev# zbHW!7f_-s4*$ck@9tb+*jH6xb@pP{}3Pw2KMTsj;`}soqY#?SW4Z)Y4p;+b-h8;Ft zP&m3XhT3#S-n$TdG7mwJT__fv4u*c8j!^yRBkGLo(K;B|w~%igZgaxLM9zDZL#3m# z;`fqz{?B~0Lfm$9l&&<#nMg_PSzv{j?bcW~!w&nrokYzt`n@aEhj=44um|RU-6UoO zrrEmayh96XOtr-PL#iQ^at;H6YuL3{AzwcF$gv)O_&4c4UUQn$Yle5f!?dF}_;k@r ze(dvuA8wS>II)HXzZ)s#nJx0JX85H~?7HMU*U6mXvH_{A-ImXW`ITIDyq?FmwQz6= zd3KmA6xPbad4e34M95;rvSu2D)Cvzs^SP%~e0Y_6@7!kB-S22{F_%`gg&er9mOHOB zP}x!j`EL}2zN%4Dt6q5}hy1I`m>sT)4F!^VJ5dEbJ(Yn@ijXpiWoIa0L-`+OUD=x7 zd={T7czseo+TS@^sOw;bm6eV-Xyt(&S>7<0)B!Ipdn0hSJJvpPL-BZb!CM{kKOZc* z6o7%-e9%qaA8R-H;IgI<`kwRuvs>Z2H~fBkh@5a$Pj~!#&l4?g{qd@IAUe7S!k{1+ zucvmw;XzSIjgCb5%Pw%L>5TJxL-EeO6NZI^pkLQeR9key`H7uid`^;6(gBxN>ELOH zQrewM;r#QdjN0*2_<^PUbSZm4_J5}0*(a<`?wb`!C(%*Hd%HHts!{ zM2*aJ9y0hy&Hp|MAG|cy|D4@OX@9hFr9Ad$$wJP*nGJ`FxLH0!%o6tmeCC+}KX^K- zQ0TfIOqSdOijw-kLKe^N%b;hyEDoj1WBciL!Brf(IExy4uQRrB3)KVuGJr}IGO1HSXjA<<5f=`nA{D^^=#b5VvhT?k{oGWt^I~>aQJ42^g z^8OjSV8B6lu?NMjQ4)^_`=FPf4=xP%L!_oJydnco^UWV0y7}ScTR$j%@e_M!l&gL4 zYMmrTcy+@2;1HxPi-4+J7kp`p!E60)_&p;UPr{?1n;ni%`#NI;J7a#tpSyosMj)yt z`=I*?TO?1?fmEMA4&Q`6Y~j0Hp=hN0o*gx=fWr*<2f;)x4OchSm2~@VZ$Zagj2(HoTdOt;+a!_e?q;zRpwmOX(av 
zg<0!1^TeQJ{>3l!SIOsQl#9O2gF&rKc_|MmWB>U`HAsEZv3&#tHKzMp*cBayAHN7$-z znM%PAXg&6&$S?0H{oWQcw-U#U8jU6>U*f`D)L7%+wq`NQvH{BnB zmi`#@*$0313V__!4%pPU6Y@PmvG_$e2E6Zre>U`huTC%2Kk9{yH$CxhW;apuDShvZ zzhc9&{(UDD%?U&7jzFy5?v0wYHrRes2UB*{vAa_`4GSOg(#~hB^T_4Zxh*{ML7d_{s&p+6h($3W3au|~g z{77$Q`qu^~_A8~F$xmKg^_ut1uJil*J^bs=Zo&0D^3NNpC*(8HwT8X_X<|Z3i|D;c z`|c@&)$!uEmhjd|SrO7WD`n-zzi8xVwE{NVzM$sai>w%Si}^KQ_#wEI`AYSiHNBRj zpSMWv`gSh1Xrb4p2Da-LaOB;$tRHoki;C{B@oFN!WMAgi-if?+>VxPvx=8AUeTOQk z){;fzr-}S&c7=tl>0EDHCirtP^$j$8R>%<_A94D!%bc<9E#oFsveT(XQJmA$)b6=obkJfP6^tu+irsO9_ILAr;nW7lK3i9!ePCtcu-dzaX~i73wOoxx9+%J z=459i%<3%sI#Onjyp|JmBQ!B!Oc?{b zZ!!AlW)#)~F=vg|=~8=sZ*h(iumKi#{GqRl~a{a*)<(8?RQe zqxut`K5&x5humVk?t8lSEa2NmC6af%gx=m29I~Z_%MLbkNIq!tIfli1x>nQd*h2%Tn@lr?3=xQ(6`_IOEBVXGO zYYuflT|y^>HH5>xX9SKtj)A3WH$0lr9a^_~AfmD_LS=fuv!M%~o{zvJ??}YY|5Fc2 z`+vLF_~AsZD<&+~MakB3b_l%A;^nJ(p>z&gyPak1=4`6zlrY<{klJ&;aqRI|bS%3^ zx7v5~I#(;a(QB7!H4zWn-I?p-ZXODmvd{pp>M>KzN(5_PY^=%N|TirKG!vBw}>X>w010(EpgwA=1 zj3Hz@=?HDd-NnkNd($Yq#BCAjT&DY(SMKL>QtU6*27jm4(GN_1_mz7-m9eK$HGL13 z@^{ELu3MYJu*x(1-uW_>rzddm$}{}0{yP8bn8EqB74*JaFZdX6&0_NCr|dua8Bd)0 zDYTu=YXOTWIXqNoqqKiDrBgGdTtX>luko%r#;K{Hn^ik2{>`DXS0eqNC2{WEJX+jS z5T2#*N%~m&OckB1%Xl#39dqlyvRhw>9zsO6Jz|0!1e3qpZDZV6XDB?z)n3MknrDPP zFU;_BtsRmJU4coym^3aB^(~!{_aY3|!y_?xWfYIGm$A`s zcaK8fzanu*ITFT+T~M|m6pzjZJyMH{7wZ-50 zX3kR%`7;s_V!-eb}85?*_+DmbrFHicA^u92#PVs}sUj#vcww5Flxa&6GUVA|M$}~Q{n90OrCCoX~#B*7|b{AE_ z#IO6Mgm)$yg7I@ER#J0yG7uV1X*}2+tBy1;MdS}B$A(o2o_0Z7DSh&O(#-iWH$|M` zfgdUC8<4{r{#Cp>zl>wcGC6zkMf#a<7i{_QjgL86{S_Ba`pAjKgkkFIA*>0H%H%k6UTmQfaSr^L02NSUW1*hS0N=|E$v9=4|Iz-PP)UP4x{;U>c>X8Jr84Ca71P9YPZ`{a)5fgHmbfy<9`@sH&{;(n zu`Aoz^16m*?hVXNY-d)RJoFx^3$}a4DJv+dI-`4{50+*GV@Z!NwET|5qsLL$Y88!r zp8v$u85g-K

{n-rzvR`z$>6l`Z#MDD|48m+PY~-Ux&GnBz;O z6Oyj=z#-2=xNZ28X&z5#nfaDirl+y^aS~mh9AZVwa-P-M%&g8QX{CBz@EMMkUE`?X zX$yN}q>0QS8W{KC54Wl4mX_$RNIjv_o@`>0E;_@>cDvc^(>b3@kjQ|qvs$jubEur5DiPplKhx&-W zXN(0aOcA%h3~w5BG4`YeRIbUxTUj1P$2E|=)*M5lEMaG2gFCU7!Z&=Q#1L~Tj75J% zcd0#G?n>%;En^IJX=V?@SA6~Y0q4!n;05GxyHO4ozbm8k%$3HQKzFN)*fZDpTnGG} z(HW-}M7U!bjur^A}1EhNgW3xNp zbE+>+I(Xr7l>??Vm|)XeeY}*_5&jlw9DI}clq(}%aB7z71A6)|ma z2Qzf2v&Fgn-cWNNh#~p$@J=aY^wj%&U7W^!LsR*FOagBn+Q4<%QyB7dIltuY|C&g{xV};u^bpOe>l`WimO-?YN{Uv#GW3{SaPn>Mh#;ey_!tXQXjIMY`qzo}B zm#kZk4&F6u;kLFK2Fc05I-!DI(O>vJ^)+iYe&eUJH9Tnw;m_eeEn*%RshP(gKl2$E zCy5Ec#dMV1XLW@ooSIfCGy-yAU-@`{vf$S}N`EH!jj^Vc^ncOB!UP3;`l^Bb8T#lj z+Xx>1Mwq_H7%@*Q1*<9Pv>7J9mE7MGP2lEhDl~i9L+lW)>4ZUtU7(ZY44*1z-1m1z zj;|A@)j459o+BDxTH)I=9fUut;Jbdw%s6+GCHBv$F#ILQue!;Q#}8@0Lz0(nHiVzN zo0uP4{2PK*T|(hh8i5%hQMlsW6{pO5;oaXc=>1O&w6{f}SUwV!+oQ2UB@!ktqwu!9 z3mP25arkFv07nIHeeKOpT?C5~CODxg-h%ux)N=6K9WRJ`SI^St$dRJN8 z`Js&E)3h-uRTq1HnnTCL4$dWxPUPTk zXLX$Mcj}*ASub9BAj{JOb0WOa#nc0*8(d&I*%=#bY+(FYAN>^NQ2e%l;y`>g+1%ezBOM4X-3F9;rTp=g{Hj)@B4m?sy3>-EtXYSj}D*7e3{#vsT! z2FKH)aN$cA1baonFEAQc`bMHCBmz5}L(wBE2o(*1nAhF`?LGZrR_2XkKRjUQ=Pua7 z&^Lp>i@Mmq=G#*DmZdk85-6q!mn=BUlSj+wD3_*AC_Bn@Lf`e#zkl$?r&}6+e+%v zICT`yP{-W!N-!@_gSn9=8m4Mv+<#h7?5_H*LScEJ+R6|5_4<-o4F-M?bOelS-bQBZqJ`EofQmJwKUlkXXL!mtEVsGRs7M}(;p3^y>Vi?FV2*BV&qgw zy?5Rn_I37Xl(7_C!7s7$SmNC#SfTHB{mDbtEG?$hZAI)mZG~%_y>YoP5S357B!8l^{R3k`M9=s(=1ssa_5sThiTLb}WJ zW3D|$wR)q&$nIElV-=*mo5ANZ_`gL@I3n%>pN&~UWuwV_-g_;d9zDtfcMmg2c>_%z z?Bd|7g7f&&b21n*#d$Ru+t3DLed>o)-3IYhmMb zNzS!KSFnk;zt_a`3QeKMk?t6ivOaqZm)sS{{_Gi8m#rgoJYJ`?A)lrM<-z3p)fF^9 z`;HnP;~6sL1Wip}QRPShhlP}Kl|~+`mwjSg^D}0rpXAuf@nRnNcyloohqcr5ni}TR z>!YBnA>1#S3;)&r3M=>xbA;t#do0@F3d?L~#M(*zmN$Ixy~Ga_hljxRav*x02}Hph ze>{uwL$?n;V$Ys*1}xoA+xkB%F?(IPQWw%)4O0KY=OrqbZ={7$;|!sG*iq;)JFfA? 
z&6*(88g~XJghOdV7x8@WPK`q5z8DxW8lCS&Lv39+eqN0bdwaVKibSq=IN}V#VgIx< zmidREZFeB9wROO-=l=M2iXV2q4M4Zo0ir)xQ}2XtNhXNxpX@Bp5qnrd4IVq zj(h52(hm#l)Uw6zosQz(NcqweJ^bb)p)YAV<0BKop7OuAi`*Exo$5C>G41;?_I`Ms zr;_e4xZ*Z%r#)fs|78kam~`h&XIB{n+>%9v6KQ?5jh8%|xj(m^9k0mXzt8fb4`10$ z6JzgaqNkoF4rXZ}MnMg^FaPX~DJs*z&<1U+u+R~_*2Ob*g-3Sx<3IWNA_rY09oG@N zCK3-TN%G?cp5Bzrsh;=vSL*}zF3IM%Rdr$qzyak(UYJ-W{9&UvKA>f83Mcu0q2;Y| zIykn|C|e1^h3aCiX&h>V2M5g1X}t|Rl5BDIkuwIHJHuwPtI+X`boYaGXb?WGmDJrD zAvjPSh_5EWf}1v0)gSo}{c!G$C(LWzF>sb6R8HHAU82(7>e47vk@HDC73HzU&{VU= zqXUvNR^b61m0;wQgks{G2+aHti4%9iP!t!5Ub-lh@$_LAgoRJV?d~n?^50u(S3csam*-fG-CVfq zAU`x+r?q~8IG__0b&@PQWJlbit!U4#wP6}nn!FMGMC zA=qcA5Uzl^cjVC4o?XA$bSsi|=FH#e863urtzX?Vvi+2Az7C zAbEu@Rt-_dmM~3Rn`wkT-u6hB^~ItYAqbfn0@)sg^@$v`~EwDcyUwVnP$$E*H`x z`ZXtcz2Qs49LjC3qm*rtI#(XQ*C`;OS_!#f@^Etg&id(3Zf zLB>fBY`^9qdbQ&%I^fOZ0GM_S#(+0L@Tus4Q8NRvIyV@7XLrCP-vGGE`5|b|!1797AKAbS%FP^ozMbyJ zT7*BpaC`=zC!glZ>`OfEo=(@g5)Kb;X5;`_=$6T2`a1=D?9s{*gNtd~^C>Tly37f~ zp7P&gxm2Gm$vqR~abTDh5*ljYz@ zMh8sI3`Cb90odmm0Bw0M?8$LOs)>WhEmXQFqkMTj_Y~iy{MiI9YyL=s79|Y3ZG)y7 zZ-grZW5U-?C>;@kISV?8-rm6r{@B*R4RkZd*ulVq*Iz{ceX>;rmv=RS!EG0zN7{PD z8+lc(7?WU)89K%|Rc0Xi7!%}8adE9B#+h2;=X8CHw9*wmc}0G`a4_N>Ud(D9BwA4i9JYC?TK8xF6#Q}qNv(Pa9a}&nTp); z&~zOPU8jnnr<=t7-$S`~IKu57Tgr2}>Pizww<#fgyawD1wD4R>Td+5!`{VB9Hgn^I zPgJbGOXYX3g`Zx&SRUq$+9a~nA7!Dc44 zJmXtGVEY$i1i8DTI>86llJhibLy*uAtjO|3gq{!NE<2!4ixKpfwQ%OYujmwbU)+}! z!G>bT!~cq$vAeeu)?Ba_{yAx%F=cNfrJk+T$JMdzm9Ai3*n1kF?X;fQ8M^zUq0j=f zb}~fI_4?1Q!#&;vUY*J9AK^W0`Ok9A6inB#MuOCoM?i1%3z3OGX_jr;7g z@&mu-mQZJp=(mT-Uz>SaL!vMe*eug7i&21ket|m`e}Eg7h8kBTfgkSr5+9?`oUuM$#Z%joyH@#GaiGdC-{-Z7QU&TLlF?Ovs zrd$PVB5PQ={}**^N+=WCLY1)!Lfc%LrzP&|cOCwCb)}w;%O@RCFxUl|Yuqrb+yh2k zJz;;%69blbiMg*-53bSQ2NeOnn4an@qeCZ{v?) 
zy7)ES3YiUd_-m_;*q<%Ef43aY;TijPbbpr5=4A~Wxv!O9tJ*2$58eHs0sH0pxKnS0 zOJntMZSNnxm(-hOeoX@}ciE!pMigZK*#v`IkGN|54xW5^mOPnA)eToUbmul+3f{t& z9ydiD9WW=49XprNw5Xb9t+nhwwVsbYHgnCKRwl?+^VO&_)_Paa=Ry@1UajH9SB(ty zY2)->%}gEA#FC>m{Of)x6}9rX>UO5!ABMT)GxI|Ymm0Tnj+X*Lid6(7Ix9^ZvFQf5 zmu!Uf{>CueWq<|4^ssi14)&x;YOI=ex(>+W8~<0LXYDk$gwi@hT4zYl|M*79JJ4W= z3!{xNeT)%457NescopQA$zsM($#b}-gxy1x1k1jxN(oE1$U@4mKP#^!cFioCX@U4u zTbRZ>;`SD2d`xwM&u9niy=9NYtM-`k&o1eB0W0*-^P85~Z@I8pQ+Su9 zF|#0E4#5@5`0z{(u}(T-Z&A)eUBPMjcTzQHhTh}e>6iFu-aE$bC}gjtzxjI5FTrF> z%#?xO2~9NTYY9f@Dp@%=cc@{mq#jDY+sw(I^pW;gI81x)LI=M$G+ca)Ay=+5c-JEu z@3_sO8;`N@914&*e-9NlARU3W(Q^W=LHV$p6rHwT950()e_rGoR z@o#_$hHNl`O|B7!x?3PY!v^;|m|}~G0q(5TLF)gskbG1NbK*1*@KptBMWolD5`MIP z$LD@IY+Kt-DN}#UI}6N;aKMvFORSBwM!2&nwmmV#SOa~WTV^EK{?c4csu7*E(Hq;J zd%`i@8U8-jxPDa=QhxpPxCTn=BdOn0$}pGiY{>I5LML@AeD+U+)xdwH#R&XK1d@N~mh;geoo8i;BCZAOv81BUt? z=kU}2G3M_l^m9mK^qFT2RDCYE$HjF&*tvHu$F0xgj3!Aed!5E+pKsLP`kP6IO8MPha2N!rU}ldTViF2IS%>T2yfn%0&|4CFh#@};g7oJQh+h+f+OeWM`5T4P%o3Dg_dGRto zEJ|CA_QwxtJmMHXtV^QPG)c|aBa@ppe&^6RIdpIT#nf*3+??^9PwgLaWrsujYsp!< zhCZeHqi<}k`N``aesX-_S30I=vPa!F9+%JO@h(3&HX)sbKa+_3yHwnt!R((|+~@s; zpU=Id;hrQW&ACp?DR)`$GDC32mVK^eNAq?ryQP4;uXO}JN9x-+s$mR+M@ATEXCn4% z`N~Lg8y{^v>Z&DpBNknBFuuwN?#oP3@z?@Eh88dzX@#^UHb^~Yi^g1QnAckhf3#{Z zb0kKZ3J!}jH(dR&MKG0Yqh9h&>=zo?)$m)UGG3fj6T44S6WUoEQcdMCIb3@wm5(ED zFrY&g&99Tw3?=nRfE8RLt#E&*mGG8$jy6TnN*(-ItcBM#%2<1p9QL$^GnBK~b<|_( z_RHYJfEvcd|DU9@fXXWC!thIXcXxM*-Q6+v*xioZ-QC^Yfr$YK3L=V12#N@j(k;#V zfA{=r7Hej?W}MIO-gD00XYc(y%9xz5fbP|0R5pm=nCi!TYn#BPb=8zO@lsE$#6jC^ zmCE>ixA}f=7<sRB9C8`M#oI+jyph=Cc2@ z688UA$m1>9ynF1c;MZPy9m>pTS+1^G!hK&YG1g)caP2dX%3b2wEsv;hJci#xf3WOK zwwN1|ynHGFg`99Eoh6;0v+e2=>^15<^=iY|zB-;8G7@-y&*NOzs zOuADZsi2A0zjblyrWr16F~ynN#<()W6bkoE5ZuK`I0jVz8i|?xs2o!asW8LfaC10r zvc|YAwzxXi9*N3!I2G-P);k^Gx78Zy9=3=%V~dDBmZ+X&f+g#8@T-qJ8s=28Usf)k zZYbs1AI)qXtO@liYA8}`Vep>ceD>!ZXKx8*qUH@gskk8+?vle?r9uPOe`z5pNEMTx z$ziOG3f$zi(EpJd9vqj4{8BkQZ&lCJZ?if3@>3Re+RhTKb8M~pO?Y#{6#&1ydX~%9 
zn6+~{^%v!Hib6HJJg%e7+3@)qv%V=kL!OINlTgSaeN;!I9wr~-> zy7G#RS3_x3`GH;@KZM7``eZgOJ$^EWi8Rmt#CvjYsB<@rMsBg3I5&-zhGcq&ei&pI zNsYdH*e>i01Ioih9V>U?9|rgr)5)ipHhcf@+JEVcO8-Ozz2xn|Pna?{lB#RoQa$Mv zJvE-O^MG@_W4@1%hTIj*+|c$v*!J9e<~fFN(~eh+s)^@?&)=E$Gn#TYPq6Kd*?ej= ziDO1?;@*EAF>zi3f6G4azc;J6@dYWdWLLH}gvmD9TJ<#-cd4uXwLotLGkm{gjMC>O z82rlwKmS;WUX75umawj~Ky+IhM69&Nv0w-Ek8#F&RaaCsJ3vwP+%0-oW6&R4>}+R) z?xkj8)=e#69`1hC^cY#f(Csxe8z_f+xPb0yztrE$QaIF5Msff{>4X)^LCJzTNsO<~VAsMl zM)gVMO4l?7x@L3R`W(hNrts3wr_}0pngQ~s>Dm1{Th%|~5tSG!#UzQ|-5oj2lpKI@ zQ}tosZGsJREih}fIpWI9a5u>um9jmyc7ZuYXcG!X2M>G0fY(=ee*8Oz zE>7j*{qGp(ewQ7tzv0Gxzd0>9mmRe~(PPO?sxP?5rkzO)cdKXfb9Jmapo#Bmlrd>Z zGvj;K09Qb$zjAzf4vS7kh;#a--A8WRQ$}fitiQQB4!l;!h6F%fKbOzcp777TbCmlT z!iAe3)9c15n#*0~&m&PB^5_e1_y5Y-?Voa5{b?==+QZ{L)^hQ#xwLUwEIWtyu=(&w zK3#HBeBURhM|0ndCxSJ3#x;c)MQo1Bnhv+NJU5y@r@mp0#WQvse4GD1d`Yk5UmVs`_PuviMYD&VUHM!AV}YGF zEyO*G?gvve$j$;OS4e76gL|O{eodFfhN}!PajB_bJDjq#Mb$1Vq$t=RzrY%CeQoe_ zl?{ro8VSFyRL3-49Lrrnw>hRLn#y}BDR)i<)ms0rIhB2GCT~~sQ%;SjBRMxJ;cu!Y z>RWVCp{#=BFS7l8e+qlY)6# z9HhKyb{yc{_OMbF4Hk&%mh1}^|&V!Q*dG6UKe*Aic7wj$x z&q1ioXF9tiGh}uwUp|w?3Q@;6&tfwN)gER4uGjdj`XP(Dyvgbd(nEuJt{CK#Y z3v`=VJWTf8U2hV6u_(1-b{m()VcF?yxfRd%6Yg>UhP@ncDfJ#Te_9bTR6O5>B_&Q|DoYaD%j;ql)}3y2AM= z-5u=kF+=irGvVQq{O|PvR)WW=aY7rjk5qEu`Y)6>eaW58$*d@^rQ|J@{1uYFx$`R> z%<|U4?q!B#(xoa zjrDt*Skzx0`hAV(g|*WRp)*n+tz_?#G-ul_rh%(F=ZoB{YiT$aZ%bi2 z_rI+ApoZby4dCpnkH8!1=w4q>zqm~1)TJ;h{HLhTL^^$?+{-8tgGs&NZH*q$*x?HM z6&&JR&mGihSVNt`EBL(kHpYAGXWZiL4AkGsiJ^aZFt>&;L#tV%R7;gU_00QH%SUs{ z_+w&%@L6Q4-r>+pSzdTBnfA(GcyIi31~i`I?e-@a-u@#0dv=v;HlJZh=n=m42;r2n zZ-Uj@)wn@8qc%k=38t6nIt7ee4^-LL(jd1)^pGyO^@}_6qp3UbAp@&i&BolF$exrvO>Qv2fT-!?c<0YPtlwWR|A=XkiFVw=@8ETO70_i;{ z?NrRiu77E{S`PbusAJq!9h|b(M?AF=o1%s1ZS*klh>`I6Nc&v7&Ps4QSV!efxm;NC zksZ~dcxP0KaN!+D);x{~_k=&1_S05v zpIDFP4<5<#jA!(AdcyR1_ZhS3D&J4KK!t(lxO4JR+Erf>xrwx=Nsg_Dzh&RqNO{cj zYUV1gAJKX$cny^-uQv}@jeJ?A<(W zhNCG4qTl`g6g_lT)xyAqx_FbWhb3wz_`TH#4^s`%wAc{y=j$LvNe>oJG~v=wUGz-- 
zRhPqsxEgkEPUkU=r?fJ@LiL0Xyl?uK@$)pq{^fhq9OHgiV1j`OGJJH9V4;pp#Vvwu z>t!U11=}2D$hgr=zcQZJryXYct{1$p>7$6p<$px*ahp4A2{}gpM+bQC`EeQyk;Nlc zdHg<34F|8Pp|6T6vdR^4^&TMA`15z=ae`Y4b(Is?@4-vz@w+?h_}{6cxz2;?W%;FE^=sZ zLNPa%D~rJ-zV+bMjr@=V)ZA6W>{OZ0yNk;i_^6N-vRr82*H7&B_cx`wL6eP!aN|fmAj!j9 z7H5X|VWwzOGR2?SR*2|oEBb*7FFK=ewyWs%lniguIXQBpXsVgJ5QVtgrDtKf-DmR}Fr$vPTP@4r(H9mxkaV z9lov%iG?HeZB&=bW6o;@3>>C}>z1m5moITokDoS%UsrP!UN(irBXg`fU?QB3k`K*g zuNnd(n|ZFRg4ddI#AlU!H7E4)Xp&XLH($y*;Nx#vk9;9c0nnWv$JEJsN zUirC~725u=5q*4xwRZTN;E4IDE?8UYhKqw;a7e)gqb9it=F7`iFBpFC#_r8tNc!Z9 zN7jCrGu#tx$9rJ8wkK+zcwoeIS9I;{jN~*|+&%4%h+}TJu)z(fF3$L8oi&OFnPWkm zGEzEM@t#)(KaKdtzs<6|{%QvAS^nS+=TFpekLTFb&vaM%#y+33>At;;dN%b`c}05k z*AkwqQT@y@YKjHgl^I}fj4DR_sif-3&z#%!I4$KS@$j69G|Sk}E@sa;c;i>TaQx1B z_VGME^DgVQ?xx}BHN0%PkmED9v#d=F6BCt1-<_0mPf1h7=IL^HFsqU-TR!q&*K<5N z@IKeQOX9k-1w5==PVY0NOdL`|mp+9&JiLsOJI=d4mou{SXdYTl1NnNE^#S(gDG5L9 z)ZrSikCE*$BV_OKF;$p5sA0<@6?AJ-7WXe5&#Iv1kp{FksS5_Ejg~IDm>6NpEHl9m zmDtu(T#W_O<(I1gbdSpRkV*15Tw2PVMkzd#7E8IqschGjE1VIRk5zL)QZ>VOWbyl* zP|nkS$d=z9**T+}!@8?t&nW|3R4~WiXI2=#(gqt^ti_DH#Qzwx+6A}fyW{g654;R^ z$EjIf(6sT#)NKK<9T$YQ1^$SL3qrA~KXej&P&LaNIh(z)X5jy8gw5sN=n&+K!M;97 ze(#5+nm&jg>5iIA2iOiagwYN;d^Z2isJGX+rS&D=`w+?0$`5>f_!%=FU*e;ZP<9AT zq`y}dSD0tBrs6wu1}3uW{CrC8(9c~BaOL>_`_{LfHbwmaT{QHl=ZkY`+?Mr_^X4w0 zf%(6TRocJ=zpFTBzICvS}X&X^x@)VutIdT|#x-D@wc=ANf$Id%zSlh9ZYd;lH zZdW0r;woqr+Q`^NX{)XlNdTl)v*&@MD^fyueu_-?HAWl9ESJ^}4Z`^)DE0D(1nfI>}?r zr9Z5%e90B>*Dz$w2*w^-$!D%1obC6G3&L_)ur;4u_vY~6rtciJDVCp0U$A4m2 zgHk^Gr$ryiJGAjzUq{qR7w(e9vBedPU!TthYYOPTp^64~n(3A+2ZQ+vf)ze7t(m>{ zHt^=h5*DAzV=MPeQPXuDUQUJ64UGTVLMh&RcU1xYUC0kzn|b`&Un-rgrNXdUO82Yn z-;+UZYIxFD1Bd=-!J?P0m=};7O`eZTaYo-n_;V$H$h&DKD4MK?P0N*Gv#ppLRl<1l z`9}Hd8_s;O2qAtmJx*wUgybwJkLKqlDzqI%qb~MXK-r9Z*M& zw6SQHI*!+A33rTIoE7?~dZ5iUZ^mdH6?ms#3n zKUY1yO{3n?{Al!+1@UjV{%#^S>149Ugba?F@{uKq4>+*Hbr#C@VyW+KVTvZaEsb%j z!W?!VjIr&eF5K=ZK-%xR?*GPD*X}X*(*eOq-aYuOs9Pxbl=IEQDoQ;NqZXFXsy?4V z?|(6Q`xpM6-9)KZah0k*x((JvQiKlf2k2nlNfjjRRsdx&nobNL<*$-op+!vvUh`D2 
z`MV0F*}scD>NsFQAwz?|&`S9Q1H(Sh@Krt&e^%4@SUsoRs2BC!XAR|o1?8`BCRQ&o_MKs*R;wb-Y=4wk97`6}-t&mCDe|Q$@6<4q8VV z;7^{Om{F`t)WR48Ik-4f@St+8;9hs>pp6-0Y?1oa9aYc0u%a;#E`F`iP}de7r#d6# zMF$+a*bbA<1jDE_2tV!padoz@sQ*iILeGD=Aa{}@)CRf1F3?BtJ*AoOs&F@ykFyf2 zvOUpRw8w4XTb5?9UG2YdeA91!*qO_^IkMO_E}esV#4;r4HG3w$;)(|!xHTw;#=1=` zovJPJMr8wIY%tZw{>AFTqqq8MjbL%=H+{P_TE3t82gKl zY?Ihy<|pP^ycM--)%8EQ(5#4)%<4FPvl6Zzk>${d2FOp;LBMJwY%{VDJphu+*GSI< zQ{EV%m$5E9-4(DTqJ$k&;(1Ex47apB!pCnPQT5>$dhagaj30G0{UncRhKfktsDvyz zb%dSQ#4>$d!A6v3QoDZBLYp3n7&^LwGgij4XYxHpc8{m;c+xW041Gde&`aMFeewga zP_7M17IZ*pPCGHbSNo_9@?HePq9q7+&Aw>J@(`S6srP!|J$2zVlf1KM-Z){y6ffK@ z3lK9zzH@A`e}Eo5_SCb}tOO3c5JhkQZ*1;R!Z71n{=4fhGb}1uIx&qCOzy~@=`}X` zzM=ev-@I#8L(Rb&Vtzut#TX~g8sNOWreJ|ccX*aFa;WH(Pb8GkctauE-Y#I((_#+O zE9KSCWgN1zihk{Dc>QoCGkezX!XJoQNkIBEY&+?U{!4Y>cv%Y<vF;7vfOu89{W6!FB4FZ|LFL7PRNxpi(92c%WA zU~>&07!`1S(Py4K@j}eSjXZgU?zQ_E6n&Pf%ipkb@GmYtRn2SP75?uB?WiX_?c3%U zW4xm!x=pviz4tc4=_~d1NNoL;5ekspAf4AF(Q$qVdu3naht99KKkS?63%_JpDVS9w zE;n)85=FFeR>Aseb<94X4Xn@>-unBgYH*!H@xJ_<7sJMlA=F7q5dBtf&lusWgPmac zovZUk*|b2oziSQ4E^X1%s12SBZ;NpMR#>pT73P@uL$8mM=v9@R@Pqp%iub=MPL@O6 zwT8(w54Onc7b6_I>%I)`&7{J%%faa1MC zZxrx%$6s7CJ(B}#b6EW|mm3T-=unr%m5cv!#XT1+y1yFVPTAprraCJ9)bVby4zj=L zL20rPQsVV7=A{AFChFpDma7(Q2k`*7DlBJT9O4iZ17mQ8ni@dp|kGM>%Vl{bY-9ht$@*r6Gu2Mj@WlEo_L!bV50@hD z*`f%`IVN~pV2`C6U0~Ycj=@hoG2>ByV8G6yFE(8B6K)+3bsw0@*7bymj@YqB7YbMY za9Q^!Y@Ha(X%P*aX=a923!Ko}&_i%zi_f?qWV-{jPne-p+dwdPY%NrAsJ|Qv7q)PR zMH8Pc2PCf1qPs;RuX?oqFE9L1MdC<(G__d#pH8vu;g^- z*D0DY>HGQ5Qt!J7HQ^ zLmX|8owFS>IO}==&orq(d!;3;wLM@c=Oy_2Gkdu~cclx4YC8$$yzVAhJ~K)ayJDKz zb5<2k#TWDI<$CTM59rI{gcH^(Fm0oSsf&z7Jxk)14jbi!xrR=d7;KGDS4~Kc;oK}X-R`rk0d?L5ZGgZA)o8>sKn!SrmP(xdM-fxSIE9_A6&;q>==)*i8V*Wt#g*vsh zz+($XSe$f$LAfPnrRoW0qV!B>OtOUGJ4Y;c_QJ`NJ{aZT1G_P|x}(n&SNzm; zK;Qy%SiDk2yIyjT^N>U19Ysv-pboG1nvmAx*YIBqYJ@9ipxnyZSVmg=~5LIv?5ayU}lBx-s4nu~b% zMXI>(HB@=bY4<*Jd&FPPEK#3qu_ zwXs2mK@M=Sa0Au7u}GMjMkBX}~9^iJ#h5 zaCAbcnBBVeF^jH$!ue+0X@0N^XZH|2*~`2cU8_uy+FZ}pg)KBz12#NTfaFy^y-5i# 
zpULCq?G{eUljV+frL38n$!n|P=xlV0Jb#`uHhf@8MHQu9)py3Gf=}A(q6v2Iu|SZH zv0#eZZ!!?PlDrlboIJ0DkxyHwr&7n{3$>IS9r01h*gi&2+{tX&XpSH2t!0nb7Sq4m z;lXVi?3-eX#qexIh>Duk-QOt!%Bcncw^Er{AEn!YfoY=PN(g7ckhXj?%gHX{#o- zG;5<999!ivfQj=dGmwG&@b+dr4OXH-$z*8-8&j;P)0hVFd;kITTNNHwySe=-&ylG#+3kNgUrCAmuTSRj1pyzCt9Y(X5bgn+sBFj^B@i@-} zTjR`O|IHG`zE%jFX)T;7Uk_SgrM`t=;ot0Ugq82KL_AT~MIDz0X(GBr4?V6K!^7MR z#WO6h#?~5F)a`Jv#ZtuKk=?C?N6+`Y306Ea5}q)r2HLn+1v{Ern7*fx&*s%K*rY%( zwr@On&YTqwxP8h4{zwR+it9_JR=?tZlWy_X%foEZ+raqk6S;i!7&dlU&gRYcX;+s* z_XVXQ{@2z~giaR)NPJnTKH2iDj48)T1-ta1OA?zEQuu9o1*Lg~-B&DyZ&UIyi7F-AD)V*(>zLlI|A|5qOs!wk`}LRa|Jf<2T)zONnG11~*K;@vk{Y%0rz zu551R>Jfjr<*@8$bq(#`R`B0>MLaejTimm^+8N8kMvpnS%}svU{(_1w->H;T!|Ew& zm^9S@*A2{}pl*dPy0XvP$^n%Lw%FX>5%zxe*n8UnUHaQ$!fV-j36TZ+0so)tknY5l zvkkB+%T%1F!5?LDk+L0ZJ~^PF$y)eP%^R(RKWcY~A!5$yL-V{k9tW#I@|;WVrG~x= z$V{!{h!JHRx+;_YQL!|LyUm-)djz{8%I+Mk?Qii`=yl#TJ;T@o+nF+MBD2am^Y_qk zyc>Ce4?84rWLYT-@6~aBbQ7b-)lhq32A6$)MT6bpeDdEH&Oh~wdQUR>WKRJnWH#~K zMJ=p&Z;o~?&e-7TgP{gKa5*1{d9Q+DygdjZy@KKG8vqwiZ`5l!qVSfX*heZyq*HJE z7h1;Wh&-Wgh5~l4RfY3w4d~p}f_FOu!Ecv%eK!gX#NEf~Dl?p%U?$vo-IXn%>R~Tf z($e}LbIMVinb8wXaZ%kEJ_C&e%X@I30UBSL;De2&U;;}q=gl2fB39|&;)ovI`e5eh z%h-O^6TkN<;KI@dUiYlzU;kp-h35-DgXE?!FN~oE!$m*Aww7a@d2AQ6_C02;YqoHn zy$R65{+(tx``HR(ciP~&uY<@bQ^(8p#V7}8syX2HF9&>DY7aYWTM^68-D!nIUo3^OJN*~fuf_OoEm5x(xff!RGL z^GEDbs@L4+B#$J@pDpH{xz(cIa?pSIR7nV@VV^@Z3pmLVhr7(mdd-mY$-L5`Sg-}} z4%Wft7IV}sk^MQ(y5rD3Z>T%^LHAw&x-SgCneYI?SuSaD#n98%kUTsR-)h3m`>gJH zpI3vD8QVdY+rL%9ax+cXHmPIaa!owkt%AkYz{Qht$oi@xdWC$_v{ABL2~}T}uyeJB za0s=nw8fP(jwsc%L&p@^9$9CIhjZ0MUqJgnbqqhO50#E)=&C5o7f(B2p}Gq$+B)Hr ziZcqG{PEa%7*-#+1-X1*+_NVabgd9-#x6 zeLZtt?Bk{Dw|HdIHNI0k$04U~(o8#=-*=``ZEh|#eiU=erD7VIWpMnLD7LG*L7PG6 z`94gTA44V*#wBY zVUPK7v&iofRY@31u5O)h6=eGNRWwZsx_J8?gDL)${I%N(=osV85`9)GHZ3#+G# z5t?7xV|*7ERF87Q!v}8o^v4w&3xaU5dN>lt-+^iuKa73U#1mVJIpnJBd&qsmg0crJ z^0~oB!w=G`>tcRU+e)Q(hpDE1ln?jb;<2}%L~Upk$hVccxcbK!?L91UFvA92{yL#s zFMBlYbU>$U2aFulb#gYh{P4?QOAaf)y@4Hpi;l=4crt+uxU%V@eM@ 
z^h*E#Oog;Zq*j_?KwlHQ?V>AY$+ONh^1pAgoIbgPKd+Ut{oyL^eB4Nh5%qXfo#-ht z*_Xx3rr|t2;sk3ZY-hm!BXpm6nWoBD7}7qJg_mQw(drY|pNVJhYY9|-oG$J|Cj0(k z>nUlR+3N>yzWyQZ&^LYwr=2X8=z01CHP)Y?$FAorcmBfJXEPaTP(#V-)#K!JVB~fudY4P1a^fLB3Wr2(r z#^MY~?4K?2$f4TbxUX+L^%M25+Rp(cG4A3n`oL{(sMPvmaz;D6u^5YJgPSnj>5q** zb#yiR!yel|&~mhFt($D;w)@A$+Mn#ZpWTie0BFm{VYfk@Fm}SKkq{9yp`;r6WdYIN<6Q2bfjcp--wUR*bR6 z;yX4NQfP&9X4c3pwLHCVmx<2Ex9i-bgF=$CXq5P{{HVd|1QL<~Z_+beZ;Em}{HW!CFLE-|cS%gl1#|NN1s=0&pkYZkvh zQiN+aL;PA}iJ4byG3mN2UUX1|g-SA)gx%r7>h~Pnx|R{&jPO;q?j*jE<7W@lWqPCS zsQ^qm+7282CqP;LJ`S0+f@MLo@Sz{{{=!iP*EuHhIQNgc!-~(LoV@Zmb&ubtQ`aLL zYrL9C54JNc{~BL8e`ERf28R5jhJ*zMNXak}pC|mi6?V8@a0pmYjrPkd?49R%Ix{%NOb|RA#4wupFN)x{?Zxa1fvybJ78HKZF9#h{UntpB4 zDL1T`i*DC*_lg$b4cXa~9OlwYDTkDtGZn+1ap}1u?9%o+^)5#7{X7Pl-7^!IcjN+h+TY}slxVtzW^?9Z+4@JjYw1JOI!!Q2Fsvu$uG)ehq#&Bbi=Vx1Dpn&@?oYgki*-y= zxZ-^rrzE{*h0ApstUSU$>&|fY_OCoQwux=cHF5i<3C^9jfcY$QoE~G2gImlIx!M8) zUt2*}%L*F?SR-wQ73R*ih5KYnOxkFT+krO7uCm13P#gTUwib?phzFKf-^CO)wz^RC zS4BHr1)N#Y#Fvp->@emrpZ8tC&MW7Mvpl5uFy*|iaL1DvuBpi9WUXduzEi->;R^V# zPaR+774TO}9-&su-bOW)T!<~Bl<~k`U9bwfYH7l7l^XWFCTkv+5br+nP0$0T+f`jgmrTVrPZ*x| zPH>ndhKjtYI?fj8!ZX1Ljg>}7kJdw0zA zw<1o5X$ilnMz}rR^l}%D0^6y6$ow57T*Ia{?Gg3gB>eEdk59L}(PLE|D_#~+IzIyL zW%9L0CZ8V8<naQP@ix{20lU7d8xNu}H-P7tQ z)z*xzYGP5Q5)Q3V5d5FVdlk_yMF}x`wXiNoAEzf7V``>84*%2_tjaO#HL&(=GYj2I z_~YYG>N%uy^w}(i6{fNwKAiodA98wD1m#wzP~xg}+gLz%vnuNCR)B(%0S0c9y~nO@ zX#dg+-g>PtwAv5hLjq8r6o8gcU)WrCN0%LL*plpk%Lione4!;e*w_gkU(hKlB!-xv zOi>-qfqxmgGoNSs7PCtypu;f@3^-{F^+%R)iEx0!ZAbWhvctAnj$-cA{)D}7NNX9n z;n;5%Jn!uYr`c8*`N2Z$vy#(FEm#)o9rc9uMnBxF_Y)j(`w9M7(9j+o3a8+Y=1sw* z*!i?X%=@}WWN_85TrOK&#JxSr_+6utxicClF;`YZ*6@mT8OLoXp|)=ocaN4s;!#a( z9BYj8cMX7{Mi@895Z))$VYgNdk%_9pdAf3+4mM@$ig`=vj(5*R9UL8EEap0;JD=_M z%rQ0464$d$pzERwt$j)u)TfEcPL&+KF`t_wGR0lKM{^kOR9@ooTf6CFbAX#K9%0t# z8+?}<&(3oTIVed1ixRZpkYfn5sk&mmM&fVvsh9o#{^|<0(1~1Q=*jX8=}!0PNi*ac znxI~mYscxT!ToFt`(3K%pNa~>6udY+pLdUBa6)1lTXp@-l_QF{c2*q+)RD@+)No># z9_lZe!D6W`LjQ3WUMSOmV8mIp#zh8UZdDLQuM0%xd>`!Y?}Mp7y`kU1UEBqg7`tQl 
zC3nQj)~7VvaB`h7v^{mO^{hHB%+*AnNIlH-GZ!%`nuvvpfjGVbwa{-56o)k4gY3uIAroqkY_ZDa`SI+UR3puQ|f#bge8BvOuks*&^sm+4- zqt#j+gI>ti`AzqHq!R1A|a@m{f0HL%ZhRy%KF1DC4a|0cwr1IOwE3+8npQiR+el7H@|f+T(mHHyoYl zhW=9mpy%Tc$Ahw*;}xF-@e`{Glq2PO}4$Kmmw=dU%S3Nd^T$-C zF3Az@TeGacv`JME^#F-Sl>6CM^o_rdae&TNd(lH9-Sf6=^2Y`(f4GLULhW~dTzC>B z`h+ex_@l2`0A}3_#IK-0(MKTF1i$Y0z_(I&q(5~*);(vzq4aZgfqRQ9oceg9v4bDl zua)JJ6#-~o-3n^|1PJz^#2riR?gw>EUt}%x#P2dMywUZ9e3(1_8{mOO>)r7~!5t@; zx?ktS zIjqetst29ml`b2(!g3p<2HoSSzywyf{Shp(s_|rSHx*d*(GvTI))IYO?xu&2#hUo| zKoQq2DIxP;MSOatj;rHzg!g@4s0C6V+n~qFK-tSoPR)A4 zrE(9sEH{ob{PUQ3v4P?HRIqcnK4xZ_WBWo|gsC}UnY=R&KXySs9e0e~?gf`?z9?Vs zFBrYc4O$`MNH7wd+aPpjE6j=xg8rgbXz$WmeE&1d1BEl_KOH|@>fj4hd7#+E2fGqH z(Z$0byS0Nv?Wpy$0DKP%z+byo;#`q_Bo^g6_aJN@8X%aJ2W!01@u@dfEt16=8@H{{52Q0u>LNR*uo`)9hNdAtWMtn|Y3C0+4n`*P$Qmc?V&vbb*cXLj20 znVpw^V2k-{hOT?gobNwqnDCeLXDJ~rK^tKWdT1AHf;$f8f}5^hVJz705_?bb2Ynk= z&3BV>*!cJ(<2J_gmsJk?_NwNP0Y%Jr`M|K+TOzkOANYrxpFqsgF5jb%eQON_FZ_0a z0*qf(@}crK?k;-5Lu&W9dj1Vg?s<+uJ1&dffq0K!;+|T+8tAi44Iz1&FyC(g_a1sU z|4|no+i9X`sshZX)NpQ60hLGQ@$9y0u8UQH&OTi<{AU95Y)kC4w8zsk_E?u=3(4u? zQ>BAX*BZr)^IV5h;?AS8-#vCd{Z;rTPYhRr>sTXEm%g>s85VHG*l(UlzTz(W|GPB$ zV7qAmHf#^X1l3^pz77`mK<`fYVM$DDWS6x@?dD({z1s?YBZ8qZBM5g>{9$@A04EOx zVA*_ML@N4;9`dex1JHSK5CUa;h}T5fS#nnP`(14XMddcQlplxywIIQtE4k>04;O=w zecA_`|MP|WXm4EJ=ZR-GT=C(D6NW3gz;c!>Mx5h_iHn^@50bp0Gpz2oq1U6Xh)Z6I z$Wl9e-r2}&3uG~0-EXeA8P6UMuXCo#d0w0Rfg9&l^5h0p96G6u4@V4P;$Z~q>88-! 
zY9e}s3>~#0)vTpi`i6u`RtA-FwtN{CACwCBjH}6C?)qKMTeq@lvPk6{Nw|~&+Q!QURsbk!46VX>; zKhqXxZ#rU;haC#eSfbrHS?oDp0g`vjW6?AE{yfH!z4nT|K;mHxI;#X*KNI1Bdlut_ zv&Xz}{(*`w~B$=$(lcJfEq$yQ>04vYvwzr#W3+YkiTfHc_mIS^gWwMO?Lt>E%3Nc?v-YkZM1(+dV#uBf@>2J5NLs2lEr;C=QuSnh~f%J%5F z#2(sD90iB`)`%5YvCs>f_N(Asn|i)ES;^H;GQ|14@>>*7Z2QH{^O`wir5dJe)q>w( zeON9s5I$37bA8x0q}gH4uhAv7`IIyQvF!s<#y7NLu7U(V{}saV8v5*SbdqxKi=mL%~;;M zl)@Q}`CRq0ib}dIqUXqxn%MeD6CE=(VDGJt)&D3$;xnXn$fLPs7>CzwVnw$-bg6$U zcqS*eD+)&7lFeqg-QQO5n*3>tieDDOd#qWb1Yfr*4k?V6#Vt43Y?U3f#4hGkjq1M$O-QKApow9sxy(~WWs^y@5rSyy_{U z}i+Kud3B`<1RWyiGOmaPN5oqD3@yWd82{JGOeYsVsHj{3?+Mpt>xeJh=oU*KK8 zD9-Ma!qZpt1aoxY$`a39j5L zO@AzO@yCCgf}y)S2+qR;;djShGR5;1a{bU$d#S z{SPzxe&@%)H+=d!QuH>NKB%R~7$sC4)hLEJaFy-en5 zod*mJJ;i;Ew>VKPoV%}lVr=3Mb~#tT{>AmI|09PRTb0nTQ4UTWNoi(H;tt$QPUAls zvHbS-K6~Y!VB5OuYmz}-iAT!zPIM(6V^T_5H*`tgigfc=X zC1o@fEv1yAq0&(5`n~V>_wV~xpPuKr@9VzrYcMP57uVeAj7n>5C^i`*cdw<$iS`<7 zkMD&Jczw+orVAW!?t>dP{^JIlFYZt?a6-PS6Bf?&!tC!JD3$lYkWJpO(v^JU>OSyV z?~Q_6-tbQF#-`DIklo%F8}@j^FUu2)Hn}6u%L7s8U0}J}Rpd)bcXa25IAiNEN9;N6 z3_Cd|42W<=%zQ`uank`k*Vqb=;5<`HydTj^_+Cjff;w%q5c{I*<0W;juRf%CG?%g@ zeY?sS=adFQIWY+TnD-VQ1}*`Lh?uQ_|K`Xc;b0m~)RB9`uu7$NphVP&m&*x96;|DWQgADGw;W5%=PZ;a4fQE}Qpr zo49Oy7c9D^B5P?H`MAv(KF+7UK z7ZYjq@)JLI)G^tyjiKq}$Zx;7b!j6n?)=KR_8FWvBZjj}B3Mxx!K$SDJUR0n9WE3L z9Zb4DDR)**XiQ^uG%!j@1EH_f5k5st__axO5nWc1r;btREmxiIgaus{kk+j`5(D({ zb*>2>S6gHHGh4VVuoHRePI~{}?>ts_#_AUi(CO+1kN2*4)a)Yqyu7{_Zo(Ujnml2) z(;J;ud4c{u_&2~CW=p*gKg0{Wf4L(u-V?Rgys+(=i`dbaB)Qp;NnK z-7ytNc`H(H;C`}w(2%%8^dLT0$7c0z@QzVLLuD7phIB^gNE!Tjyqzz{x3Z}JFG_W$ z!`}Sl-#NcoWdIBeZewb26Az94PTiv!Oly3=u=r?}4Sp@O2yuGl!Vl+I$P?zKBr;=h zsmOMc=8~HEsG+!k1ct3I6n#$A$EtJQq_z{h~@PuWIDHGo29QCXZ|% z4QO7|#TP?K{~TqG*mP?wm$k*9r4kH1!bbGkajEtqS3OlSkLOHt$8~ErydU6+(}UbZ ze(R=v9;k?QL-0@!gg3k4^%755z4k=kI8WT~>y0CKB+uR18}{ow5ESTvqsA^mTbz2; z5gWHS;nQ?`gpYB8y}3QQu5*O4y$!5~+KF4cy0ba%EbE0`@@A;>HW9o#>Gv+>W9(mM zC^$pqvKlxWDubg7esW4_1ND5nU{I42I(^*=x1}C9bWRODFLo8${Ey)sG1P^fEDQ)P?k<8q^RdM2o3}#n0a{Z<+Y~T8Zkx}vd 
zX#0dKlOyQy?li9)I>g?qx3e_j3jemqqK8!tyQDR7Y~LmpXVtJ`L=hjZdCp~pdziOt zG7lF7aOl2cVutwcUn1VC1?!rHx6u8+D}>LC=cOOqmD9?zdpqHfOAEI=`^d<*_t<-8 zJQH=ku%nC|zEv4DAGAaUoWpoi!NHf-*vq%9kuG@#8yl>!f1fp$T#?M~Kb+u~=L)}i zH+-DrA@t(%lIOhQhbxX3df~gjGn8YT@#B>{E|<9Dk05uvOLfPUPOjo+D9wl4?c@gC zd+vghp|jZz{XOlm{fjLo*EwS75qq@WuowRC1D{(0yG=w!oz#<9dViF9<4E_Kr#|Un zRG>cg7V8Rbue*aLPVSP2@8@b-1iln^Q>j1K+~}V8rMz0?6sx{efmfTP-c9OYO;kO{ z_*YW1OCbaMwYt3&y%ESy6I9HZ=n9UPr;ga!fOv~G)M&fpK6{q`H@{HWtF1<8AO zvX~FDGFW;oit}amvDeWh+_rxMx0zjLiuqd(dsohgxO)B*_>+sGztB4(fzJ=caO8yd z?8vENNqv*>4cIw3neX3(uzzu^1oxLvx(lv-*G+g$rd8=cyW9{JTBi6~V}-l3ZLm|$ z0-Fw42~KvkWbSVFm-K;8F2dhE_?`={y>h{eBo73JdI(I{=bkhA1iQg&wiojJU9n`A zJN6#*z?Nkms2}8x`4w(h+ru4G3|!%*?1<`X_9z-;CpiDT|FeP702@4r7FhDk z5c%UIHCekWFWK z{>%%y*DGS;l?AYv;RKn_-5}NB@<1~u$W^d^_*=RM-DTOihlGAIO*0bMx8^$6%N(QS zx4T?(I+wp9zjLEr4Tr~mW{%}sj?lcve#;)vEFp&u^Cj5dQ3l1Qln^*TN%)#?oU0@9 ziKKYTXtpj6hUwx>Q+JFvQ^DcST`)Afn%Q4o(XQwe&)bHupI3ya;no3%InMc}m;oM# zykpq9TskZ)VUM}rI4Y`?6K7@8)!`8vkA`xy;US(qyoZi!_A@*xg!jvnSyfra_g9-Z zd~^e^SQhe9NF)u*LwRv+5?6+nv0-ZsrJ2eB*FMr#GnI3mexNjCclVigsw`0uS_}&b z_LJUMXJs28daWUD%`!#ACR0@XYk?9sb6AF2ppUf;Cac+t*?)DOBhp7oW_(vqJRazZ z;s{TX(=+O|E39|8;im+{Yj*R1${SbAyWoZ=c`m4u{H}4w35yM!k(lfXM|pd(zrPS` zfu5Tz(U@zAqbtp@cb_5JgS15-FTSIU-w_&Ew^13IS34nI=@)m|HSwNBXE9StH3rUM z%8+KHJWVX3m&{$BQwio|^$4oj{$y70bZi@Chwt)A@SfKRO&6MI7516_-=8o)_%K(L zoaeq#kD2&i96zKyV(5nZyj1^^$5cP@Yf_E4jSLx3!Us>%IXL4vTLO~Uuey*>X`*!h zD)nBM=0GRt>)_R81NbdAg0hqk-l~fOgL~j;wT|%OmEL<@%>>|9GFC_J}ETQ&E&_mIZU{i#oVh;c`N-Cd#7yTi|Yp^^X5?w z&HtBy9fuhnlE}yRzOkdBnSVF_VqEE0-i><8Cy_5{zA=l_pA|4bzla;F3b{17lpniP zP-S5qrw(bP$C@^7uoff`_34Zn~elNDAi3L25!2j zuI-M71FDGaB#RSvKgHefYgz$^d*#q9?2XVyS-wc(-jY}b#D>u|F@*XJ4>|bQOSXJ} z&R3&v(re>Eo?o_wGIx%1n9LQfa6ZpTS-0stESrPO>v`Csods8ZvrlUSlal+KgqAMx7b7xZ0`%e1^I-uT|g%I?1?^#-i7Y~qx|-}pc~hw{I&*eJn1(hR8_ zbtU23=i#o32n|KZsVE7}v{b+F>7a?&y12^Zlbu_Mk=_|FtywTJv|d$gSd`u%C2{{lVpkMKh016;W0DAziKQDOZ<;e)G}olUp1mE4`t 
zCVWUf$jc(Ou#Mq;o7pGo2h)s7IecIu<Q?AmlVC^YV>L0MGwS~?%OT~UznlTn59f7E(mz(g1=WgqF=QgE-!b4LAE3E?%81M7)cGN zx5xffRzgcC)%V0iDj;`uHOq^hFxB`rbsv1BYfvLwf`H5o4V>}vBX<|%Gx}2_rI`p) z{>YcUJ#lci0Uk_NN4I00VeR{iDPvo>Xi*Og@tKYrA=ao}p@rsG@^HxMj3WP5zGyGw z8koU1h!9T@FeO=ZxY@oMm{8-@2S+=dJ(n#;eD+9c_a#os zc*=#{GHE%un9_Min!zB|<9D9YN#sy{UfCkSu|GL1vXW8DvzU72G5gI+W_nDC$P2wZ zt4ZiyK3pu|f2nbdPCiMsfEB#4a~t1!M03LQQtCzhq_KGu%U0#_y!tIJRDD3_O`myg zC4>&|-=D_Veb5&1x%Sw3%>j?fCD^^y4))a!Sm^Bx*GNa<4S!2Od?MPIEZi^f|KFf)Gn6o=3Vt|ny{QGNRL6kDA z2gr;6zqGF>?M-7&?=##vYynGJHuJt!IP0dxv+`CZ2ilf$Y?l_!=-$DeOMY{Ib(7c| zPoCILso$5>mpA+`BjJ0MqHd1zgkH#*YbLU%YCX+yuGm!Ye{V*bh&qtcriB-(>X^1r z5sTGjV7jKBE2e(nu$$K;-*J%muD?EcP7CW-{Cp*Wz|IT2>eGla23@^T4@5AK# z)3~VHPBAkE&w0yKrO$l5v08k~HOCtH*uQ}??`lMTv}enA!C#m9Qun<2iN-_9xXSM* ztsI+K^|gVi)unXtOQ3_|ISH;=!TnR`a_sQk^qmpQ*@4A;E>};jmRhD?%Hp$*FnU~# zWL-!m)8_x=^?j;P>uZGTSFP}Go;|KRIpOsy2b|2dgYpLlWGXnI^syb5>~g?>6epZb zbb;}4R~)Q!!(L4{48P(iIGJf{?4iEP9+$Oj1jj_mm6v*8L|>JIG>1>>No?#RhZmL_ zIAzdX?Dm3o>L5=tQ^))Dfd36cT)Ec^p2ikJPlG}O95vU5v@Xl^lNEV5h5xz=EtyNJ zHfB7~!qy-axMay-RD2Wf3@fDVk$XH>dy+fDF0d&mjO!MLa@gm4T&`cp#M%~0{nKZR zR7A9sGK^m;VaH7k!D&oQGQ`RhBV4dCgVzr;sB2h4E6frK2hD_DazmO4YMx5^BKZ69;%e-f!LOy-IXN#=jR*PFK8oh}d zUB<8V zs#Nmux1TI__$e?#@`*Mc`&!R8J@aX{{W?P@Y-Cc(pN#vlgid=7^3L~IIxQ~Z$XzuY za%&D-qpaHS3A zY1)fhZJm=VGD_W`x!y(OI%l`IqA1lBS_v-LDeH#ZxenO7$4+qQHAk7k^r9io57tJn zo@#KCFCKcu2YVh+&hIS8=tQzs zy@0X5TX?Hc0d})g5r0SnDVo~YXs3${BP8GWU#2*{$sGUr_d=iHmKd97h0FRDICsfP zaGp*cwL(W969gLdM5&EFEC%X`YBJ8?9XoG$Pwin^Xda-T;Fr3;YTJfDdh0; zU$}65DeI%(v*-FaR!q4}%^f=!P&bz{?Gvc>)1UTd*KMa5Eap0Qh7Ow;bFWD7y|YFi=eLhR^!v1) zhdd7QpH8v7S^9}LU(|{`tX{1}{F)WV!&}Z$J>o8d%D?d8@otzh*bs#at>Dt#Nnp9Q z!S=XfDXHn+4)~^JkKl98V#nvW!4*$0xC<92I0d>MDGtq@ETf7pl1< zql_UlvY6@onhSfs7x%rkHd)b+v;wv9&D}s~RRHG#k<#7$zcDH7Hz$XG z{8c9WktcquaJ;}qr(>eZ8BURUR!n%yEn3bd;_|w~WHSo#8LblZ> z@u2o2hAoSr&*;0H_4F-Gr+gLp4L2{>b3^!7kwG?g-#u;~n#{!R^(@$=4(obToK&(E z8sN-tPWUO~0JE|7c${j7voaEFU*stAJ}=$zK-P0Fj7;#vU_CF)ap;YmYkjbyq&F65 
zdf~LWCq{H}#+3{wJZ-as)U!wGHzLh)mR+Zdi4EFlxUMHMO{BV8DJNJd+7i3lEOE)s z7B(lX5Hv?JKRh(Ur2r#X)azj8WHn5k-Whk~$oO66=x1vQLmgctS7^XAR1GJKl|(ke z@7k{TCrl1SC7s~#=$FX!dKlZp%6Tm!+jGWg1z4u2<8Y`R{zx*xf_P)x$~A|_ZVL>k zmCUToc5wV)BQjO1S32URj4kdd+hO4eEBIeA#Ul$neEX#-e3$b+bb~Y_?_hcpC*G}O zOx{O^^-Ey?PB*!A-(`L@f6jr63+cV6jE37wXnHY?U36~HAo~c*P0rBOGn}K3T&4Sw zBdnOehEpf3V^H-Gb`-pzp86;HrGDb=!7uq~{}DFcp2-m^(H34mB4v;l zqk?Y9YUsa86+M;ZFsQtl_T{CVX(@SkdZ+P6Z3Z*z3fW^+CB5I*a*I_Rm$z2))0lD| znNdvB=qi3%-x+pqbj96G+Uw=7Ig6cO=SEi~7rJ0nlnV?s-SKXsD;%eLpqrB?ZpeAz zU56*MTYKaB=DyfJvM=(}20>A=A5Q)0gE9R*VQ%J*`RS5g(Qb*=r9BZGt`7AM1aMP{)}BC0yJrd3KII z*x1bm1+@}vc|{ACr~f~D@yr1g%#H1aivtzV;jMtsK8mRA-4)T<@<_e$|LjDmFVw#) zx?{#b1EJ^a{?P(OG3K~dZHoor_IS3&QQR)KBsjyj)B)GW*+c2L4SugQ#fl((%!$+# zd*K65WN;`Oh`H0o+R$pQeDaZf|A-;IE^_+0bv%`{kp)$E*!4oL;3Z4FQPVH zvGFVAcT`eobrpNeZegfWSFBLd6}+<40(R37OA2lu{k%JapMErStcITX>6zPKef2t9lD11tMt!d7pwFZ%ngweVIk z9rue2E~DkM5{}HT6nf95`CSk>Nmp>co445G&x>|A(A!4Lju-q4V7*3Dd}o^FLr^JWUfi8aK#QwCnRf0dfsmvBxac4 z_fd5$S|@`&rY#)Zt(lJ=O1`zVjcgw9h0A9?VfD4cR3E&VH=fPmrG?A+t}sIIQNz#F zutWJfZB}P;($f&GkKN7&#c)O@r&F<`f`tL!Ib})}+YNs(Pp5@b?~<3lwbR_Yk((Y@ z@;|>kR-H^`$FFyM^!Ytc$`v!%zlv48+d00W8<^UB)~e0w#U zL+0n-~3iJxHm~6U-ZNX(ytdVC$yfg0zR;ZQ#$|t^9!Z^M9Q6%kdvj3IZ0ah zW4#t!bhR)lTT|pfbSaY)`nx&jDn#al*{%Y~JeNbK;uL0FPZNCq!FF0AFL05g6&`k& z;lNNu#Q0Zm(DG+O2b}ZqE$92b^U2oi{@kDh$S%1 zS&E2Z^Ks;Jprp43BJs&=45|!3^X*CK+II}(pAA6GgI?IXwU&?To^f8U$F!=u#UERq zvR`2X=Y^XJpTfhB2V%RSFRWUsfHx1}M|3AljN|YWH6nY4pSu{^yU`_iEMj|s?eVZAM?>9k#xj7!FTH|q` z6Vw&Fku|;#c02gOJ9juX-yDU^i6e04+%TvW3_|h0{ZL`!jIbCjY|$uT;pjL~TcgMS zrrPFi_^2lbsjhpf^GE*sGlQ%3-f>hw9z)MIaIS_t@-($D-CY}619fply{GVrlVY8s z;T{4%<@#CQP^ue2#L!aAk8QE759p>f>QZU=6mW4dBnEB1mQKL5?;s^9!-4kuL;tc zhr+rv08hM^V&w0&h_?^I+KC6yta1n+)DEMn*8$Z0eF9tCPe4B5G`4>|ic!AD5O(0W zq%IuCv=t}t$S)WJhn~UgM@JxYe?R2Bc4OU|AO!gD!f(GFC_cCmmshU9_c?)>y0kAU z*P6jFUkzq`WQ1?GwAUI&O~s`r^U?D9FZe861jVs4al>>F{=BA#w97Sg+m=pQzannF Stb+VAo_J;GgZ}||Cx8{~Jac*g diff --git a/rsciio/tests/registry.txt b/rsciio/tests/registry.txt index 
593391ed2..ad2418035 100644 --- a/rsciio/tests/registry.txt +++ b/rsciio/tests/registry.txt @@ -131,7 +131,6 @@ 'digitalmicrograph/test_stackbuilder_imagestack.dm3' 41070d0fd25a838a504f705e1431735192b7a97ca7dd15d9328af5e939fe74a2 'digitalsurf/test_RGB.sur' 802f3d915bf9feb7c264ef3f1242df35033da7227e5a7a5924fd37f8f49f4778 'digitalsurf/test_RGBSURFACE.sur' 15e8b345cc5d67e7399831c881c63362fd92bc075fad8d763f3ff0d26dfe29a2 -'digitalsurf/test_isurface.sur' 6ed59a9a235c0b6dc7e15f155d0e738c5841cfc0fe78f1861b7e145f9dcaadf4 'digitalsurf/test_profile.pro' fdd9936a4b5e205b819b1d82813bb21045b702b4610e8ef8d1d0932d63344f6d 'digitalsurf/test_spectra.pro' ea1602de193b73046beb5e700fcac727fb088bf459edeec3494b0362a41bdcb1 'digitalsurf/test_spectral_map.sur' f9c863e3fd61be89c3b68cef6fa2434ffedc7e486efe2263c2241109fa58c3f7 diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index 4f08ce398..7a4ab0b16 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -409,7 +409,7 @@ def test_load_spectrum(): def test_load_surface(): - fname = TEST_DATA_PATH / "test_isurface.sur" + fname = TEST_DATA_PATH / "test_surface.sur" s = hs.load(fname) md = s.metadata assert md.Signal.quantity == "CL Intensity (a.u.)" @@ -580,7 +580,7 @@ def test_get_comment_dict(): "test_spectral_map_compressed.sur", "test_spectrum.pro", "test_spectrum_compressed.pro", - "test_isurface.sur", + "test_surface.sur", "test_RGBSURFACE.sur", ], ) From 8440db18eb96c15e90d6a1fc22818b1cbe9ba275 Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Thu, 4 Jul 2024 10:24:18 +0200 Subject: [PATCH 17/21] Exposed parse_metadata to public api --- rsciio/digitalsurf/__init__.py | 4 +- rsciio/digitalsurf/_api.py | 128 +++++++++++++++++---------------- rsciio/tests/test_import.py | 5 ++ 3 files changed, 74 insertions(+), 63 deletions(-) diff --git a/rsciio/digitalsurf/__init__.py b/rsciio/digitalsurf/__init__.py index 49230cbba..4627e25ea 100644 --- a/rsciio/digitalsurf/__init__.py 
+++ b/rsciio/digitalsurf/__init__.py @@ -1,6 +1,6 @@ -from ._api import file_reader, file_writer +from ._api import file_reader, file_writer, parse_metadata -__all__ = ["file_reader", "file_writer"] +__all__ = ["file_reader", "file_writer", "parse_metadata"] def __dir__(): diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index b33f331ab..7689bd474 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -59,6 +59,72 @@ _logger = logging.getLogger(__name__) +def parse_metadata(cmt : str, prefix : str = '$', delimiter : str = '=') -> dict: + """ + Parse metadata from the comment field of a digitalsurf file, or any other + str in similar formatting. Return it as a hyperspy-compatible nested dict. + + Parameters + ---------- + cmt : str + Str containing contents of a digitalsurf file "comment" field. + prefix : str, default = '$' + Prefix character, must be present at the start of each line. + Otherwise, the line is ignored. '$' for digitalsurf files, + typically '' when parsing from text files. + delimiter : string, default = '=' + Character that delimit key-value pairs in digitalsurf comment. + Usually, '=' is used. + + Returns + ------- + dict_md : dict + Nested dictionnary containing comment contents. 
+ + """ + # dict_ms is created as an empty dictionnary + dict_md = {} + # Title lines start with an underscore + titlestart = "{:s}_".format(prefix) + + key_main = None + + for line in cmt.splitlines(): + # Here we ignore any empty line or line starting with @@ + ignore = False + if not line.strip() or line.startswith("@@"): + ignore = True + # If the line must not be ignored + if not ignore: + if line.startswith(titlestart): + # We strip keys from whitespace at the end and beginning + key_main = line[len(titlestart) :].strip() + dict_md[key_main] = {} + elif line.startswith(prefix): + if key_main is None: + key_main = "UNTITLED" + dict_md[key_main] = {} + key, *li_value = line.split(delimiter) + # Key is also stripped from beginning or end whitespace + key = key[len(prefix) :].strip() + str_value = li_value[0] if len(li_value) > 0 else "" + # remove whitespace at the beginning of value + str_value = str_value.strip() + li_value = str_value.split(" ") + try: + if key == "Grating": + dict_md[key_main][key] = li_value[ + 0 + ] # we don't want to eval this one + else: + dict_md[key_main][key] = ast.literal_eval(li_value[0]) + except Exception: + dict_md[key_main][key] = li_value[0] + if len(li_value) > 1: + dict_md[key_main][key + "_units"] = li_value[1] + return dict_md + + class DigitalSurfHandler(object): """Class to read Digital Surf MountainsMap files. @@ -1657,7 +1723,7 @@ def _build_original_metadata( # Check if it is the case and append it to original metadata if yes valid_comment = self._check_comments(a["_60_Comment"], "$", "=") if valid_comment: - parsedict = self._MS_parse(a["_60_Comment"], "$", "=") + parsedict = parse_metadata(a["_60_Comment"], "$", "=") parsedict = {k.lstrip("_"): m for k, m in parsedict.items()} original_metadata_dict[key].update({"Parsed": parsedict}) @@ -1850,66 +1916,6 @@ def _check_comments(commentsstr, prefix, delimiter): # return falsiness of the string. 
return valid - @staticmethod - def _MS_parse(str_ms, prefix, delimiter): - """Parses a string containing metadata information. The string can be - read from the comment section of a .sur file, or, alternatively, a file - containing them with a similar formatting. - - Parameters - ---------- - str_ms: string containing metadata - prefix: string (or char) character assumed to start each line. - '$' if a .sur file. - delimiter: string that delimits the keyword from value. always '=' - - Returns - ------- - dict_ms: dictionnary in the correct hyperspy metadata format - - """ - # dict_ms is created as an empty dictionnary - dict_ms = {} - # Title lines start with an underscore - titlestart = "{:s}_".format(prefix) - - key_main = None - - for line in str_ms.splitlines(): - # Here we ignore any empty line or line starting with @@ - ignore = False - if not line.strip() or line.startswith("@@"): - ignore = True - # If the line must not be ignored - if not ignore: - if line.startswith(titlestart): - # We strip keys from whitespace at the end and beginning - key_main = line[len(titlestart) :].strip() - dict_ms[key_main] = {} - elif line.startswith(prefix): - if key_main is None: - key_main = "UNTITLED" - dict_ms[key_main] = {} - key, *li_value = line.split(delimiter) - # Key is also stripped from beginning or end whitespace - key = key[len(prefix) :].strip() - str_value = li_value[0] if len(li_value) > 0 else "" - # remove whitespace at the beginning of value - str_value = str_value.strip() - li_value = str_value.split(" ") - try: - if key == "Grating": - dict_ms[key_main][key] = li_value[ - 0 - ] # we don't want to eval this one - else: - dict_ms[key_main][key] = ast.literal_eval(li_value[0]) - except Exception: - dict_ms[key_main][key] = li_value[0] - if len(li_value) > 1: - dict_ms[key_main][key + "_units"] = li_value[1] - return dict_ms - @staticmethod def _get_comment_dict( original_metadata: dict, method: str = "auto", custom: dict = {} diff --git 
a/rsciio/tests/test_import.py b/rsciio/tests/test_import.py index 770014183..53b11358c 100644 --- a/rsciio/tests/test_import.py +++ b/rsciio/tests/test_import.py @@ -140,6 +140,11 @@ def test_dir_plugins(plugin): "parse_exposures", "parse_timestamps", ] + elif plugin["name"] == "DigitalSurf": + assert dir(plugin_module) == [ + "file_reader", + "file_writer", + "parse_metadata"] elif plugin["writes"] is False: assert dir(plugin_module) == ["file_reader"] else: From 3d842692f8d6bba490d8d92313761985e85343ee Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Thu, 4 Jul 2024 10:28:24 +0200 Subject: [PATCH 18/21] docstring updates --- rsciio/digitalsurf/_api.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index 7689bd474..86a255938 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -68,18 +68,18 @@ def parse_metadata(cmt : str, prefix : str = '$', delimiter : str = '=') -> dict ---------- cmt : str Str containing contents of a digitalsurf file "comment" field. - prefix : str, default = '$' - Prefix character, must be present at the start of each line. - Otherwise, the line is ignored. '$' for digitalsurf files, - typically '' when parsing from text files. - delimiter : string, default = '=' + prefix : str + Prefix character, must be present at the start of each line, + otherwise the line is ignored. ``"$"`` for digitalsurf files, + typically an empty string (``""``) when parsing from text files. + Default is ``"$"``. + delimiter : str Character that delimit key-value pairs in digitalsurf comment. - Usually, '=' is used. - + Default is ``"="``. Returns ------- - dict_md : dict - Nested dictionnary containing comment contents. + dict + Nested dictionnary of the metadata. 
""" # dict_ms is created as an empty dictionnary From 3319edd6111d576a20bc05303271c95a435efd87 Mon Sep 17 00:00:00 2001 From: Nicolas Tappy Date: Thu, 4 Jul 2024 10:36:17 +0200 Subject: [PATCH 19/21] More linting --- rsciio/digitalsurf/_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index 86a255938..fc751b4b6 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -76,11 +76,11 @@ def parse_metadata(cmt : str, prefix : str = '$', delimiter : str = '=') -> dict delimiter : str Character that delimit key-value pairs in digitalsurf comment. Default is ``"="``. + Returns ------- dict Nested dictionnary of the metadata. - """ # dict_ms is created as an empty dictionnary dict_md = {} From 53341dd1d0f425f16ae69b7548cf86b2e0b50a43 Mon Sep 17 00:00:00 2001 From: Eric Prestat Date: Thu, 4 Jul 2024 09:48:26 +0100 Subject: [PATCH 20/21] ruff fixes --- rsciio/digitalsurf/_api.py | 16 ++++----- rsciio/tests/test_digitalsurf.py | 61 +++++++++++++++----------------- rsciio/tests/test_import.py | 5 +-- 3 files changed, 37 insertions(+), 45 deletions(-) diff --git a/rsciio/digitalsurf/_api.py b/rsciio/digitalsurf/_api.py index fc751b4b6..cdc78e718 100644 --- a/rsciio/digitalsurf/_api.py +++ b/rsciio/digitalsurf/_api.py @@ -59,7 +59,7 @@ _logger = logging.getLogger(__name__) -def parse_metadata(cmt : str, prefix : str = '$', delimiter : str = '=') -> dict: +def parse_metadata(cmt: str, prefix: str = "$", delimiter: str = "=") -> dict: """ Parse metadata from the comment field of a digitalsurf file, or any other str in similar formatting. Return it as a hyperspy-compatible nested dict. @@ -70,11 +70,11 @@ def parse_metadata(cmt : str, prefix : str = '$', delimiter : str = '=') -> dict Str containing contents of a digitalsurf file "comment" field. prefix : str Prefix character, must be present at the start of each line, - otherwise the line is ignored. 
``"$"`` for digitalsurf files, + otherwise the line is ignored. ``"$"`` for digitalsurf files, typically an empty string (``""``) when parsing from text files. Default is ``"$"``. delimiter : str - Character that delimit key-value pairs in digitalsurf comment. + Character that delimit key-value pairs in digitalsurf comment. Default is ``"="``. Returns @@ -919,7 +919,7 @@ def _norm_data(self, data: np.ndarray, is_special: bool): raise MountainsMapFileError( "digitalsurf file formats do not support export of complex data. Convert data to real-value representations before before export" ) - elif data_type == bool: + elif np.issubdtype(data_type, bool): pointsize = 16 Zmin = 0 Zmax = 1 @@ -1157,10 +1157,8 @@ def _build_workdict( comment_len = len(f"{comment}".encode("latin-1")) if comment_len >= 2**15: - warnings.warn( - f"Comment exceeding max length of 32.0 kB and will be cropped" - ) - comment_len = np.int16(2**15-1) + warnings.warn("Comment exceeding max length of 32.0 kB and will be cropped") + comment_len = np.int16(2**15 - 1) self._work_dict["_50_Comment_size"]["value"] = comment_len @@ -1169,7 +1167,7 @@ def _build_workdict( warnings.warn( "Private size exceeding max length of 32.0 kB and will be cropped" ) - privatesize = np.uint16(2**15-1) + privatesize = np.uint16(2**15 - 1) self._work_dict["_51_Private_size"]["value"] = privatesize diff --git a/rsciio/tests/test_digitalsurf.py b/rsciio/tests/test_digitalsurf.py index 7a4ab0b16..d50d7e134 100644 --- a/rsciio/tests/test_digitalsurf.py +++ b/rsciio/tests/test_digitalsurf.py @@ -141,7 +141,7 @@ def test_invalid_data(): - dsh = DigitalSurfHandler('untitled.sur') + dsh = DigitalSurfHandler("untitled.sur") with pytest.raises(MountainsMapFileError): dsh._Object_type = "INVALID" @@ -435,7 +435,7 @@ def test_load_surface(): def test_choose_signal_type(): - reader = DigitalSurfHandler('untitled.sur') + reader = DigitalSurfHandler("untitled.sur") # Empty dict should not raise error but return empty string mock_dict = 
{} @@ -659,7 +659,7 @@ def test_split(test_tuple): @pytest.mark.parametrize("special", [True, False]) @pytest.mark.parametrize("fullscale", [True, False]) def test_norm_int_data(dtype, special, fullscale): - dh = DigitalSurfHandler('untitled.sur') + dh = DigitalSurfHandler("untitled.sur") if fullscale: minint = np.iinfo(dtype).min @@ -688,7 +688,6 @@ def test_norm_int_data(dtype, special, fullscale): assert Zmax == maxval - @pytest.mark.parametrize("transpose", [True, False]) def test_writetestobjects_rgb(tmp_path, transpose): # This is just a different test function because the @@ -730,7 +729,6 @@ def test_writetestobjects_rgb(tmp_path, transpose): assert np.allclose(ax.axis, ax3.axis) - @pytest.mark.parametrize( "dtype", [np.int8, np.int16, np.int32, np.float64, np.uint8, np.uint16] ) @@ -746,7 +744,6 @@ def test_writegeneric_validtypes(tmp_path, dtype, compressed): assert np.allclose(gen2.data, gen.data) - @pytest.mark.parametrize("compressed", [True, False]) def test_writegeneric_nans(tmp_path, compressed): """This test establishes the capability of saving a generic signal @@ -764,7 +761,6 @@ def test_writegeneric_nans(tmp_path, compressed): assert np.allclose(gen2.data, gen.data, equal_nan=True) - def test_writegeneric_transposedprofile(tmp_path): """This test checks the expected behaviour that a transposed profile gets correctly saved but a warning is raised.""" @@ -780,14 +776,14 @@ def test_writegeneric_transposedprofile(tmp_path): assert np.allclose(gen2.data, gen.data) -def test_writegeneric_transposedsurface(tmp_path,): +def test_writegeneric_transposedsurface( + tmp_path, +): """This test establishes the possibility of saving RGBA surface series while discarding A channel and warning""" size = (44, 58) - - gen = hs.signals.Signal2D( - np.random.random(size=size)*1e4 - ) + + gen = hs.signals.Signal2D(np.random.random(size=size) * 1e4) gen = gen.T fgen = tmp_path.joinpath("test.sur") @@ -814,7 +810,6 @@ def test_writegeneric_failingtypes(tmp_path, dtype): 
gen.save(fgen, overwrite=True) - def test_writegeneric_failingformat(tmp_path): gen = hs.signals.Signal1D(np.zeros((3, 4, 5, 6))) fgen = tmp_path.joinpath("test.sur") @@ -961,43 +956,45 @@ def test_writegeneric_surfaceseries(tmp_path, dtype, compressed): def test_writegeneric_datetime(tmp_path): - gen = hs.signals.Signal1D(np.random.rand(87)) - gen.metadata.General.date = '2024-06-30' - gen.metadata.General.time = '13:29:10' - + gen.metadata.General.date = "2024-06-30" + gen.metadata.General.time = "13:29:10" + fgen = tmp_path.joinpath("test.pro") gen.save(fgen) gen2 = hs.load(fgen) assert gen2.original_metadata.Object_0_Channel_0.Header.H40_Seconds == 10 assert gen2.original_metadata.Object_0_Channel_0.Header.H41_Minutes == 29 - assert gen2.original_metadata.Object_0_Channel_0.Header.H42_Hours == 13 - assert gen2.original_metadata.Object_0_Channel_0.Header.H43_Day == 30 - assert gen2.original_metadata.Object_0_Channel_0.Header.H44_Month == 6 - assert gen2.original_metadata.Object_0_Channel_0.Header.H45_Year == 2024 + assert gen2.original_metadata.Object_0_Channel_0.Header.H42_Hours == 13 + assert gen2.original_metadata.Object_0_Channel_0.Header.H43_Day == 30 + assert gen2.original_metadata.Object_0_Channel_0.Header.H44_Month == 6 + assert gen2.original_metadata.Object_0_Channel_0.Header.H45_Year == 2024 assert gen2.original_metadata.Object_0_Channel_0.Header.H46_Day_of_week == 6 def test_writegeneric_comments(tmp_path): - gen = hs.signals.Signal1D(np.random.rand(87)) fgen = tmp_path.joinpath("test.pro") - res = "".join(["a" for i in range(2**15+2)]) - cmt = {'comment': res} + res = "".join(["a" for i in range(2**15 + 2)]) + cmt = {"comment": res} with pytest.raises(MountainsMapFileError): - gen.save(fgen,set_comments='somethinginvalid') + gen.save(fgen, set_comments="somethinginvalid") with pytest.warns(): - gen.save(fgen,set_comments='custom',comments=cmt) - + gen.save(fgen, set_comments="custom", comments=cmt) + gen2 = hs.load(fgen) - assert 
gen2.original_metadata.Object_0_Channel_0.Parsed.UNTITLED.comment.startswith('a') - assert len(gen2.original_metadata.Object_0_Channel_0.Parsed.UNTITLED.comment) < 2**15-1 + assert gen2.original_metadata.Object_0_Channel_0.Parsed.UNTITLED.comment.startswith( + "a" + ) + assert ( + len(gen2.original_metadata.Object_0_Channel_0.Parsed.UNTITLED.comment) + < 2**15 - 1 + ) - priv = res.encode('latin-1') + priv = res.encode("latin-1") with pytest.warns(): - gen.save(fgen,private_zone=priv,overwrite=True) - + gen.save(fgen, private_zone=priv, overwrite=True) diff --git a/rsciio/tests/test_import.py b/rsciio/tests/test_import.py index 53b11358c..dbe849fb1 100644 --- a/rsciio/tests/test_import.py +++ b/rsciio/tests/test_import.py @@ -141,10 +141,7 @@ def test_dir_plugins(plugin): "parse_timestamps", ] elif plugin["name"] == "DigitalSurf": - assert dir(plugin_module) == [ - "file_reader", - "file_writer", - "parse_metadata"] + assert dir(plugin_module) == ["file_reader", "file_writer", "parse_metadata"] elif plugin["writes"] is False: assert dir(plugin_module) == ["file_reader"] else: From 1f5e4c55d569102eeadd6075aaa0a8e1a8cae61f Mon Sep 17 00:00:00 2001 From: Eric Prestat Date: Thu, 4 Jul 2024 10:07:56 +0100 Subject: [PATCH 21/21] Improve changelog entry --- upcoming_changes/280.enhancements.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/upcoming_changes/280.enhancements.rst b/upcoming_changes/280.enhancements.rst index bd637c83b..1af831919 100644 --- a/upcoming_changes/280.enhancements.rst +++ b/upcoming_changes/280.enhancements.rst @@ -1 +1,5 @@ -:ref:`DigitalSurf surfaces `: Add file_writer support, add series of RGB images / surfaces support. \ No newline at end of file +:ref:`DigitalSurf surfaces `: + +- add support for saving file - see :func:`~.digitalsurf.file_writer` +- add the :func:`~.digitalsurf.parse_metadata` function to parse metadata from ``sur`` file +- add series of RGB images / surfaces support. \ No newline at end of file