diff --git a/src/bpm_data_combiner/app/bdata.py b/src/bpm_data_combiner/app/bdata.py
new file mode 100644
index 0000000..b6d0405
--- /dev/null
+++ b/src/bpm_data_combiner/app/bdata.py
@@ -0,0 +1,81 @@
+from typing import Sequence, Mapping
+import numpy as np
+
+from ..data_model.bpm_data_collection import BPMDataCollectionStats
+
+# is this the correct way to convert the data?
+scale_bits = 2 ** 15 / 10
+
+# scale rms so that the slow orbit feedback accepts the data
+# a factor of 100 seems to be enough.
+# todo: add a check that the noise is large enough
+scale_rms = 20
+
+nm2mm = 1e-6
+
+
+def convert(data: Sequence[float], scale_axis: float = 1.0):
+    """
+    Todo:
+        is this the right stage to convert to np.int16?
+    """
+    return (np.asarray(data) * (nm2mm * scale_bits * scale_axis)).astype(np.int16)
+
+
+def convert_noise(data, scale_axis: float = 1):
+    noise = convert(data.std, scale_axis=scale_axis * scale_rms)
+    # at least one bit has to be set, otherwise the slow orbit feedback
+    # will not consider it as noise
+    noise[data.n_readings > 0] = np.clip(noise, 1, None)[data.n_readings > 0]
+    # zero noise: the sofb orbit will then consider the reading as not existing
+    noise[data.n_readings <= 0] = 0
+    return noise
+
+
+def stat_data_to_bdata(
+    data: BPMDataCollectionStats,
+    *,
+    device_index: Mapping[str, int],
+    n_bpms: int,
+    scale_x_axis: float
+):
+    """
+    device_index: needs to map every device name to its bdata column,
+    including inactive devices (e.g. via None or another unique placeholder)
+
+    Todo:
+        do I need to map data to positions again?
+        How is the stat step handling it?
+    """
+    n_entries = len(data.x.values)
+    if n_entries > n_bpms:
+        raise ValueError(f"number of bpms {n_entries} too many: max {n_bpms}")
+
+    bdata = np.empty([8, n_bpms], dtype=np.int16)
+    bdata.fill(0)
+
+    indices = [device_index[name] for name in data.names]
+    # flipping coordinate system to get the dispersion on the correct side
+    # todo: check at which stage this should be done
+    # fmt:off
+    bdata[0, indices] = - convert(data.x.values, scale_axis=scale_x_axis)
+    bdata[1, indices] = convert(data.y.values)
+    # fmt:on
+    # intensity z 1.3
+    # bdata[2] = 3
+    # intensityz z 1.3
+    # bdata[3] = 3
+    # AGC status needs to be three for valid data
+    # todo: find out what to set if only one plane is valid?
+    bdata[4, indices] = np.where(
+        (data.x.n_readings > 0) | (data.y.n_readings > 0), 3, 0
+    )
+    bdata[4, -1] = 2
+
+    bdata[6, indices] = convert_noise(data.x, scale_axis=scale_x_axis)
+    bdata[7, indices] = convert_noise(data.y)
+
+    return bdata
+
+
+__all__ = ["stat_data_to_bdata"]
diff --git a/src/bpm_data_combiner/app/controller.py b/src/bpm_data_combiner/app/controller.py
index 68edd61..046326b 100644
--- a/src/bpm_data_combiner/app/controller.py
+++ b/src/bpm_data_combiner/app/controller.py
@@ -1,18 +1,25 @@
 from enum import Enum
 import logging
-from typing import Sequence, Union, Tuple
+from typing import Sequence, Union
 
-import numpy as np
 from collector import Collector, CollectionItemInterface
 
 from ..bl.accumulator import Accumulator
 from ..interfaces.controller import ControllerInterface
-from ..monitor_devices import MonitorDevicesStatus, MonitorDeviceSynchronisation, StatusField
-from ..post_processor.combine import collection_to_bpm_data_collection, accumulated_collections_to_array
+from ..monitor_devices import (
+    MonitorDevicesStatus,
+    MonitorDeviceSynchronisation,
+    StatusField,
+)
+from ..post_processor.combine import (
+    collection_to_bpm_data_collection,
+    accumulated_collections_to_array,
+)
 from ..post_processor.handle_active_planes import pass_data_for_active_planes
 from ..post_processor.statistics import compute_mean_weights_for_planes
+from .bdata import stat_data_to_bdata
 from .config import Config
 from .view import Views
@@ -92,7 +99,9 @@ def _update(self, *, cmd, dev_name, tpro, **kwargs):
             else:
                 raise AssertionError(f"plane {plane} unknown")
         elif cmd == ValidCommands.sync_stat:
-            return self.dev_status(dev_name, StatusField.synchronised, kwargs["sync_stat"])
+            return self.dev_status(
+                dev_name, StatusField.synchronised, kwargs["sync_stat"]
+            )
         elif cmd == ValidCommands.known_device_names:
             return self.set_device_names(device_names=kwargs["known_device_names"])
         elif cmd == ValidCommands.cfg_comp_median:
@@ -112,8 +121,7 @@ def new_value(self, dev_name: str, value: Sequence[int]):
         cnt, x, y = value
         collection = self.collector.new_item(
             pass_data_for_active_planes(
-                cnt, x, y,
-                device_status=self.monitor_devices.devices_status[dev_name]
+                cnt, x, y, device_status=self.monitor_devices.devices_status[dev_name]
             )
         )
         if collection.ready:
@@ -133,12 +141,20 @@ def dev_status(
 
     def periodic_trigger(self):
        """Present new (averaged) bpm data on periodic trigger"""
-        data = accumulated_collections_to_array(self.accumulator.get(), dev_names_index=self.dev_name_index)
+        data = accumulated_collections_to_array(
+            self.accumulator.get(), dev_names_index=self.dev_name_index
+        )
         stat_data = compute_mean_weights_for_planes(data)
         self.views.periodic_data.update(stat_data)
         logger.debug("pushing stat data to bdata_view")
         #: todo ... need to get kwargs from config
-        self.views.bdata.update(stat_data, n_bpms=32, scale_x_axis=1./1.4671)
+        bdata = stat_data_to_bdata(
+            stat_data,
+            device_index=self.dev_name_index,
+            n_bpms=32,
+            scale_x_axis=1.0 / 1.4671,
+        )
+        self.views.bdata.update(bdata.ravel())
         logger.debug("pushing stat data to bdata_view done")
 
     def _on_new_collection_ready(self, col: CollectionItemInterface):
@@ -166,4 +182,3 @@ def compute_show_median(self):
         self.views.monitor_device_sync.update(
             *self.monitor_device_synchronisation.offset_from_median()
         )
-
diff --git a/src/bpm_data_combiner/app/view.py b/src/bpm_data_combiner/app/view.py
index c71b17d..98064cb 100644
--- a/src/bpm_data_combiner/app/view.py
+++ b/src/bpm_data_combiner/app/view.py
@@ -4,9 +4,9 @@
 from ..data_model.bpm_data_collection import BPMDataCollection, BPMDataCollectionStats
 import pydev
 
-pydev_supports_sequence = True
-
 import sys
+
+pydev_supports_sequence = True
 
 stream = sys.stdout
@@ -18,12 +18,14 @@ class ViewBPMMonitoring:
     def __init__(self, prefix: str):
         self.prefix = prefix
 
-    def update(self, *,
-               names: Sequence[str],
-               active: Sequence[bool],
-               synchronised: Sequence[bool],
-               usable: Sequence[bool],
-               ):
+    def update(
+        self,
+        *,
+        names: Sequence[str],
+        active: Sequence[bool],
+        synchronised: Sequence[bool],
+        usable: Sequence[bool],
+    ):
         # names = string_array_to_bytes(names)
         names = list(names)
         label = self.prefix + ":" + "names"
@@ -50,7 +52,6 @@ def update(self, *,
         pydev.iointr(label, usable.tolist())
 
 
-
 class ViewBPMDataCollection:
     def __init__(self, prefix: str):
         self.prefix = prefix
@@ -134,53 +135,16 @@ class ViewBPMDataAsBData:
     def __init__(self, prefix: str):
         self.prefix = prefix
 
-    def update(self, data: BPMDataCollectionStats, *, n_bpms, scale_x_axis):
-        """prepare data as expected
-        """
-        logger.debug("view bdata: publishing data %s", data)
-        nm2mm = 1e-6
-        n_entries = len(data.x.values)
-        if n_entries > n_bpms:
-            raise ValueError("number of bpms %s too many. max %s", n_entries, n_bpms)
-
-        bdata = np.empty([8, n_bpms], dtype=float)
-        bdata.fill(0.0)
-        # is this the correct way to convert the data ?
-        scale_bits = 2**15/10
-
-        # flipping coordinate system to get the dispersion on the correct side
-        # todo: check at which state this should be done
-        # fmt:off
-        def convert(data, scale_axis = 1.0):
-            return data * (nm2mm * scale_bits * scale_axis)
-        bdata[0, :n_entries] = - convert(data.x.values, scale_axis=scale_x_axis)
-        bdata[1, :n_entries] = convert(data.y.values)
-        # fmt:on
-        # intensity z 1.3
-        # bdata[2] = 3
-        # intensityz z 1.3
-        # bdata[3] = 3
-        # AGC status needs to be three for valid data
-        # todo: find out what to set if only one plane is valid?
-        bdata[4,:n_entries] = np.where((data.x.n_readings > 0) | (data.y.n_readings > 0), 3, 0)
-        bdata[4, -1] = 2
-        # scale rms so that the slow orbit feedback accepts the data
-        # factor 100 seems to be enough.
-        # I think I should add some check that the noise is large enough
-        scale_rms = 20
-        def convert_noise(data, scale_axis = 1):
-            noise = convert(data.std, scale_axis = scale_axis * scale_rms)
-            noise[data.n_readings > 0] = np.clip(noise, 1, None)[data.n_readings>0]
-            # so sofb Orbit will consider it as not existing
-            noise[data.n_readings<= 0] = 0
-            return noise
-        bdata[6, :n_entries] = convert_noise(data.x, scale_axis=scale_x_axis)
-        bdata[7, :n_entries] = convert_noise(data.y)
+    def update(self, bdata: Sequence[int]):
+        """publish the bdata array, converted to the type pydev expects"""
+        logger.debug("view bdata: publishing data %s", bdata)
 
         label = f"{self.prefix}"
-        bdata = [float(v) for v in bdata.ravel().astype(np.int16)]
+        bdata = np.asarray(bdata).astype(np.int16)
+        if not pydev_supports_sequence:
+            bdata = [int(v) for v in bdata]
         pydev.iointr(label, bdata)
-        logger.debug("view bdata: label %s, %d n_entries", label, n_entries)
+        logger.debug("view bdata: label %s, %d entries", label, len(bdata))
 
 
 class ViewStringBuffer:
@@ -209,8 +173,8 @@ def __init__(self, prefix: str):
     def update(self, median: int, offset_from_median: Sequence[np.int32]):
         # stream.write(f"updating {self.prefix} with median {median}\n")
         # stream.flush()
-        pydev.iointr(self.prefix + ':median', median)
-        pydev.iointr(self.prefix + ':offset', list(offset_from_median))
+        pydev.iointr(self.prefix + ":median", median)
+        pydev.iointr(self.prefix + ":offset", list(offset_from_median))
 
 
 class ViewConfiguration:
@@ -220,7 +184,7 @@ def __init__(self, prefix: str):
     def update(self, median_computation: bool):
         stream.write(f"updating {self.prefix} with median {median_computation}\n")
         stream.flush()
-        pydev.iointr(self.prefix + ':comp:median', bool(median_computation))
+        pydev.iointr(self.prefix + ":comp:median", bool(median_computation))
 
 
 class Views:
diff --git a/src/bpm_data_combiner/monitor_devices/interfaces/monitor_devices_status.py b/src/bpm_data_combiner/monitor_devices/interfaces/monitor_devices_status.py
index ca7b83d..0e7d8af 100644
--- a/src/bpm_data_combiner/monitor_devices/interfaces/monitor_devices_status.py
+++ b/src/bpm_data_combiner/monitor_devices/interfaces/monitor_devices_status.py
@@ -43,5 +43,8 @@ def update(self, dev_name : str, field: StatusField, info: Union[bool,Synchronis
     @abstractmethod
     def get_device_names(self) -> Sequence[str]:
         """names of currently usable devices
+
+        Todo: should one provide a device name index instead?
+            does it belong here?
         """
         pass
diff --git a/src/bpm_data_combiner/post_processor/statistics.py b/src/bpm_data_combiner/post_processor/statistics.py
index 214568d..8c6972c 100644
--- a/src/bpm_data_combiner/post_processor/statistics.py
+++ b/src/bpm_data_combiner/post_processor/statistics.py
@@ -1,10 +1,8 @@
 import numpy as np
-from numpy import ma
 
-# which numpy version supports that ?
-# from numpy.typing import ArrayLike
-from bpm_data_combiner.data_model.bpm_data_accumulation import BPMDataAccumulationForPlane, BPMDataAccumulation
-from bpm_data_combiner.data_model.bpm_data_collection import BPMDataCollectionStatsPlane, BPMDataCollectionStats
+from ..data_model.bpm_data_accumulation import BPMDataAccumulationForPlane, BPMDataAccumulation
+from ..data_model.bpm_data_collection import BPMDataCollectionStatsPlane, BPMDataCollectionStats
+
 
 def compute_weights_scaled(values#: ArrayLike, ,*, n_readings: int): # -> ArrayLike:
diff --git a/src/tests/test_statistics.py b/src/tests/test_statistics.py
new file mode 100644
index 0000000..20f648c
--- /dev/null
+++ b/src/tests/test_statistics.py
@@ -0,0 +1,16 @@
+import numpy as np
+
+from bpm_data_combiner.data_model.bpm_data_accumulation import BPMDataAccumulationForPlane
+from bpm_data_combiner.post_processor.statistics import compute_mean_weight
+
+
+def test_compute_mean_weight_single_data():
+    p = BPMDataAccumulationForPlane(
+        values=np.array([[1, 2, 3, 4]]),
+        valid=np.array([[True] * 4])
+    )
+    print(p)
+    r = compute_mean_weight(p)
+    assert (r.values == np.array([1, 2, 3, 4])).all()
+    assert (r.n_readings == 1).all()
+    assert (r.std == 0).all()
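
For reference, a quick sanity check of the scaling introduced in bdata.py. This is a standalone sketch, not part of the patch: it only reuses the constants defined there and assumes the incoming positions and rms values are in nanometres, as the nm2mm factor suggests.

import numpy as np

scale_bits = 2 ** 15 / 10  # counts per mm; int16 full scale would then correspond to +/-10 mm
nm2mm = 1e-6               # assumed: positions arrive in nm
scale_rms = 20             # extra factor applied to the rms rows (6 and 7)

# position rows (0 and 1): 1 mm = 1_000_000 nm maps to ~3277 counts
x_nm = np.array([1_000_000.0, -500_000.0, 0.0])
print((x_nm * nm2mm * scale_bits).astype(np.int16))  # [ 3276 -1638     0]

# rms rows: scaled by scale_rms, clipped to at least 1 count where readings
# exist (so the slow orbit feedback still sees some noise), and forced to 0
# where no readings exist (channel treated as not existing)
std_nm = np.array([1.0, 0.0, 2_000.0])
n_readings = np.array([10, 0, 10])
noise = (std_nm * nm2mm * scale_bits * scale_rms).astype(np.int16)
noise[n_readings > 0] = np.clip(noise, 1, None)[n_readings > 0]
noise[n_readings <= 0] = 0
print(noise)  # [  1   0 131]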