Skip to content

Commit

Permalink
Peaks and events (#285)
Browse files Browse the repository at this point in the history
>> Peak processing

Restructured plugins
Fixed area fraction top calculation
Peak classification works well, moved to peak_basics (to tune with options)
Introduced gain correction (to tune with options, for now)
Introduced position reconstruction with center of gravity

>> Events processing

Restructured plugins
Added simple S2 area correction for electron lifetime (to tune with options, for now)
Added positions at event level
Added new peak info at event level: waveform and area per channel

>> Extra

A new function to see default options per plugin (st.get_config_defaults)
More plugins registered in the context (and tested!)

---------

Co-authored-by: acolijn <[email protected]>
Co-authored-by: tobiasdenhollander <[email protected]>
  • Loading branch information
3 people authored Dec 7, 2023
1 parent 64d0378 commit 39ade32
Show file tree
Hide file tree
Showing 19 changed files with 422 additions and 403 deletions.
3 changes: 2 additions & 1 deletion .readthedocs.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
# Config for writing documentation
# Fails if not in master branch

# Required
version: 2
Expand Down Expand Up @@ -26,4 +27,4 @@ python:

formats:
- pdf
- epub
- epub
29 changes: 29 additions & 0 deletions amstrax/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import numpy as np
import strax


export, __all__ = strax.exporter()
__all__ += ['amstrax_dir',
'to_pe',
Expand Down Expand Up @@ -119,6 +120,34 @@ def print_versions(
return info
return df

@export
def get_config_defaults(st, exclude=('raw_records', 'records'), include=()):
    """Return the default option values of the plugins registered in a context.

    :param st: strax context whose ``_plugin_class_registry`` is inspected.
    :param exclude: data types whose providing plugins are skipped
        (a skip message is printed for each); defaults to the low-level
        raw_records/records plugins.
    :param include: if non-empty, restrict the scan to plugins that provide
        at least one of these data types.
    :return: pandas DataFrame with columns 'name' and 'default', one row per
        unique (option name, default value) pair, in first-seen order.
    """
    configs = []
    for plugin in st._plugin_class_registry.values():
        # Restrict to the requested data types, if any were given.
        if include and not any(t in plugin.provides for t in include):
            continue

        # Skip plugins that provide an excluded data type.
        excluded = [t for t in exclude if t in plugin.provides]
        if excluded:
            for t in excluded:
                print(f"{plugin.__name__} {t} skipped")
            continue

        for name, config in plugin.takes_config.items():
            configs.append((name, config.default))

    # Deduplicate while keeping a deterministic (first-seen) order,
    # unlike set(), whose iteration order is not guaranteed.
    unique_configs = list(dict.fromkeys(configs))

    return pd.DataFrame(unique_configs, columns=['name', 'default'])


def _version_info_for_module(module_name, include_git):
try:
Expand Down
21 changes: 11 additions & 10 deletions amstrax/contexts.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,20 +24,21 @@
register_all=[],
register=[ax.DAQReader,
ax.PulseProcessing,
# Peaks
ax.Peaks,
ax.PeakClassification,
ax.PeakBasics,
# ax.RecordsLED,
# ax.LEDCalibration,
ax.PeakClassification,
ax.PeakProximity,
ax.PeakPositions,
# Events
ax.Events,
# ax.CorrectedAreas,
# ax.EventPositions,
ax.EventBasics,
# ax.EventInfo,
# ax.EnergyEstimates,
ax.EventBasics,
ax.EventPositions,
ax.CorrectedAreas,
ax.EventInfo,
ax.EventWaveform,
ax.EventAreaPerChannel,
# LED plugins not default
# ax.RecordsLED,
# ax.LEDCalibration,
],
store_run_fields=(
'name', 'number',
Expand Down
17 changes: 11 additions & 6 deletions amstrax/plugins/events/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,21 @@
from . import event_basics
from .event_basics import *

from . import corrected_areas
from .corrected_areas import *

from . import energy_estimates
from .energy_estimates import *

from . import event_positions
from .event_positions import *

from . import corrected_areas
from .corrected_areas import *

from . import event_info
from .event_info import *

from . import event_area_per_channel
from .event_area_per_channel import *

from . import event_waveform
from .event_waveform import *




90 changes: 33 additions & 57 deletions amstrax/plugins/events/corrected_areas.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,22 @@
import numba
from typing import Tuple

import numpy as np
import strax
import amstrax

export, __all__ = strax.exporter()

@export
@strax.takes_config(
strax.Option(
"elife",
default=30000,
help="electron lifetime in [ns] (should be implemented in db soon)",
),
)
class CorrectedAreas(strax.Plugin):
"""
Plugin which applies light collection efficiency maps and electron
life time to the data.
"""Plugin which applies light collection efficiency maps and electron life time to the data.
Computes the cS1/cS2 for the main/alternative S1/S2 as well as the
corrected life time.
Note:
Expand All @@ -16,77 +25,44 @@ class CorrectedAreas(strax.Plugin):
There are now 3 components of cS2s: cs2_top, cS2_bottom and cs2.
cs2_top and cs2_bottom are corrected by the corresponding maps,
and cs2 is the sum of the two.
"""
__version__ = '0.5.1'

depends_on = ['event_basics', 'event_positions']
__version__ = "0.5.1"

depends_on = ("event_basics", "event_positions")

def infer_dtype(self):
dtype = []
dtype += strax.time_fields

for peak_type, peak_name in zip(['', 'alt_'], ['main', 'alternate']):
# Only apply
for peak_type, peak_name in zip(["", "alt_"], ["main", "alternate"]):
# Only apply
dtype += [
(f'{peak_type}cs1', np.float32, f'Corrected area of {peak_name} S1 [PE]'),
(
f'{peak_type}cs1_wo_timecorr', np.float32,
f'Corrected area of {peak_name} S1 (before LY correction) [PE]',
),
(f"{peak_type}cs1", np.float32, f"Corrected area of {peak_name} S1 [PE]"),
]
names = ['_wo_timecorr', '_wo_picorr', '_wo_elifecorr', '']
descriptions = ['S2 xy', 'SEG/EE', 'photon ionization', 'elife']
for i, name in enumerate(names):
if i == len(names) - 1:
description = ''
elif i == 0:
# special treatment for wo_timecorr, apply elife correction
description = ' (before ' + ' + '.join(descriptions[i + 1:-1])
description += ', after ' + ' + '.join(
descriptions[:i + 1] + descriptions[-1:]) + ')'
else:
description = ' (before ' + ' + '.join(descriptions[i + 1:])
description += ', after ' + ' + '.join(descriptions[:i + 1]) + ')'
dtype += [
(
f'{peak_type}cs2{name}', np.float32,
f'Corrected area of {peak_name} S2{description} [PE]',
),
(
f'{peak_type}cs2_area_fraction_top{name}', np.float32,
f'Fraction of area seen by the top PMT array for corrected '
f'{peak_name} S2{description}',
),
]
dtype += [
(f"{peak_type}cs2", np.float32, f"Corrected area of {peak_name} S2 [PE]"),
]

return dtype


def compute(self, events):
result = np.zeros(len(events), self.dtype)
result['time'] = events['time']
result['endtime'] = events['endtime']
result["time"] = events["time"]
result["endtime"] = events["endtime"]

# S1 corrections depend on the actual corrected event position.
# We use this also for the alternate S1; for e.g. Kr this is
# fine as the S1 correction varies slowly.
event_positions = np.vstack([events['x'], events['y'], events['z']]).T
# event_positions = np.vstack([events["x"], events["y"], events["z"]]).T

for peak_type in ["", "alt_"]:
result[f"{peak_type}cs1"] = (
result[f"{peak_type}cs1_wo_timecorr"] / 1) #self.rel_light_yield)
elife = self.config["elife"]

# now can start doing corrections
for peak_type in ["", "alt_"]:
# S2(x,y) corrections use the observed S2 positions
s2_positions = np.vstack([events[f'{peak_type}s2_x'], events[f'{peak_type}s2_y']]).T

# collect electron lifetime correction
# for electron lifetime corrections to the S2s,
# use drift time computed using the main S1.
el_string = peak_type + "s2_interaction_" if peak_type == "alt_" else peak_type
elife_correction = 1 #np.exp(events[f'{el_string}drift_time'] / self.elife)

# apply electron lifetime correction
result[f"{peak_type}cs2"] = events[f"{peak_type}s2_area"] * elife_correction


result[f"{peak_type}cs1"] = events[f"{peak_type}s1_area"]
result[f"{peak_type}cs2"] = events[f"{peak_type}s2_area"]*np.exp(events["drift_time"]/elife)

return result
#
36 changes: 0 additions & 36 deletions amstrax/plugins/events/energy_estimates.py

This file was deleted.

100 changes: 100 additions & 0 deletions amstrax/plugins/events/event_area_per_channel.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
from immutabledict import immutabledict
import numpy as np
import strax
import amstrax

export, __all__ = strax.exporter()


@export
@strax.takes_config(
    strax.Option(
        "channel_map",
        track=False,
        type=immutabledict,
        help="immutabledict mapping subdetector to (min, max) "
    ),
)
class EventAreaPerChannel(strax.Plugin):
    """Provide, per event, the area per channel plus the sample length and
    sample width of the main and alternative S1/S2 peaks, and the number of
    PMTs contributing to the main S1."""

    depends_on = ("event_basics", "peaks")
    provides = ("event_area_per_channel", "event_n_channel")
    # Both outputs live in the 'events' data kind.
    data_kind = immutabledict({
        "event_area_per_channel": "events",
        "event_n_channel": "events",
    })
    __version__ = "0.1.1"

    compressor = "zstd"
    # The (large) per-channel arrays are only stored on request;
    # the small n_channel output is always kept.
    save_when = immutabledict({
        "event_area_per_channel": strax.SaveWhen.EXPLICIT,
        "event_n_channel": strax.SaveWhen.ALWAYS,
    })

    def infer_dtype(self):
        """Derive both output dtypes from the dtype of the 'peaks' input."""
        peak_fields = self.deps["peaks"].dtype_for("peaks").fields
        # Human-readable labels used in the field descriptions.
        labels = {
            "s1": "main S1",
            "s2": "main S2",
            "alt_s1": "alternative S1",
            "alt_s2": "alternative S2",
        }

        apc_dtype = []
        for kind in ("s1", "s2", "alt_s1", "alt_s2"):
            label = labels[kind]
            apc_dtype.append((
                (f"Area per channel for {label}", f"{kind}_area_per_channel"),
                peak_fields["area_per_channel"][0],
            ))
            apc_dtype.append((
                (f"Length of the interval in samples for {label}", f"{kind}_length"),
                peak_fields["length"][0],
            ))
            apc_dtype.append((
                (f"Width of one sample for {label} [ns]", f"{kind}_dt"),
                peak_fields["dt"][0],
            ))

        n_channel_dtype = [
            (("Main S1 count of contributing PMTs", "s1_n_channels"), np.int16),
        ]
        return {
            "event_area_per_channel": apc_dtype + n_channel_dtype + strax.time_fields,
            "event_n_channel": n_channel_dtype + strax.time_fields,
        }

    def compute(self, events, peaks):
        """Copy the per-channel info of each event's S1/S2 peaks into the
        event-level output arrays."""
        apc = np.zeros(len(events), self.dtype["event_area_per_channel"])
        nch = np.zeros(len(events), self.dtype["event_n_channel"])
        for out in (apc, nch):
            out["time"] = events["time"]
            out["endtime"] = strax.endtime(events)

        for event_i, peaks_in_event in enumerate(
                strax.split_by_containment(peaks, events)):
            event = events[event_i]
            for kind in ("s1", "s2", "alt_s1", "alt_s2"):
                peak_index = event[f"{kind}_index"]
                # An index of -1 means this peak type is absent in the event.
                if peak_index == -1:
                    continue
                per_channel = peaks_in_event["area_per_channel"][peak_index]
                apc[f"{kind}_area_per_channel"][event_i] = per_channel
                apc[f"{kind}_length"][event_i] = peaks_in_event["length"][peak_index]
                apc[f"{kind}_dt"][event_i] = peaks_in_event["dt"][peak_index]
                if kind == "s1":
                    # Count PMTs that saw any light in the main S1.
                    apc["s1_n_channels"][event_i] = (per_channel > 0).sum()

        # Mirror the n_channels field into the always-saved output.
        nch["s1_n_channels"] = apc["s1_n_channels"]

        return {
            "event_area_per_channel": apc,
            "event_n_channel": nch,
        }
Loading

0 comments on commit 39ade32

Please sign in to comment.