Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

reduce codefactor #590

Merged
merged 5 commits into from
Jul 15, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/source/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
# -- Project information -----------------------------------------------------
import straxen
project = 'straxen'
# pylint: disable=redefined-builtin
copyright = '2018, straxen contributors and the XENON collaboration'
author = 'straxen contributors and the XENON collaboration'

Expand All @@ -46,7 +47,6 @@
]

# ADDED MANUALLY
import sys
from unittest.mock import MagicMock

class Mock(MagicMock):
Expand Down
2 changes: 1 addition & 1 deletion straxen/analyses/waveform_plot.py
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,7 @@ def plot_records_matrix(context, run_id,
bbox = inset_locator.inset_axes(ax,
width="20%", height="22%",
loc=cbar_loc)
[bbox.spines[k].set_visible(False) for k in bbox.spines]
_ = [bbox.spines[k].set_visible(False) for k in bbox.spines]
bbox.patch.set_facecolor((1, 1, 1, 0.9))
bbox.set_xticks([])
bbox.set_yticks([])
Expand Down
19 changes: 7 additions & 12 deletions straxen/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ def get_resource(x: str, fmt='text'):
f'cannot download it from anywhere.')


# Deprecated placeholder for resource management system in the future?
# Legacy loader for public URL files
def resource_from_url(html: str, fmt='text'):
"""
Return contents of file or URL html
Expand All @@ -191,10 +191,6 @@ def resource_from_url(html: str, fmt='text'):
your lamentations shall pass over the mountains, etc.
    :return: The file opened as specified per its format
"""
warn("Loading files from a URL is deprecated, and will be replaced "
"by loading from the database. See:"
"https://github.com/XENONnT/straxen/pull/311",
DeprecationWarning)

if '://' not in html:
raise ValueError('Can only open urls!')
Expand All @@ -216,6 +212,7 @@ def resource_from_url(html: str, fmt='text'):
break
else:
print(f'Did not find {cache_fn} in cache, downloading {html}')
# disable bandit
result = urllib.request.urlopen(html).read()
is_binary = fmt not in _text_formats
if not is_binary:
Expand All @@ -227,14 +224,12 @@ def resource_from_url(html: str, fmt='text'):
for cache_folder in cache_folders:
if not osp.exists(cache_folder):
continue
if not os.access(cache_folder, os.W_OK):
continue
cf = osp.join(cache_folder, cache_fn)
try:
with open(cf, mode=m) as f:
f.write(result)
except Exception:
pass
else:
available_cf = cf
with open(cf, mode=m) as f:
f.write(result)
available_cf = cf
if available_cf is None:
raise RuntimeError(
f"Could not store {html} in on-disk cache,"
Expand Down
3 changes: 1 addition & 2 deletions straxen/corrections_services.py
Original file line number Diff line number Diff line change
Expand Up @@ -305,14 +305,13 @@ def cacheable_naming(*args, fmt='.npy', base='./resource_cache/'):
except (FileExistsError, PermissionError):
pass
for arg in args:
if not type(arg) == str:
if not isinstance(arg, str):
raise TypeError(f'One or more args of {args} are not strings')
return base + '_'.join(args) + fmt


class GainsNotFoundError(Exception):
"""Fatal error if a None value is returned by the corrections"""
pass


def get_cmt_local_versions(global_version):
Expand Down
2 changes: 1 addition & 1 deletion straxen/itp_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def __init__(self, data, method='WeightedNearestNeighbors', **kwargs):
self.data = data

# Decompress / dequantize the map
# TODO: support multiple map names
# We should support multiple map names!
if 'compressed' in self.data:
compressor, dtype, shape = self.data['compressed']
self.data['map'] = np.frombuffer(
Expand Down
2 changes: 0 additions & 2 deletions straxen/mini_analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,6 @@ def wrapped_f(context: strax.Context, run_id: str, **kwargs):
holoviews.extension('bokeh')
_hv_bokeh_initialized = True

# TODO: This is a placeholder until the corrections system
# is more fully developed
if 'to_pe' in parameters and 'to_pe' not in kwargs:
kwargs['to_pe'] = straxen.get_correction_from_cmt(
run_id,
Expand Down
2 changes: 1 addition & 1 deletion straxen/mongo_storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ def compute_md5(abs_path):
return ""
# Also, disable all the Use of insecure MD2, MD4, MD5, or SHA1
# hash function violations in this function.
# bandit: disable=B303
# disable bandit
hash_md5 = hashlib.md5()
with open(abs_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
Expand Down
1 change: 0 additions & 1 deletion straxen/plugins/peaklet_processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,6 @@ def compute(self, records, start, end):
lone_hits=lone_hits)

def natural_breaks_threshold(self, peaks):
# TODO avoid duplication with PeakBasics somehow?
rise_time = -peaks['area_decile_from_midpoint'][:, 1]

    # This is ~1 for a clean S2, ~0 for a clean S1,
Expand Down
99 changes: 55 additions & 44 deletions tests/test_basics.py
Original file line number Diff line number Diff line change
@@ -1,51 +1,62 @@
import numpy as np
import straxen
import tempfile
import os
import unittest
import shutil
import uuid

import numpy as np
import straxen

test_run_id_1T = '180423_1021'


def test_straxen():
with tempfile.TemporaryDirectory() as temp_dir:
try:
print("Temporary directory is ", temp_dir)
os.chdir(temp_dir)

print("Downloading test data (if needed)")
st = straxen.contexts.demo()
# Ignore strax-internal warnings
st.set_context_config({'free_options': tuple(st.config.keys())})

run_df = st.select_runs(available='raw_records')
print(run_df)
run_id = run_df.iloc[0]['name']
assert run_id == test_run_id_1T

print("Test processing")
df = st.get_df(run_id, 'event_info')

assert len(df) > 0
assert 'cs1' in df.columns
assert df['cs1'].sum() > 0
assert not np.all(np.isnan(df['x'].values))

print('Test common.get_livetime_sec')
events = st.get_array(run_id, 'peaks')
straxen.get_livetime_sec(st, test_run_id_1T, things=events)
# TODO: find a way to break up the tests
# surely pytest has common startup/cleanup?

print("Test mini analysis")
@straxen.mini_analysis(requires=('raw_records',))
def count_rr(raw_records):
return len(raw_records)

n = st.count_rr(test_run_id_1T)
assert n > 100

# On windows, you cannot delete the current process'
# working directory, so we have to chdir out first.
finally:
os.chdir('..')
class TestBasics(unittest.TestCase):
    """Basic end-to-end tests for straxen using the 1T demo dataset.

    The demo context is built once in ``setUpClass`` (downloading the test
    data if needed) and shared by all test methods; the temporary directory
    is removed once in ``tearDownClass``.
    """
    # NOTE(review): as captured, this block was interleaved with GitHub
    # review-UI residue lines ("... marked this conversation as resolved.")
    # that broke the syntax; they have been removed here.

    @classmethod
    def setUpClass(cls) -> None:
        temp_folder = uuid.uuid4().hex
        # Keep one temp dir because we don't want to download the data every time.
        cls.tempdir = os.path.join(tempfile.gettempdir(), temp_folder)
        assert not os.path.exists(cls.tempdir)

        print("Downloading test data (if needed)")
        st = straxen.contexts.demo()
        cls.run_id = test_run_id_1T
        cls.st = st

    @classmethod
    def tearDownClass(cls):
        # Make sure to only cleanup this dir after we have done all the tests
        if os.path.exists(cls.tempdir):
            shutil.rmtree(cls.tempdir)

    def test_run_selection(self):
        """The demo context should expose exactly the expected 1T test run."""
        st = self.st
        # Ignore strax-internal warnings
        st.set_context_config({'free_options': tuple(st.config.keys())})

        run_df = st.select_runs(available='raw_records')
        print(run_df)
        run_id = run_df.iloc[0]['name']
        assert run_id == test_run_id_1T

    def test_processing(self):
        """Processing up to event_info should yield sane, non-empty results."""
        st = self.st
        df = st.get_df(self.run_id, 'event_info')

        assert len(df) > 0
        assert 'cs1' in df.columns
        assert df['cs1'].sum() > 0
        assert not np.all(np.isnan(df['x'].values))

    def test_get_livetime_sec(self):
        """straxen.get_livetime_sec should run on an array of peaks."""
        st = self.st
        events = st.get_array(self.run_id, 'peaks')
        straxen.get_livetime_sec(st, test_run_id_1T, things=events)

    def test_mini_analysis(self):
        """A user-registered mini-analysis should be callable on the context."""
        @straxen.mini_analysis(requires=('raw_records',))
        def count_rr(raw_records):
            return len(raw_records)

        n = self.st.count_rr(self.run_id)
        assert n > 100
4 changes: 2 additions & 2 deletions tests/test_channel_split.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ def test_channel_split(records):
result_2 = channel_split_naive(records, channel_range)

assert len(result) == len(result_2)
for i in range(len(result)):
for i, _ in enumerate(result):
np.testing.assert_array_equal(
np.unique(result[i]['channel']),
np.unique(result_2[i]['channel']))
np.testing.assert_array_equal(result[i], result_2[i])
np.testing.assert_array_equal(result[i], result_2[i])
3 changes: 1 addition & 2 deletions tests/test_count_pulses.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,13 @@ def test_count_pulses_2(records):


def _check_pulse_count(records):
# TODO: numba starts complaining if n_channels == 1, maybe file bug?
# numba starts complaining if n_channels == 1, maybe file bug?
n_ch = records['channel'].max() + 2 if len(records) else 0
counts = straxen.plugins.pulse_processing.count_pulses(
records, n_channels=n_ch)

assert counts.dtype == straxen.pulse_count_dtype(n_ch)

# TODO temporary hack until we fix strax issue #239
if not len(records):
assert len(counts) == 0
return
Expand Down