Init code style checking, pytest, and coverage #31

Merged 11 commits on Jul 10, 2023
5 changes: 5 additions & 0 deletions .bumpversion.cfg
@@ -0,0 +1,5 @@
[bumpversion]
current_version = 0.2.0
files = setup.py alea/__init__.py
commit = True
tag = True
10 changes: 10 additions & 0 deletions .coveragerc
@@ -0,0 +1,10 @@
# .coveragerc to control coverage.py
[report]
# omit =

# Regexes for lines to exclude from consideration
exclude_lines =
    if __name__ == .__main__.:
    raise

ignore_errors = True
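
For context (this snippet is an illustration, not part of the PR): coverage.py treats each entry under exclude_lines as a regular expression, and any source line matching one of them is dropped from the report; when the matched line opens a block, such as the __main__ guard, the whole block is excluded. A minimal sketch with a hypothetical module:

# demo.py (hypothetical) -- shows which lines the exclusions above would remove from the report
def run() -> int:
    return 42


def main() -> int:
    value = run()
    if value < 0:
        raise ValueError("unexpected value")  # matches the "raise" pattern, so it is not counted
    return value


if __name__ == "__main__":  # matches the first pattern; the guard and its body are excluded
    main()

This keeps defensive code that the test suite never executes from lowering the reported coverage.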
27 changes: 27 additions & 0 deletions .github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,27 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Additional context**
Add any other context about the problem here.
25 changes: 25 additions & 0 deletions .github/workflows/code_style.yml
@@ -0,0 +1,25 @@
# copied from https://github.com/XENONnT/straxen/blob/master/.github/workflows/code_style.yml

name: Python style
on:
  pull_request:
    # types: [opened]
jobs:
  qa:
    name: Quality check
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@master
        with:
          python-version: 3.8
      - name: patch reviewdog
        run: sudo chown -R root:root $GITHUB_WORKSPACE
      - name: Wemake Python Styleguide
        uses: wemake-services/[email protected]
        continue-on-error: true
        with:
          reporter: 'github-pr-review'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
69 changes: 69 additions & 0 deletions .github/workflows/pytest.yml
@@ -0,0 +1,69 @@
# copied from https://github.com/XENONnT/straxen/blob/master/.github/workflows/pytest.yml
# Test alea on each PR.
# We run three types of tests:
# - Pytest -> these are the "normal" tests and should be run for all
# python versions
# - Coveralls -> this is to see if we are covering all our lines of
# code with our tests. The results get uploaded to
# coveralls.io/github/XENONnT/alea
# - pytest_no_database -> we want to make sure we can run the tests even
# if we don't have access to our database since this will e.g. happen
# when someone is pushing a PR from their own fork as we don't
# propagate our secrets there.

name: Test package

# Trigger this workflow on pushes and pull requests to master
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ "ubuntu-latest" ]
        python-version: [ 3.8, 3.9 ]
        test: [ 'coveralls', 'pytest' ]

    steps:
      # Setup and installation
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/[email protected]
        with:
          python-version: ${{ matrix.python-version }}

      - name: Checkout repo
        uses: actions/checkout@v3

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install pytest coverage coveralls
          pip install -r requirements.txt

      - name: Install alea
        run: |
          pip install .

      - name: Test package
        # This is running a normal test
        run: |
          coverage run --source=alea -m pytest --durations 0
          coverage report

      - name: Coveralls
        # Make the coverage report and upload
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        if: matrix.test == 'coveralls'
        run: |
          coverage run --source=alea -m pytest -v
          coveralls --service=github

      - name: goodbye
        run: echo "tests done, bye bye"
8 changes: 5 additions & 3 deletions alea/examples/gaussian_model.py
@@ -1,8 +1,10 @@
from typing import Optional
from alea.statistical_model import StatisticalModel
import scipy.stats as stats

from scipy import stats
import numpy as np

from alea.statistical_model import StatisticalModel


class GaussianModel(StatisticalModel):
def __init__(self, parameter_definition: Optional[dict or list] = None):
@@ -13,7 +15,7 @@ def __init__(self, parameter_definition: Optional[dict or list] = None):
sigma is fixed in this example.
"""
if parameter_definition is None:
parameter_definition = ["mu", "sigma"]
parameter_definition = ['mu', 'sigma']
super().__init__(parameter_definition=parameter_definition)

def _ll(self, mu=None, sigma=None):
55 changes: 36 additions & 19 deletions alea/parameters.py
@@ -12,9 +12,11 @@ class Parameter:
nominal_value (float, optional): The nominal value of the parameter.
fittable (bool, optional): Indicates if the parameter is fittable or always fixed.
ptype (str, optional): The type of the parameter.
uncertainty (float or str, optional): The uncertainty of the parameter. If a string,
uncertainty (float or str, optional):
The uncertainty of the parameter. If a string,
it can be evaluated as a numpy or scipy function to define non-gaussian constraints.
relative_uncertainty (bool, optional): Indicates if the uncertainty is relative to the nominal_value.
relative_uncertainty (bool, optional):
Indicates if the uncertainty is relative to the nominal_value.
blueice_anchors (list, optional): Anchors for blueice template morphing.
fit_limits (tuple, optional): The limits for fitting the parameter.
parameter_interval_bounds (tuple, optional): limits for computing confidence intervals
@@ -49,11 +51,12 @@ def __init__(
self.description = description

def __repr__(self) -> str:
parameter_str = [f"{k}={v}" for k,
v in self.__dict__.items() if v is not None]
parameter_str = [
f"{k}={v}" for k, v in self.__dict__.items() if v is not None]
parameter_str = ", ".join(parameter_str)
return f'{self.__class__.__module__}.{self.__class__.__qualname__}'\
f'({parameter_str})'
repr = f'{self.__class__.__module__}.{self.__class__.__qualname__}'
repr += f'({parameter_str})'
return repr

@property
def uncertainty(self) -> float or Any:
@@ -67,7 +70,8 @@ def uncertainty(self) -> float or Any:
return eval(self._uncertainty)
else:
raise ValueError(
f"Uncertainty string '{self._uncertainty}' must start with 'scipy.' or 'numpy.'")
f"Uncertainty string '{self._uncertainty}'"
" must start with 'scipy.' or 'numpy.'")
else:
return self._uncertainty
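
For illustration (not part of this diff), a parameter whose constraint is given as such a string could be constructed as sketched below; the keyword arguments are assumed from the attribute list in the class docstring rather than taken from the diff:

from alea.parameters import Parameter

# Hypothetical parameter with a non-gaussian constraint expressed as a scipy string.
# The string must start with "scipy." or "numpy."; the uncertainty property eval()s it lazily.
signal_eff = Parameter(
    name="signal_efficiency",
    nominal_value=1.0,
    fittable=True,
    uncertainty="scipy.stats.uniform(loc=0.9, scale=0.2)",
)
# Accessing signal_eff.uncertainty would then return the evaluated scipy distribution.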

@@ -155,8 +159,9 @@ def from_list(cls, names: List[str]):

def __repr__(self) -> str:
parameter_str = ", ".join(self.names)
return f'{self.__class__.__module__}.{self.__class__.__qualname__}'\
f'({parameter_str})'
repr = f'{self.__class__.__module__}.{self.__class__.__qualname__}'
repr += f'({parameter_str})'
return repr

def add_parameter(self, parameter: Parameter) -> None:
"""
@@ -181,14 +186,20 @@ def fit_guesses(self) -> Dict[str, float]:
"""
Returns a dictionary of fit guesses.
"""
return {name: param.fit_guess for name, param in self.parameters.items() if param.fit_guess is not None}
return {
name: param.fit_guess
for name, param in self.parameters.items()
if param.fit_guess is not None}

@property
def fit_limits(self) -> Dict[str, float]:
"""
Returns a dictionary of fit limits.
"""
return {name: param.fit_limits for name, param in self.parameters.items() if param.fit_limits is not None}
return {
name: param.fit_limits
for name, param in self.parameters.items()
if param.fit_limits is not None}

@property
def fittable(self) -> List[str]:
@@ -209,16 +220,21 @@ def nominal_values(self) -> dict:
"""
return a dict of name:nominal value for all applicable parameters
"""
return {k: i.nominal_value for k, i in self.parameters.items() if i.nominal_value is not None}

def __call__(self, return_fittable: bool = False,
**kwargs: Any) -> Dict[str, float]:
return {
k: i.nominal_value
for k, i in self.parameters.items()
if i.nominal_value is not None}

def __call__(
self, return_fittable: bool = False,
**kwargs: Any) -> Dict[str, float]:
"""
Returns a dictionary of parameter values, optionally filtered
to return only fittable parameters.

Args:
return_fittable (bool, optional): Indicates if only fittable parameters should be returned.
return_fittable (bool, optional):
Indicates if only fittable parameters should be returned.
**kwargs: Additional keyword arguments to override parameter values.

Returns:
@@ -233,7 +249,7 @@ def __call__(self, return_fittable: bool = False,

for name, param in self.parameters.items():
new_val = kwargs.get(name, None)
if (return_fittable and param.fittable) or not return_fittable:
if (return_fittable and param.fittable) or (not return_fittable):
values[name] = new_val if new_val is not None else param.nominal_value
return values
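
As a usage sketch of the call semantics above (not part of this diff; it assumes Parameters.from_list creates fittable parameters with no nominal values, which the excerpt does not show):

from alea.parameters import Parameters

params = Parameters.from_list(["mu", "sigma"])
all_values = params(mu=1.2)                   # keyword arguments override nominal values
fittable_only = params(return_fittable=True)  # keep only parameters flagged as fittable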

@@ -285,5 +301,6 @@ def values_in_fit_limits(self, **kwargs: Any) -> bool:
"""
Returns True if all values are within the fit limits.
"""
return all(self.parameters[name].value_in_fit_limits(value)
for name, value in kwargs.items())
return all(
self.parameters[name].value_in_fit_limits(value)
for name, value in kwargs.items())