Enabling 3.12, using pytest-benchmark #61

Merged: 2 commits, Jan 13, 2024

Enables Python 3.12 in the CI test matrix, updates the pre-commit hook pins, and replaces the hand-rolled tests/performance_harness.py with pytest-benchmark tests in tests/test_performance.py.
1 change: 1 addition & 0 deletions .github/workflows/test.yml
@@ -15,6 +15,7 @@ jobs:
      fail-fast: false
      matrix:
        python-version:
+         - "3.12"
          - "3.11"
          - "3.10"
          - "3.9"
8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
@@ -7,19 +7,19 @@ repos:
      - id: check-toml
      - id: check-yaml
  - repo: https://github.com/PyCQA/isort
-   rev: "5.12.0"
+   rev: "5.13.2"
    hooks:
      - id: isort
  - repo: https://github.com/psf/black
-   rev: "22.6.0"
+   rev: "23.12.1"
    hooks:
      - id: black-jupyter
  - repo: https://github.com/PyCQA/flake8
-   rev: "4.0.1"
+   rev: "7.0.0"
    hooks:
      - id: flake8
  - repo: https://github.com/pre-commit/mirrors-mypy
-   rev: "v1.0.0"
+   rev: "v1.8.0"
    hooks:
      - id: mypy
        additional_dependencies:
2 changes: 1 addition & 1 deletion setup.cfg
@@ -77,6 +77,7 @@ dev =
    pydantic>2
    pytest
    pytest-asyncio
+   pytest-benchmark
    pytest-cov
    pytest-xdist
    snakeviz
@@ -92,7 +93,6 @@ dev =
omit =
    src/measured/_parser.py
    src/measured/pytest.py
-   tests/performance_harness.py

[isort]
profile = black
17 changes: 0 additions & 17 deletions src/measured/_parser.py
All 17 deletions in this file are whitespace-only: blank lines sitting directly after a block opener, which lines up with the black version bump above.

@@ -139,7 +139,6 @@ def match_examples(
self, (UnexpectedToken, UnexpectedEOF)
) and isinstance(ut, (UnexpectedToken, UnexpectedEOF)):
if ut.token == self.token: ##
-
logger.debug("Exact Match at example [%s][%s]" % (i, j))
return label

@@ -357,7 +356,6 @@ def classify(seq, key=None, value=None):
def _deserialize(data, namespace, memo):
if isinstance(data, dict):
if "__type__" in data: ##
-
class_ = namespace[data["__type__"]]
return class_.deserialize(data, memo)
elif "@" in data:
@@ -482,7 +480,6 @@ def get_regexp_width(expr):


class Meta:
-
empty: bool
line: int
column: int
@@ -642,7 +639,6 @@ def _apply_v_args(cls, visit_wrapper):
assert mro[0] is cls
libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)}
for name, value in getmembers(cls):
-
##

if name.startswith("_") or (
@@ -776,7 +772,6 @@ def _call_userfunc(self, tree, new_children=None):


class TransformerChain(Generic[_Leaf_T, _Return_T]):
-
transformers: "Tuple[Union[Transformer, TransformerChain], ...]"

def __init__(self, *transformers: "Union[Transformer, TransformerChain]") -> None:
@@ -797,7 +792,6 @@ def __mul__(
class Transformer_InPlace(Transformer):
# --
def _transform_tree(self, tree): ##
-
return self._call_userfunc(tree)

def transform(self, tree: Tree[_Leaf_T]) -> _Return_T:
@@ -1199,7 +1193,6 @@ def __eq__(self, other):


class Pattern(Serialize, ABC):
-
value: str
flags: Collection[str]
raw: Optional[str]
@@ -1309,7 +1302,6 @@ def __repr__(self):

def user_repr(self) -> str:
if self.name.startswith("__"): ##
-
return self.pattern.raw or self.name
else:
return self.name
@@ -1522,7 +1514,6 @@ def _build_mres(self, terminals, max_size):
try:
mre = self.re_.compile(pattern, self.g_regex_flags)
except AssertionError: ##
-
return self._build_mres(terminals, max_size // 2)

mres.append((mre, {i: n for n, i in mre.groupindex.items()}))
@@ -1604,7 +1595,6 @@ def make_lexer_state(self, text):


class BasicLexer(Lexer):
-
terminals: Collection[TerminalDef]
ignore_types: FrozenSet[str]
newline_types: FrozenSet[str]
@@ -1745,7 +1735,6 @@ def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token:


class ContextualLexer(Lexer):
-
lexers: Dict[str, BasicLexer]
root_lexer: BasicLexer

@@ -2021,7 +2010,6 @@ def __call__(self, children):
if filtered:
filtered += children[i].children
else: ##
-
filtered = children[i].children
else:
filtered.append(children[i])
@@ -2045,7 +2033,6 @@ def __call__(self, children):
if filtered:
filtered += children[i].children
else: ##
-
filtered = children[i].children
else:
filtered.append(children[i])
@@ -2271,7 +2258,6 @@ def default_callback(data, children):
default_callback = self.tree_class

for rule, wrapper_chain in self.rule_builders:
-
user_callback_name = (
rule.alias or rule.options.template_source or rule.origin.name
)
@@ -2631,7 +2617,6 @@ def __init__(self, lexer_conf, parser_conf, options, parser=None):
##

if parser: ##
-
self.parser = parser
else:
create_parser = _parser_creators.get(parser_conf.parser_type)
@@ -2704,7 +2689,6 @@ def parse_interactive(self, text=None, start=None):
def _validate_frontend_args(parser, lexer) -> None:
assert_config(parser, ("lalr", "earley", "cyk"))
if not isinstance(lexer, type): ##
-
expected = {
"lalr": ("basic", "contextual"),
"earley": ("basic", "dynamic", "dynamic_complete"),
@@ -3082,7 +3066,6 @@ def __init__(self, grammar: "Union[Grammar, str, IO[str]]", **options) -> None:
self._load(cached_parser_data, **options)
return
except Exception: ##
-
logger.exception(
"Failed to load Lark from cache: %r. We will try to carry on."
% cache_fn
100 changes: 0 additions & 100 deletions tests/performance_harness.py

This file was deleted.

55 changes: 55 additions & 0 deletions tests/test_performance.py
@@ -0,0 +1,55 @@
from pytest_benchmark.fixture import BenchmarkFixture

from measured import One, Quantity
from measured.astronomical import JulianYear
from measured.si import Ampere, Meter, Ohm, Second, Volt
from measured.us import Ounce, Ton


def test_quantity_construction(benchmark: BenchmarkFixture) -> None:
    def quantity_construction() -> Quantity:
        return Quantity(1000001, Meter)

    result = benchmark(quantity_construction)

    assert result == Quantity(1000001, Meter)


def test_low_dimensional_equality(benchmark: BenchmarkFixture) -> None:
    a = Quantity(1000, Meter)
    b = Quantity(1000, Meter)

    def low_dimensional_equality() -> bool:
        return bool(a == b)

    assert benchmark(low_dimensional_equality) is True


def test_high_dimensional_equality(benchmark: BenchmarkFixture) -> None:
    a = Quantity(1000, Ohm)
    b = Quantity(1000, Ohm)

    def high_dimensional_equality() -> bool:
        return bool(a == b)

    assert benchmark(high_dimensional_equality) is True


def test_computing_resistances(benchmark: BenchmarkFixture) -> None:
    a = Quantity(1000, Ampere)
    v = Quantity(1000, Volt)

    def computing_resistances() -> Quantity:
        return v / a

    assert benchmark(computing_resistances) == Quantity(1, Ohm)


def test_complicated_conversions(benchmark: BenchmarkFixture) -> None:
    o = Quantity(1000, Ounce / (JulianYear * Ampere))
    t = Quantity(1000, Ton / (Second * Ampere))

    def divide() -> Quantity:
        return (t / o).in_unit(One)

    assert benchmark(divide).unit == One
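
As a usage note: the benchmark fixture calls the wrapped function repeatedly with automatic calibration, records timing statistics, and returns the function's result, which is why the assertions above double as correctness checks. pytest-benchmark also provides a pedantic mode for explicit control over rounds and iterations. A minimal sketch, not part of this PR (the test name and the rounds/iterations values are illustrative assumptions):

from pytest_benchmark.fixture import BenchmarkFixture

from measured import Quantity
from measured.si import Meter


def test_equality_pedantic(benchmark: BenchmarkFixture) -> None:
    # Illustrative only, not part of this PR: pedantic mode trades
    # pytest-benchmark's automatic calibration for explicit settings.
    a = Quantity(1000, Meter)
    b = Quantity(1000, Meter)

    def eq() -> bool:
        return bool(a == b)

    # Runs eq() for 20 rounds of 100 iterations each and returns its result.
    assert benchmark.pedantic(eq, rounds=20, iterations=100) is True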