diff --git a/.readthedocs.yaml b/.readthedocs.yaml index d180754e6..7d72db2a1 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -11,4 +11,3 @@ python: - requirements: requirements_docs.txt - method: pip path: . - system_packages: false diff --git a/CHANGES b/CHANGES index 1f5fe347d..a2f17c345 100644 --- a/CHANGES +++ b/CHANGES @@ -105,6 +105,12 @@ Pint Changelog (Issue #1030, #574) - Added angular frequency documentation page. - Move ASV benchmarks to dedicated folder. (Issue #1542) +- An ndim attribute has been added to Quantity and DataFrame has been added to upcast + types for pint-pandas compatibility. (#1596) +- Fix a recursion error that would be raised when passing quantities to `cond` and `x`. + (Issue #1510, #1530) +- Update test_non_int tests for pytest. +- Better support for uncertainties (See #1611, #1614) - Implement `numpy.broadcast_arrays` (#1607) - An ndim attribute has been added to Quantity and DataFrame has been added to upcast types for pint-pandas compatibility. (#1596) diff --git a/pint/compat.py b/pint/compat.py index 6be906f4d..552ff3f7e 100644 --- a/pint/compat.py +++ b/pint/compat.py @@ -12,14 +12,21 @@ import sys import math -import tokenize from decimal import Decimal from importlib import import_module -from io import BytesIO from numbers import Number from collections.abc import Mapping from typing import Any, NoReturn, Callable, Optional, Union -from collections.abc import Generator, Iterable +from collections.abc import Iterable + +try: + from uncertainties import UFloat, ufloat + from uncertainties import unumpy as unp + + HAS_UNCERTAINTIES = True +except ImportError: + UFloat = ufloat = unp = None + HAS_UNCERTAINTIES = False if sys.version_info >= (3, 10): @@ -58,19 +65,6 @@ def _inner(*args: Any, **kwargs: Any) -> NoReturn: return _inner -def tokenizer(input_string: str) -> Generator[tokenize.TokenInfo, None, None]: - """Tokenize an input string, encoded as UTF-8 - and skipping the ENCODING token. - - See Also - -------- - tokenize.tokenize - """ - for tokinfo in tokenize.tokenize(BytesIO(input_string.encode("utf-8")).readline): - if tokinfo.type != tokenize.ENCODING: - yield tokinfo - - # TODO: remove this warning after v0.10 class BehaviorChangeWarning(UserWarning): pass @@ -83,7 +77,10 @@ class BehaviorChangeWarning(UserWarning): HAS_NUMPY = True NUMPY_VER = np.__version__ - NUMERIC_TYPES = (Number, Decimal, ndarray, np.number) + if HAS_UNCERTAINTIES: + NUMERIC_TYPES = (Number, Decimal, ndarray, np.number, UFloat) + else: + NUMERIC_TYPES = (Number, Decimal, ndarray, np.number) def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False): if isinstance(value, (dict, bool)) or value is None: @@ -92,6 +89,11 @@ def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False): raise ValueError("Quantity magnitude cannot be an empty string.") elif isinstance(value, (list, tuple)): return np.asarray(value) + elif HAS_UNCERTAINTIES: + from pint.facets.measurement.objects import Measurement + + if isinstance(value, Measurement): + return ufloat(value.value, value.error) if force_ndarray or ( force_ndarray_like and not is_duck_array_type(type(value)) ): @@ -144,16 +146,13 @@ def _to_magnitude(value, force_ndarray=False, force_ndarray_like=False): "lists and tuples are valid magnitudes for " "Quantity only when NumPy is present." 
) - return value + elif HAS_UNCERTAINTIES: + from pint.facets.measurement.objects import Measurement + if isinstance(value, Measurement): + return ufloat(value.value, value.error) + return value -try: - from uncertainties import ufloat - - HAS_UNCERTAINTIES = True -except ImportError: - ufloat = None - HAS_UNCERTAINTIES = False try: from babel import Locale @@ -326,16 +325,25 @@ def isnan(obj: Any, check_all: bool) -> Union[bool, Iterable[bool]]: Always return False for non-numeric types. """ if is_duck_array_type(type(obj)): - if obj.dtype.kind in "if": + if obj.dtype.kind in "ifc": out = np.isnan(obj) elif obj.dtype.kind in "Mm": out = np.isnat(obj) else: - # Not a numeric or datetime type - out = np.full(obj.shape, False) + if HAS_UNCERTAINTIES: + try: + out = unp.isnan(obj) + except TypeError: + # Not a numeric or UFloat type + out = np.full(obj.shape, False) + else: + # Not a numeric or datetime type + out = np.full(obj.shape, False) return out.any() if check_all else out if isinstance(obj, np_datetime64): return np.isnat(obj) + elif HAS_UNCERTAINTIES and isinstance(obj, UFloat): + return unp.isnan(obj) try: return math.isnan(obj) except TypeError: diff --git a/pint/facets/__init__.py b/pint/facets/__init__.py index 4fd1597a6..22fbc6ce1 100644 --- a/pint/facets/__init__.py +++ b/pint/facets/__init__.py @@ -7,7 +7,7 @@ keeping each part small enough to be hackable. Each facet contains one or more of the following modules: - - definitions: classes describing an specific unit related definiton. + - definitions: classes describing specific unit-related definitons. These objects must be immutable, pickable and not reference the registry (e.g. ContextDefinition) - objects: classes and functions that encapsulate behavior (e.g. Context) - registry: implements a subclass of PlainRegistry or class that can be diff --git a/pint/facets/measurement/objects.py b/pint/facets/measurement/objects.py index b9cacdafe..a339ff60e 100644 --- a/pint/facets/measurement/objects.py +++ b/pint/facets/measurement/objects.py @@ -52,7 +52,7 @@ class Measurement(PlainQuantity): """ - def __new__(cls, value, error, units=MISSING): + def __new__(cls, value, error=MISSING, units=MISSING): if units is MISSING: try: value, units = value.magnitude, value.units @@ -64,17 +64,18 @@ def __new__(cls, value, error, units=MISSING): error = MISSING # used for check below else: units = "" - try: - error = error.to(units).magnitude - except AttributeError: - pass - if error is MISSING: + # We've already extracted the units from the Quantity above mag = value - elif error < 0: - raise ValueError("The magnitude of the error cannot be negative") else: - mag = ufloat(value, error) + try: + error = error.to(units).magnitude + except AttributeError: + pass + if error < 0: + raise ValueError("The magnitude of the error cannot be negative") + else: + mag = ufloat(value, error) inst = super().__new__(cls, mag, units) return inst diff --git a/pint/facets/numpy/quantity.py b/pint/facets/numpy/quantity.py index 880f86003..5257766bc 100644 --- a/pint/facets/numpy/quantity.py +++ b/pint/facets/numpy/quantity.py @@ -29,6 +29,16 @@ set_units_ufuncs, ) +try: + import uncertainties.unumpy as unp + from uncertainties import ufloat, UFloat + + HAS_UNCERTAINTIES = True +except ImportError: + unp = np + ufloat = Ufloat = None + HAS_UNCERTAINTIES = False + def method_wraps(numpy_func): if isinstance(numpy_func, str): @@ -224,6 +234,11 @@ def __getattr__(self, item) -> Any: ) else: raise exc + elif ( + HAS_UNCERTAINTIES and item == "ndim" and 
isinstance(self._magnitude, UFloat) ): + # Dimensionality of a single UFloat is 0, like any other scalar + return 0 try: return getattr(self._magnitude, item) diff --git a/pint/facets/plain/quantity.py index d2c9054c4..4115175cf 100644 --- a/pint/facets/plain/quantity.py +++ b/pint/facets/plain/quantity.py @@ -55,6 +55,17 @@ if HAS_NUMPY: import numpy as np # noqa +try: + import uncertainties.unumpy as unp + from uncertainties import ufloat, UFloat + + HAS_UNCERTAINTIES = True +except ImportError: + unp = np + ufloat = UFloat = None + HAS_UNCERTAINTIES = False + + MagnitudeT = TypeVar("MagnitudeT", bound=Magnitude) ScalarT = TypeVar("ScalarT", bound=Scalar) @@ -133,6 +144,8 @@ class PlainQuantity(Generic[MagnitudeT], PrettyIPython, SharedRegistryObject): def ndim(self) -> int: if isinstance(self.magnitude, numbers.Number): return 0 + if str(self.magnitude) == "<NA>": + return 0 return self.magnitude.ndim @property @@ -256,7 +269,12 @@ def __bytes__(self) -> bytes: return str(self).encode(locale.getpreferredencoding()) def __repr__(self) -> str: - if isinstance(self._magnitude, float): + if HAS_UNCERTAINTIES: + if isinstance(self._magnitude, UFloat): + return f"<Quantity({self._magnitude:.6}, '{self._units}')>" + else: + return f"<Quantity({self._magnitude}, '{self._units}')>" + elif isinstance(self._magnitude, float): return f"<Quantity({self._magnitude:.9}, '{self._units}')>" return f"<Quantity({self._magnitude}, '{self._units}')>" @@ -1288,6 +1306,9 @@ def bool_result(value): # We compare to the plain class of PlainQuantity because # each PlainQuantity class is unique. if not isinstance(other, PlainQuantity): + if other is None: + # A loop in pandas-dev/pandas/core/common.py(86) consensus_name_attr() can result in `other` being None + return bool_result(False) if zero_or_nan(other, True): # Handle the special case in which we compare to zero or NaN # (or an array of zeros or NaNs) diff --git a/pint/facets/plain/registry.py index fb7797d6c..c9c7d94d2 100644 --- a/pint/facets/plain/registry.py +++ b/pint/facets/plain/registry.py @@ -63,8 +63,9 @@ Handler, ) +from ... import pint_eval from ..._vendor import appdirs -from ...compat import babel_parse, tokenizer, TypeAlias, Self +from ...compat import babel_parse, TypeAlias, Self from ...errors import DimensionalityError, RedefinitionError, UndefinedUnitError from ...pint_eval import build_eval_tree from ...util import ParserHelper @@ -1324,7 +1325,7 @@ def parse_expression( for p in self.preprocessors: input_string = p(input_string) input_string = string_preprocessor(input_string) - gen = tokenizer(input_string) + gen = pint_eval.tokenizer(input_string) def _define_op(s: str): return self._eval_token(s, case_sensitive=case_sensitive, **values) diff --git a/pint/formatting.py index 561133c6b..b00b771c7 100644 --- a/pint/formatting.py +++ b/pint/formatting.py @@ -375,9 +375,13 @@ def formatter( # Don't remove this positional!
This is the format used in Babel key = pat.replace("{0}", "").strip() break - division_fmt = compound_unit_patterns.get("per", {}).get( - babel_length, division_fmt - ) + + tmp = compound_unit_patterns.get("per", {}).get(babel_length, division_fmt) + + try: + division_fmt = tmp.get("compound", division_fmt) + except AttributeError: + division_fmt = tmp power_fmt = "{}{}" exp_call = _pretty_fmt_exponent if value == 1: diff --git a/pint/pint_eval.py b/pint/pint_eval.py index a2952ecda..3f030505b 100644 --- a/pint/pint_eval.py +++ b/pint/pint_eval.py @@ -9,16 +9,27 @@ """ from __future__ import annotations +from io import BytesIO import operator import token as tokenlib +import tokenize from tokenize import TokenInfo from typing import Any, Optional, Union +try: + from uncertainties import ufloat + + HAS_UNCERTAINTIES = True +except ImportError: + HAS_UNCERTAINTIES = False + ufloat = None + from .errors import DefinitionSyntaxError # For controlling order of operations _OP_PRIORITY = { + "+/-": 4, "**": 3, "^": 3, "unary": 2, @@ -32,6 +43,12 @@ } +def _ufloat(left, right): + if HAS_UNCERTAINTIES: + return ufloat(left, right) + raise TypeError("Could not import support for uncertainties") + + def _power(left: Any, right: Any) -> Any: from . import Quantity from .compat import is_duck_array @@ -47,6 +64,225 @@ def _power(left: Any, right: Any) -> Any: return operator.pow(left, right) +# https://stackoverflow.com/a/1517965/1291237 +class tokens_with_lookahead: + def __init__(self, iter): + self.iter = iter + self.buffer = [] + + def __iter__(self): + return self + + def __next__(self): + if self.buffer: + return self.buffer.pop(0) + else: + return self.iter.__next__() + + def lookahead(self, n): + """Return an item n entries ahead in the iteration.""" + while n >= len(self.buffer): + try: + self.buffer.append(self.iter.__next__()) + except StopIteration: + return None + return self.buffer[n] + + +def _plain_tokenizer(input_string): + for tokinfo in tokenize.tokenize(BytesIO(input_string.encode("utf-8")).readline): + if tokinfo.type != tokenlib.ENCODING: + yield tokinfo + + +def uncertainty_tokenizer(input_string): + def _number_or_nan(token): + if token.type == tokenlib.NUMBER or ( + token.type == tokenlib.NAME and token.string == "nan" + ): + return True + return False + + def _get_possible_e(toklist, e_index): + possible_e_token = toklist.lookahead(e_index) + if ( + possible_e_token.string[0] == "e" + and len(possible_e_token.string) > 1 + and possible_e_token.string[1].isdigit() + ): + end = possible_e_token.end + possible_e = tokenize.TokenInfo( + type=tokenlib.STRING, + string=possible_e_token.string, + start=possible_e_token.start, + end=end, + line=possible_e_token.line, + ) + elif ( + possible_e_token.string[0] in ["e", "E"] + and toklist.lookahead(e_index + 1).string in ["+", "-"] + and toklist.lookahead(e_index + 2).type == tokenlib.NUMBER + ): + # Special case: Python allows a leading zero for exponents (i.e., 042) but not for numbers + if ( + toklist.lookahead(e_index + 2).string == "0" + and toklist.lookahead(e_index + 3).type == tokenlib.NUMBER + ): + exp_number = toklist.lookahead(e_index + 3).string + end = toklist.lookahead(e_index + 3).end + else: + exp_number = toklist.lookahead(e_index + 2).string + end = toklist.lookahead(e_index + 2).end + possible_e = tokenize.TokenInfo( + type=tokenlib.STRING, + string=f"e{toklist.lookahead(e_index+1).string}{exp_number}", + start=possible_e_token.start, + end=end, + line=possible_e_token.line, + ) + else: + possible_e = None + return 
possible_e + + def _apply_e_notation(mantissa, exponent): + if mantissa.string == "nan": + return mantissa + if float(mantissa.string) == 0.0: + return mantissa + return tokenize.TokenInfo( + type=tokenlib.NUMBER, + string=f"{mantissa.string}{exponent.string}", + start=mantissa.start, + end=exponent.end, + line=exponent.line, + ) + + def _finalize_e(nominal_value, std_dev, toklist, possible_e): + nominal_value = _apply_e_notation(nominal_value, possible_e) + std_dev = _apply_e_notation(std_dev, possible_e) + next(toklist) # consume 'e' and positive exponent value + if possible_e.string[1] in ["+", "-"]: + next(toklist) # consume "+" or "-" in exponent + exp_number = next(toklist) # consume exponent value + if ( + exp_number.string == "0" + and toklist.lookahead(0).type == tokenlib.NUMBER + ): + exp_number = next(toklist) + assert exp_number.end == end + # We've already applied the number, we're just consuming all the tokens + return nominal_value, std_dev + + # when tokenize encounters whitespace followed by an unknown character, + # (such as ±) it proceeds to mark every character of the whitespace as ERRORTOKEN, + # in addition to marking the unknown character as ERRORTOKEN. Rather than + # wading through all that vomit, just eliminate the problem + # in the input by rewriting ± as +/-. + input_string = input_string.replace("±", "+/-") + toklist = tokens_with_lookahead(_plain_tokenizer(input_string)) + for tokinfo in toklist: + line = tokinfo.line + start = tokinfo.start + if ( + tokinfo.string == "+" + and toklist.lookahead(0).string == "/" + and toklist.lookahead(1).string == "-" + ): + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=start, + end=toklist.lookahead(1).end, + line=line, + ) + for i in range(-1, 1): + next(toklist) + yield plus_minus_op + elif ( + tokinfo.string == "(" + and ((seen_minus := 1 if toklist.lookahead(0).string == "-" else 0) or True) + and _number_or_nan(toklist.lookahead(seen_minus)) + and toklist.lookahead(seen_minus + 1).string == "+" + and toklist.lookahead(seen_minus + 2).string == "/" + and toklist.lookahead(seen_minus + 3).string == "-" + and _number_or_nan(toklist.lookahead(seen_minus + 4)) + and toklist.lookahead(seen_minus + 5).string == ")" + ): + # ( NUM_OR_NAN +/- NUM_OR_NAN ) POSSIBLE_E_NOTATION + possible_e = _get_possible_e(toklist, seen_minus + 6) + if possible_e: + end = possible_e.end + else: + end = toklist.lookahead(seen_minus + 5).end + if seen_minus: + minus_op = next(toklist) + yield minus_op + nominal_value = next(toklist) + tokinfo = next(toklist) # consume '+' + next(toklist) # consume '/' + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=tokinfo.start, + end=next(toklist).end, # consume '-' + line=line, + ) + std_dev = next(toklist) + next(toklist) # consume final ')' + if possible_e: + nominal_value, std_dev = _finalize_e( + nominal_value, std_dev, toklist, possible_e + ) + yield nominal_value + yield plus_minus_op + yield std_dev + elif ( + tokinfo.type == tokenlib.NUMBER + and toklist.lookahead(0).string == "(" + and toklist.lookahead(1).type == tokenlib.NUMBER + and toklist.lookahead(2).string == ")" + ): + # NUM_OR_NAN ( NUM_OR_NAN ) POSSIBLE_E_NOTATION + possible_e = _get_possible_e(toklist, 3) + if possible_e: + end = possible_e.end + else: + end = toklist.lookahead(2).end + nominal_value = tokinfo + tokinfo = next(toklist) # consume '(' + plus_minus_op = tokenize.TokenInfo( + type=tokenlib.OP, + string="+/-", + start=tokinfo.start, + end=tokinfo.end, # this is 
funky because there's no "+/-" in nominal(std_dev) notation + line=line, + ) + std_dev = next(toklist) + if "." not in std_dev.string: + std_dev = tokenize.TokenInfo( + type=std_dev.type, + string="0." + std_dev.string, + start=std_dev.start, + end=std_dev.end, + line=line, + ) + next(toklist) # consume final ')' + if possible_e: + nominal_value, std_dev = _finalize_e( + nominal_value, std_dev, toklist, possible_e + ) + yield nominal_value + yield plus_minus_op + yield std_dev + else: + yield tokinfo + + +if HAS_UNCERTAINTIES: + tokenizer = uncertainty_tokenizer +else: + tokenizer = _plain_tokenizer + import typing UnaryOpT = typing.Callable[ @@ -60,6 +296,7 @@ def _power(left: Any, right: Any) -> Any: _UNARY_OPERATOR_MAP: dict[str, UnaryOpT] = {"+": lambda x: x, "-": lambda x: x * -1} _BINARY_OPERATOR_MAP: dict[str, BinaryOpT] = { + "+/-": _ufloat, "**": _power, "*": operator.mul, "": operator.mul, # operator for implicit ops diff --git a/pint/testsuite/test_issues.py b/pint/testsuite/test_issues.py index 9540814c3..c98ac61bf 100644 --- a/pint/testsuite/test_issues.py +++ b/pint/testsuite/test_issues.py @@ -881,6 +881,66 @@ def test_issue_1300(self): m = module_registry.Measurement(1, 0.1, "meter") assert m.default_format == "~P" + @helpers.requires_babel() + def test_issue_1400(self, sess_registry): + q1 = 3 * sess_registry.W + q2 = 3 * sess_registry.W / sess_registry.cm + assert q1.format_babel("~", locale="es_Ar") == "3 W" + assert q1.format_babel("", locale="es_Ar") == "3 vatios" + assert q2.format_babel("~", locale="es_Ar") == "3.0 W / cm" + assert q2.format_babel("", locale="es_Ar") == "3.0 vatios por centímetros" + + @helpers.requires_uncertainties() + def test_issue1611(self, module_registry): + from numpy.testing import assert_almost_equal + from uncertainties import ufloat + + from pint import pint_eval + + pint_eval.tokenizer = pint_eval.uncertainty_tokenizer + + u1 = ufloat(1.2, 0.34) + u2 = ufloat(5.6, 0.78) + q1_u = module_registry.Quantity(u2 - u1, "m") + q1_str = str(q1_u) + q1_str = "{:.4uS}".format(q1_u) + q1_m = q1_u.magnitude + q2_u = module_registry.Quantity(q1_str) + # Not equal because the uncertainties are differently random! 
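+            # (The uncertainties package treats every independently created UFloat as a
+            #  new random variable, so the re-parsed quantity reproduces q1_u's nominal
+            #  value and std_dev but is uncorrelated with it, hence the inequality.)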
+ assert q1_u != q2_u + q2_m = q2_u.magnitude + + assert_almost_equal(q2_m.nominal_value, q1_m.nominal_value, decimal=9) + assert_almost_equal(q2_m.std_dev, q1_m.std_dev, decimal=4) + + q3_str = "12.34(5678)e-066 m" + q3_u = module_registry.Quantity(q3_str) + q3_m = q3_u.magnitude + assert q3_m < 1 + + @helpers.requires_uncertainties + def test_issue1614(self, module_registry): + from uncertainties import UFloat, ufloat + + q = module_registry.Quantity(1.0, "m") + assert isinstance(q, module_registry.Quantity) + m = module_registry.Measurement(2.0, 0.3, "m") + assert isinstance(m, module_registry.Measurement) + + u1 = ufloat(1.2, 3.4) + u2 = ufloat(5.6, 7.8) + q1_u = module_registry.Quantity(u1, "m") + m1 = module_registry.Measurement(q1_u) + assert m1.value.magnitude == u1.nominal_value + assert m1.error.magnitude == u1.std_dev + m2 = module_registry.Measurement(5.6, 7.8) # dimensionless + q2_u = module_registry.Quantity(m2) + assert isinstance(q2_u.magnitude, UFloat) + assert q2_u.magnitude.nominal_value == m2.value + assert q2_u.magnitude.nominal_value == u2.nominal_value + assert q2_u.magnitude.std_dev == m2.error + assert q2_u.magnitude.std_dev == u2.std_dev + if np is not None: diff --git a/pint/testsuite/test_measurement.py b/pint/testsuite/test_measurement.py index 9de2762e3..f3716289e 100644 --- a/pint/testsuite/test_measurement.py +++ b/pint/testsuite/test_measurement.py @@ -270,3 +270,11 @@ def test_measurement_comparison(self): y = self.Q_(5.0, "meter").plus_minus(0.1) assert x <= y assert not (x >= y) + + def test_tokenization(self): + from pint import pint_eval + + pint_eval.tokenizer = pint_eval.uncertainty_tokenizer + for p in pint_eval.tokenizer("8 + / - 4"): + print(p) + assert True diff --git a/pint/testsuite/test_pint_eval.py b/pint/testsuite/test_pint_eval.py index b5b94f0d9..fc0012e6d 100644 --- a/pint/testsuite/test_pint_eval.py +++ b/pint/testsuite/test_pint_eval.py @@ -1,9 +1,11 @@ import pytest -from pint.compat import tokenizer -from pint.pint_eval import build_eval_tree +from pint.pint_eval import build_eval_tree, tokenizer from pint.util import string_preprocessor +# This is how we enable the parsing of uncertainties +# tokenizer = pint.pint_eval.uncertainty_tokenizer + class TestPintEval: def _test_one(self, input_text, parsed, preprocess=False): diff --git a/pint/testsuite/test_util.py b/pint/testsuite/test_util.py index a61194d3e..70136cf35 100644 --- a/pint/testsuite/test_util.py +++ b/pint/testsuite/test_util.py @@ -5,6 +5,7 @@ import pytest +from pint import pint_eval from pint.util import ( ParserHelper, UnitsContainer, @@ -15,7 +16,6 @@ sized, string_preprocessor, to_units_container, - tokenizer, transpose, ) @@ -194,7 +194,7 @@ def test_calculate(self): assert dict(seconds=1) / z() == ParserHelper(0.5, seconds=1, meter=-2) def _test_eval_token(self, expected, expression): - token = next(tokenizer(expression)) + token = next(pint_eval.tokenizer(expression)) actual = ParserHelper.eval_token(token) assert expected == actual assert type(expected) == type(actual) diff --git a/pint/toktest.py b/pint/toktest.py new file mode 100644 index 000000000..ef606d6a9 --- /dev/null +++ b/pint/toktest.py @@ -0,0 +1,29 @@ +import tokenize +from pint.pint_eval import _plain_tokenizer, uncertainty_tokenizer + +tokenizer = _plain_tokenizer + +input_lines = [ + "( 8.0 + / - 4.0 ) e6 m", + "( 8.0 ± 4.0 ) e6 m", + "( 8.0 + / - 4.0 ) e-6 m", + "( nan + / - 0 ) e6 m", + "( nan ± 4.0 ) m", + "8.0 + / - 4.0 m", + "8.0 ± 4.0 m", + "8.0(4)m", + "8.0(.4)m", + "8.0(-4)m", # error! 
+ "pint == wonderfulness ^ N + - + / - * ± m J s", +] + +for line in input_lines: + result = [] + g = list(uncertainty_tokenizer(line)) # tokenize the string + for toknum, tokval, _, _, _ in g: + result.append((toknum, tokval)) + + print("====") + print(f"input line: {line}") + print(result) + print(tokenize.untokenize(result)) diff --git a/pint/util.py b/pint/util.py index e940ea6c2..d14722a04 100644 --- a/pint/util.py +++ b/pint/util.py @@ -32,10 +32,11 @@ ) from collections.abc import Hashable, Generator -from .compat import NUMERIC_TYPES, tokenizer, Self +from .compat import NUMERIC_TYPES, Self from .errors import DefinitionSyntaxError from .formatting import format_unit from .pint_eval import build_eval_tree +from . import pint_eval from ._typing import Scalar @@ -762,7 +763,7 @@ def from_string(cls, input_string: str, non_int_type: type = float) -> ParserHel else: reps = False - gen = tokenizer(input_string) + gen = pint_eval.tokenizer(input_string) ret = build_eval_tree(gen).evaluate( partial(cls.eval_token, non_int_type=non_int_type) ) @@ -1039,6 +1040,9 @@ def to_units_container( return unit_like._units elif str in mro: if registry: + # TODO: document how to whether to lift preprocessing loop out to caller + for p in registry.preprocessors: + unit_like = p(unit_like) # TODO: Why not parse.units here? return registry._parse_units(unit_like) else: