Pre-commit autoupdate (#148)
quant-ranger[bot] authored Apr 3, 2024
1 parent 2557627 commit 47b13ed
Showing 9 changed files with 47 additions and 40 deletions.
8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
@@ -1,22 +1,22 @@
 repos:
   - repo: https://github.com/Quantco/pre-commit-mirrors-ruff
-    rev: 0.2.2
+    rev: 0.3.4
     hooks:
       - id: ruff-conda
       - id: ruff-format-conda
   - repo: https://github.com/Quantco/pre-commit-mirrors-mypy
-    rev: "1.8.0"
+    rev: "1.9.0"
     hooks:
       - id: mypy-conda
         additional_dependencies: [-c, conda-forge, types-setuptools]
   - repo: https://github.com/Quantco/pre-commit-mirrors-pyupgrade
-    rev: 3.15.0
+    rev: 3.15.2
     hooks:
       - id: pyupgrade-conda
         args:
           - --py38
   - repo: https://github.com/Quantco/pre-commit-mirrors-prettier
-    rev: 3.2.4
+    rev: 3.2.5
     hooks:
       - id: prettier-conda
         files: "\\.md$"
7 changes: 3 additions & 4 deletions src/spox/_schemas.py
@@ -1,4 +1,5 @@
 """Exposes information related to reference ONNX operator schemas, used by StandardOpNode."""
+
 import itertools
 from typing import (
     Callable,
@@ -16,11 +17,9 @@


 class _Comparable(Protocol):
-    def __lt__(self, other):
-        ...
+    def __lt__(self, other): ...

-    def __gt__(self, other):
-        ...
+    def __gt__(self, other): ...


 S = TypeVar("S")
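For readers unfamiliar with the ``...``-stub style being applied above: ``_Comparable`` is a structural type used to bound a TypeVar, so helpers can require orderable values without naming a concrete class. A minimal sketch of the pattern (the ``latest`` helper is illustrative, not part of spox):

    from typing import Iterable, Protocol, TypeVar


    class Comparable(Protocol):
        def __lt__(self, other) -> bool: ...

        def __gt__(self, other) -> bool: ...


    C = TypeVar("C", bound=Comparable)


    def latest(versions: Iterable[C]) -> C:
        # max() only needs ordering, which the protocol guarantees structurally.
        return max(versions)


    print(latest([(1, 8, 0), (1, 9, 0)]))  # prints (1, 9, 0)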
12 changes: 4 additions & 8 deletions src/spox/_scope.py
@@ -59,12 +59,10 @@ def __contains__(self, item: Union[str, H]) -> bool:
         )

     @overload
-    def __getitem__(self, item: H) -> str:
-        ...
+    def __getitem__(self, item: H) -> str: ...

     @overload
-    def __getitem__(self, item: str) -> H:
-        ...
+    def __getitem__(self, item: str) -> H: ...

     def __getitem__(self, item: Union[str, H]):
         """Access the name of an object or an object with a given name in this (or outer) namespace."""
@@ -76,12 +74,10 @@ def __getitem__(self, item: Union[str, H]):
         return self.name_of[item]

     @overload
-    def __setitem__(self, key: str, value: H):
-        ...
+    def __setitem__(self, key: str, value: H): ...

     @overload
-    def __setitem__(self, key: H, value: str):
-        ...
+    def __setitem__(self, key: H, value: str): ...

     def __setitem__(self, _key, _value):
         """Set the name of an object in exactly this namespace. Both ``[name] = obj`` and ``[obj] = name`` work."""
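The ``__getitem__``/``__setitem__`` overloads above describe a bidirectional mapping: indexing with an object yields its name and indexing with a name yields the object, in either assignment order. A self-contained sketch of that pattern (class and field names are illustrative, not spox's actual ``_scope`` internals):

    from typing import Dict, Generic, Hashable, TypeVar, Union, overload

    H = TypeVar("H", bound=Hashable)


    class BidiMap(Generic[H]):
        """Toy name<->object registry mirroring the overload pattern above."""

        def __init__(self) -> None:
            self.name_of: Dict[H, str] = {}
            self.obj_of: Dict[str, H] = {}

        @overload
        def __getitem__(self, item: H) -> str: ...

        @overload
        def __getitem__(self, item: str) -> H: ...

        def __getitem__(self, item: Union[str, H]):
            # Names (strings) resolve to objects; anything else resolves to its name.
            return self.obj_of[item] if isinstance(item, str) else self.name_of[item]

        def __setitem__(self, key, value) -> None:
            # Accept both registry[obj] = name and registry[name] = obj.
            name, obj = (key, value) if isinstance(key, str) else (value, key)
            self.name_of[obj] = name
            self.obj_of[name] = obj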
1 change: 1 addition & 0 deletions src/spox/_standard.py
@@ -1,4 +1,5 @@
 """Module implementing a base for standard ONNX operators, which use the functionality of ONNX node-level inference."""
+
 from typing import TYPE_CHECKING, Callable, Dict, Tuple

 import numpy
10 changes: 6 additions & 4 deletions src/spox/opset/ai/onnx/ml/v3.py
@@ -1836,12 +1836,14 @@ def tree_ensemble_regressor(
         'SUM,' 'MIN,' 'MAX.'
     base_values
         Attribute.
-        Base values for classification, added to final class score; the size
-        must be the same as the classes or can be left unassigned (assumed 0)
+        Base values for regression, added to final prediction after applying
+        aggregate_function; the size must be the same as the classes or can be
+        left unassigned (assumed 0)
     base_values_as_tensor
         Attribute.
-        Base values for classification, added to final class score; the size
-        must be the same as the classes or can be left unassigned (assumed 0)
+        Base values for regression, added to final prediction after applying
+        aggregate_function; the size must be the same as the classes or can be
+        left unassigned (assumed 0)
     n_targets
         Attribute.
         The total number of targets.
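In terms of the corrected wording, the base values enter only after the per-tree scores have been combined by ``aggregate_function``. A tiny numeric illustration (all values made up):

    import numpy as np

    aggregated = np.array([[0.4, -1.2]])   # per-target score after SUM/MIN/MAX/AVERAGE over trees
    base_values = np.array([0.5, 0.0])     # one base value per target; left unset it is treated as 0
    prediction = aggregated + base_values  # added to the final prediction, after aggregation
    print(prediction)                      # [[ 0.9 -1.2]]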
9 changes: 5 additions & 4 deletions src/spox/opset/ai/onnx/ml/v4.py
@@ -64,7 +64,7 @@ class Attributes(BaseAttributes):
         default_float: AttrFloat32
         default_int64: AttrInt64
         default_string: AttrString
-        default_tensor: AttrTensor
+        default_tensor: Optional[AttrTensor]
         keys_floats: Optional[AttrFloat32s]
         keys_int64s: Optional[AttrInt64s]
         keys_strings: Optional[AttrStrings]
@@ -95,7 +95,7 @@ def label_encoder(
     default_float: float = -0.0,
     default_int64: int = -1,
     default_string: str = "_Unused",
-    default_tensor: np.ndarray,
+    default_tensor: Optional[np.ndarray] = None,
     keys_floats: Optional[Iterable[float]] = None,
     keys_int64s: Optional[Iterable[int]] = None,
     keys_strings: Optional[Iterable[str]] = None,
@@ -144,7 +144,8 @@ def label_encoder(
         A string.
     default_tensor
         Attribute.
-        A default tensor.
+        A default tensor. {"*Unused"} if values*\ \* has string type, {-1} if
+        values\_\* has integral type, and {-0.f} if values\_\* has float type.
     keys_floats
         Attribute.
         A list of floats.
@@ -192,7 +193,7 @@ def label_encoder(
             default_float=AttrFloat32(default_float, name="default_float"),
             default_int64=AttrInt64(default_int64, name="default_int64"),
             default_string=AttrString(default_string, name="default_string"),
-            default_tensor=AttrTensor(default_tensor, name="default_tensor"),
+            default_tensor=AttrTensor.maybe(default_tensor, name="default_tensor"),
             keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"),
             keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"),
             keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"),
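Switching to ``AttrTensor.maybe(...)`` matches how the surrounding optional attributes are built: the attribute wrapper is only constructed when a value was actually supplied, and ``None`` is stored otherwise. A minimal sketch of what such a ``maybe`` constructor typically looks like (illustrative, not spox's exact implementation):

    from typing import Optional, Type, TypeVar

    T = TypeVar("T", bound="Attr")


    class Attr:
        def __init__(self, value, name: str):
            self.value = value
            self.name = name

        @classmethod
        def maybe(cls: Type[T], value, name: str) -> Optional[T]:
            # Skip constructing the attribute entirely when no value was provided.
            return cls(value, name) if value is not None else None


    print(Attr.maybe(None, name="default_tensor"))          # None
    print(Attr.maybe([1, 2], name="default_tensor").name)   # default_tensor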
36 changes: 21 additions & 15 deletions src/spox/opset/ai/onnx/v17.py
@@ -4615,7 +4615,7 @@ def batch_normalization(
     training_mode
         Attribute.
         If set to true, it indicates BatchNormalization is being used for
-        training, and outputs 1, 2, 3, and 4 would be populated.
+        training, and outputs 1 and 2 are to be computed.

     Returns
     =======
@@ -8532,7 +8532,11 @@ def layer_normalization(
     indicate the i-th dimension of ``X``. If ``X``'s shape is
     ``[d[0], ..., d[axis-1], d[axis], ..., d[rank-1]]``, the shape of
     ``Mean`` and ``InvStdDev`` is ``[d[0], ..., d[axis-1], 1, ..., 1]``.
-    ``Y`` and ``X`` have the same shape.
+    ``Y`` and ``X`` have the same shape. This operator supports
+    unidirectional broadcasting (tensors ``Scale`` and ``B`` should be
+    unidirectional broadcastable to tensor ``X``); for more details please
+    check `the
+    doc <https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md>`__.

     Parameters
     ==========
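As a sanity check on the shape and broadcasting description above, a numpy reference of the normalization stage might look like this (a sketch under the documented semantics, not spox code):

    import numpy as np


    def layer_norm_reference(x, scale, bias, axis=-1, eps=1e-5):
        # Normalize over dimensions axis..rank-1; Mean/InvStdDev keep those dims as size 1.
        axes = tuple(range(axis % x.ndim, x.ndim))
        mean = x.mean(axis=axes, keepdims=True)
        inv_std = 1.0 / np.sqrt(x.var(axis=axes, keepdims=True) + eps)
        # Scale and B broadcast (unidirectionally) against X's shape; Y keeps X's shape.
        y = (x - mean) * inv_std * scale + bias
        return y, mean, inv_std


    y, mean, inv_std = layer_norm_reference(np.ones((2, 3, 4)), np.ones(4), np.zeros(4))
    print(y.shape, mean.shape, inv_std.shape)  # (2, 3, 4) (2, 3, 1) (2, 3, 1)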
@@ -9302,7 +9306,8 @@ def max_pool(
        output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)

     if ceil_mode is enabled. ``pad_shape[i]`` is the sum of pads along axis
-    ``i``.
+    ``i``. Sliding windows that would start in the right padded region are
+    ignored.

     ``auto_pad`` is a DEPRECATED attribute. If you are using them currently,
     the output spatial shape will be following when ceil_mode is enabled:
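The corrected sentence adds the rule that a window may not start inside the right padding. A small helper showing how that rule interacts with the ceil_mode formula above (a sketch, not runtime code from spox or ONNX):

    import math


    def pooled_extent(input_size, kernel, stride, pad_begin, pad_end, dilation=1, ceil_mode=True):
        span = dilation * (kernel - 1) + 1  # effective kernel extent with dilation
        rounding = math.ceil if ceil_mode else math.floor
        out = rounding((input_size + pad_begin + pad_end - span) / stride) + 1
        # With ceil_mode, drop the last window if it would start in the right padded region.
        if ceil_mode and (out - 1) * stride >= input_size + pad_begin:
            out -= 1
        return out


    print(pooled_extent(input_size=3, kernel=2, stride=2, pad_begin=1, pad_end=1))
    # 2 -- a third window would start inside the right padding, so it is ignored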
@@ -14978,16 +14983,16 @@ def top_k(
 ) -> Tuple[Var, Var]:
     r"""
     Retrieve the top-K largest or smallest elements along a specified axis.
-    Given an input tensor of shape [a_1, a_2, ..., a_n, r] and integer
+    Given an input tensor of shape [a_0, a_1, ..., a\_{n-1}] and integer
     argument k, return two outputs:

-    -  Value tensor of shape [a_1, a_2, ..., a\_{axis-1}, k, a\_{axis+1},
-       ... a_n] which contains the values of the top k elements along the
-       specified axis
+    -  Value tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1},
+       ... a\_{n-1}] which contains the values of the top k elements along
+       the specified axis

-    -  Index tensor of shape [a_1, a_2, ..., a\_{axis-1}, k, a\_{axis+1},
-       ... a_n] which contains the indices of the top k elements (original
-       indices from the input tensor).
+    -  Index tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1},
+       ... a\_{n-1}] which contains the indices of the top k elements
+       (original indices from the input tensor).

     -  If "largest" is 1 (the default value) then the k largest elements are
        returned.
@@ -15006,7 +15011,7 @@ def top_k(
     ==========
     X
         Type T.
-        Tensor of shape [a_1, a_2, ..., a_n, r]
+        Tensor of shape [a_0, a_1, ..., a\_{n-1}]
     K
         Type tensor(int64).
         A 1-D tensor containing a single positive value corresponding to the
@@ -15027,12 +15032,13 @@ def top_k(
     =======
     Values : Var
         Type T.
-        Tensor of shape [a_1, a_2, ..., a\_{axis-1}, k, a\_{axis+1}, ... a_n]
-        containing top K values from the input tensor
+        Tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1}, ...
+        a\_{n-1}] containing top K values from the input tensor
     Indices : Var
         Type I.
-        Tensor of shape [a_1, a_2, ..., a\_{axis-1}, k, a\_{axis+1}, ... a_n]
-        containing the corresponding input tensor indices for the top K values.
+        Tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1}, ...
+        a\_{n-1}] containing the corresponding input tensor indices for the top
+        K values.

     Notes
     =====
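The shape bookkeeping above ([a_0, ..., a_{n-1}] with the ``axis`` dimension replaced by k in both outputs) can be checked against a tiny numpy reference (a sketch of the documented semantics, not the spox implementation):

    import numpy as np


    def top_k_reference(x: np.ndarray, k: int, axis: int = -1, largest: bool = True):
        # Order indices along `axis`; a stable sort keeps lower original indices first on ties.
        order = np.argsort(-x if largest else x, axis=axis, kind="stable")
        indices = np.take(order, np.arange(k), axis=axis)
        values = np.take_along_axis(x, indices, axis=axis)
        # Both outputs have X's shape with dimension `axis` replaced by k.
        return values, indices


    vals, idx = top_k_reference(np.array([[3, 1, 4, 1], [5, 9, 2, 6]]), k=2)
    # vals -> [[4, 3], [9, 6]], idx -> [[2, 0], [1, 3]]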
3 changes: 2 additions & 1 deletion src/spox/opset/ai/onnx/v19.py
@@ -821,7 +821,8 @@ def average_pool(
        output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1)

     if ceil_mode is enabled. ``pad_shape[i]`` is the sum of pads along axis
-    ``i``.
+    ``i``. Sliding windows that would start in the right padded region are
+    ignored.

     ``auto_pad`` is a DEPRECATED attribute. If you are using them currently,
     the output spatial shape will be following when ceil_mode is enabled:
1 change: 1 addition & 0 deletions tests/test_custom_operator.py
@@ -5,6 +5,7 @@
 for the respective fields ``attrs/inputs/outputs`` and ``infer_output_types`` will be useful as well.
 Of these, ``propagate_values`` is probably least common.
 """
+
 from dataclasses import dataclass
 from typing import Dict

