diff --git a/src/spox/_fields.py b/src/spox/_fields.py index 1286b55..03ae741 100644 --- a/src/spox/_fields.py +++ b/src/spox/_fields.py @@ -7,6 +7,7 @@ from collections.abc import Iterable, Iterator, Sequence from dataclasses import dataclass from typing import Any, Optional, Union +from typing_extensions import Self from ._attributes import Attr from ._exceptions import InferenceWarning @@ -190,7 +191,6 @@ def vars(self, prop_values) -> Vars: return self.Vars(**vars_structure) - @dataclass class BaseOutputs(BaseVarInfos, metaclass=BaseVarsMeta): @dataclass diff --git a/src/spox/_function.py b/src/spox/_function.py index 16a106b..9865411 100644 --- a/src/spox/_function.py +++ b/src/spox/_function.py @@ -61,7 +61,7 @@ def constructor(self, attrs, inputs): f"Function {type(self).__name__} does not implement a constructor." ) - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: from . import _graph func_args_var = _graph.arguments_dict( @@ -147,7 +147,7 @@ class Attributes(BaseAttributes): op_type = OpType(name, domain, version) def constructor(self, attrs, inputs): - return self.Outputs(*fun(*inputs.get_fields().values())) + return self.Outputs(*unwrap_vars(fun(*wrap_vars(inputs.get_fields().values())))) return _Func @@ -192,11 +192,12 @@ def init(*args: Var): def alt_fun(*args: Var) -> Iterable[Var]: cls = init(*args) - return wrap_vars( - cls(cls.Attributes(), cls.Inputs(*unwrap_vars(args))) + return [ + Var(var_info) + for var_info in cls(cls.Attributes(), cls.Inputs(*unwrap_vars(args))) .outputs.get_fields() .values() - ) + ] return alt_fun # type: ignore diff --git a/src/spox/_inline.py b/src/spox/_inline.py index b8bf407..6c0356f 100644 --- a/src/spox/_inline.py +++ b/src/spox/_inline.py @@ -111,7 +111,7 @@ def opset_req(self) -> set[tuple[str, int]]: ("", INTERNAL_MIN_OPSET) } - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: # First, type check that we match the ModelProto type requirements for i, var in zip(self.graph.input, self.inputs.inputs): if var.type is not None and not ( diff --git a/src/spox/_internal_op.py b/src/spox/_internal_op.py index 835c451..7ed71b0 100644 --- a/src/spox/_internal_op.py +++ b/src/spox/_internal_op.py @@ -88,7 +88,7 @@ def post_init(self, **kwargs): if self.attrs.name is not None: self.outputs.arg._rename(self.attrs.name.value) - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: # Output type is based on the value of the type attribute return {"arg": self.attrs.type.value} @@ -121,7 +121,7 @@ class Outputs(BaseOutputs): inputs: BaseInputs outputs: Outputs - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: # Output type is based on the value of the type attribute arr = self.attrs.value.value return {"arg": Tensor(arr.dtype, arr.shape)} @@ -161,7 +161,7 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: return { f"outputs_{i}": arr.type for i, arr in enumerate(self.inputs.inputs) diff --git a/src/spox/_node.py b/src/spox/_node.py index 85d0ba3..de40b7d 100644 --- a/src/spox/_node.py +++ b/src/spox/_node.py @@ -95,7 +95,7 @@ def __init__( out_variadic: Optional[int] = None, infer_types: bool = True, validate: bool = True, - initializers=[], + initializers={}, 
**kwargs, ): """ @@ -127,7 +127,7 @@ def __init__( # As inference functions may access which output vars we initialized (e.g. variadics) # we inject uninitialized vars first self.outputs = self._init_output_vars() - self.inference(infer_types) + self.inference(infer_types, initializers) else: self.outputs = outputs @@ -215,7 +215,7 @@ def propagate_values(self, initializers) -> dict[str, PropValueType]: """ return {} - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers) -> dict[str, Type]: """ Inference routine for output types. Often overriden by inheriting Node types. @@ -223,10 +223,10 @@ def infer_output_types(self) -> dict[str, Type]: """ return {} - def inference(self, infer_types: bool = True): + def inference(self, infer_types: bool = True, initializers={}): # Type inference routine - call infer_output_types if required # and check if it provides the expected outputs. - out_types = self.infer_output_types() if infer_types else {} + out_types = self.infer_output_types(initializers=initializers) if infer_types else {} for key, var in self.outputs.get_vars().items(): if var.type is None: # If no existing type from init_output_vars diff --git a/src/spox/_standard.py b/src/spox/_standard.py index 531d7af..9767fb4 100644 --- a/src/spox/_standard.py +++ b/src/spox/_standard.py @@ -6,9 +6,11 @@ from typing import TYPE_CHECKING, Callable import onnx +from onnx.numpy_helper import from_array import onnx.reference import onnx.shape_inference from onnx.defs import OpSchema +import numpy as np from . import _value_prop from ._exceptions import InferenceError @@ -18,6 +20,7 @@ from ._shape import SimpleShape from ._type_system import Optional, Sequence, Tensor, Type from ._value_prop import PropValueType +from ._utils import from_array if TYPE_CHECKING: from ._graph import Graph @@ -48,7 +51,7 @@ def min_output(self) -> int: return self.schema.min_output def to_singleton_onnx_model( - self, *, dummy_outputs: bool = True, with_dummy_subgraphs: bool = True + self, *, dummy_outputs: bool = True, with_dummy_subgraphs: bool = True, prop_values={} ) -> tuple[onnx.ModelProto, Scope]: """ Build a singleton model consisting of just this StandardNode. Used for type inference. 
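# The `prop_values` threaded into `to_singleton_onnx_model` above become graph
# initializers, which is what lets ONNX shape inference resolve data-dependent
# shapes such as Reshape with a constant shape input. A minimal sketch of that
# mechanism using plain `onnx` (the names data/shape/reshaped are illustrative):
import numpy as np
import onnx
from onnx import TensorProto, helper
from onnx.numpy_helper import from_array

# Reshape's output shape is only known when its `shape` input is a constant;
# passing that constant as a graph initializer makes it visible to inference.
shape_init = from_array(np.array([2, 6], dtype=np.int64), name="shape")
graph = helper.make_graph(
    [helper.make_node("Reshape", ["data", "shape"], ["reshaped"])],
    "reshape_example",
    inputs=[helper.make_tensor_value_info("data", TensorProto.FLOAT, (3, 4))],
    outputs=[helper.make_tensor_value_info("reshaped", TensorProto.FLOAT, None)],
    initializer=[shape_init],
)
inferred = onnx.shape_inference.infer_shapes(helper.make_model(graph))
# The inferred output type should now carry the concrete (2, 6) shape.
print(inferred.graph.output[0].type.tensor_type.shape)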
@@ -97,7 +100,11 @@ def out_value_info(curr_key, curr_var): ] # Initializers, passed in to allow partial data propagation # - used so that operators like Reshape are aware of constant shapes - initializers = [] + initializers = [ + from_array(prop.value, name) # type: ignore + for name, prop in prop_values.items() + if prop is not None and isinstance(prop.value, np.ndarray) + ] # Graph and model graph = onnx.helper.make_graph( [node_proto], @@ -117,13 +124,13 @@ def out_value_info(curr_key, curr_var): ) return model, scope - def infer_output_types_onnx(self) -> dict[str, Type]: + def infer_output_types_onnx(self, initializers={}) -> dict[str, Type]: """Execute type & shape inference with ``onnx.shape_inference.infer_node_outputs``.""" # Check that all (specified) inputs have known types, as otherwise we fail if any(var.type is None for var in self.inputs.get_vars().values()): return {} - model, _ = self.to_singleton_onnx_model() + model, _ = self.to_singleton_onnx_model(prop_values=initializers) # Attempt to do shape inference - if an error is caught, we extend the traceback a bit try: @@ -161,7 +168,7 @@ def propagate_values_onnx(self, initializers) -> dict[str, PropValueType]: if next(iter(self.subgraphs), None) is not None: # Cannot do propagation with subgraphs implicitly for performance - should be reimplemented return {} - model, scope = self.to_singleton_onnx_model(with_dummy_subgraphs=False) + model, scope = self.to_singleton_onnx_model(with_dummy_subgraphs=False, prop_values=initializers) wrap_feed, run, unwrap_feed = _value_prop.get_backend_calls() input_feed = { scope.var[var_info]: wrap_feed(initializers[name]) @@ -179,8 +186,8 @@ def propagate_values_onnx(self, initializers) -> dict[str, PropValueType]: } return {k: v for k, v in results.items() if k is not None} - def infer_output_types(self) -> dict[str, Type]: - return self.infer_output_types_onnx() + def infer_output_types(self, initializers={}) -> dict[str, Type]: + return self.infer_output_types_onnx(initializers) def propagate_values(self, initializers) -> dict[str, PropValueType]: if _value_prop._VALUE_PROP_BACKEND != _value_prop.ValuePropBackend.NONE: diff --git a/src/spox/opset/ai/onnx/ml/v3.py b/src/spox/opset/ai/onnx/ml/v3.py index c80ce52..22fac5a 100644 --- a/src/spox/opset/ai/onnx/ml/v3.py +++ b/src/spox/opset/ai/onnx/ml/v3.py @@ -1,29 +1,39 @@ -# Copyright (c) QuantCo 2023-2024 -# SPDX-License-Identifier: BSD-3-Clause - # ruff: noqa: E741 -- Allow ambiguous variable name -from collections.abc import Iterable, Sequence +import typing +import warnings from dataclasses import dataclass +from collections.abc import Iterable, Sequence from typing import ( + Any, + Callable, Optional, + Union, ) +from typing import cast as typing_cast import numpy as np +import numpy.typing as npt +from spox._var import Var, VarInfo, result_type, unwrap_vars, get_value +from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._attributes import ( + AttrDtype, AttrFloat32, AttrFloat32s, + AttrGraph, AttrInt64, AttrInt64s, AttrString, AttrStrings, AttrTensor, + AttrType, ) -from spox._fields import BaseAttributes, BaseInputs, BaseOutputs +from spox._graph import Graph, subgraph +from spox._internal_op import intro from spox._node import OpType from spox._standard import InferenceError, StandardNode -from spox._type_system import Tensor, Type -from spox._var import Var, VarInfo, get_value, unwrap_vars +from spox._type_system import Tensor, Type, Sequence as SpoxSequence +from spox._value_prop import PropValueType class 
_ArrayFeatureExtractor(StandardNode): @@ -40,7 +50,7 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): Z: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: if not self.inputs.fully_typed: return {} xt, yt = self.inputs.X.unwrap_tensor(), self.inputs.Y.unwrap_tensor() @@ -54,14 +64,12 @@ def infer_output_types(self) -> dict[str, Type]: return {"Z": Tensor(xt.dtype, (1, yt.shape[-1]))} shape = tuple(list(xt.shape[:-1]) + [yt.shape[-1]]) # type: ignore return {"Z": Tensor(xt.dtype, shape)} - op_type = OpType("ArrayFeatureExtractor", "ai.onnx.ml", 1) attrs: Attributes inputs: Inputs outputs: Outputs - class _Binarizer(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -75,16 +83,14 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): Y: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: return {"Y": self.inputs.X.type} if self.inputs.X.type is not None else {} - op_type = OpType("Binarizer", "ai.onnx.ml", 1) attrs: Attributes inputs: Inputs outputs: Outputs - class _CastMap(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -106,7 +112,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _CategoryMapper(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -123,7 +128,7 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): Y: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: if not self.inputs.fully_typed: return {} cats1, cats2 = self.attrs.cats_int64s, self.attrs.cats_strings @@ -134,14 +139,12 @@ def infer_output_types(self) -> dict[str, Type]: t = self.inputs.X.unwrap_tensor() (elem_type,) = {np.int64, np.str_} - {t.dtype.type} # type: ignore return {"Y": Tensor(elem_type, t.shape)} - op_type = OpType("CategoryMapper", "ai.onnx.ml", 1) attrs: Attributes inputs: Inputs outputs: Outputs - class _DictVectorizer(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -162,7 +165,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _FeatureVectorizer(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -182,7 +184,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Imputer(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -199,26 +200,18 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): Y: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: if not self.inputs.fully_typed: return {} t = self.inputs.X.unwrap_tensor() # We verify if the attributes are set correctly and matching the input elem type cases = { - np.int64: ( - self.attrs.imputed_value_int64s, - self.attrs.replaced_value_int64, - ), - np.float32: ( - self.attrs.imputed_value_floats, - self.attrs.replaced_value_float, - ), + np.int64: (self.attrs.imputed_value_int64s, self.attrs.replaced_value_int64), + np.float32: (self.attrs.imputed_value_floats, self.attrs.replaced_value_float) } for key, (imp, rep) in cases.items(): if t.dtype.type is key: - if not all( - imp1 is None for key1, (imp1, rep1) in cases.items() if key != key1 - ): + if not all(imp1 is None for key1, (imp1, rep1) in cases.items() if key != key1): raise InferenceError("Only one input imputed type may be set.") break else: @@ -229,18 +222,14 @@ def infer_output_types(self) -> dict[str, Type]: sim = t.shape last = sim[-1] if sim 
else 1 if isinstance(last, int) and len(imp.value) not in {1, last}: - raise InferenceError( - f"Mismatched expected ({len(imp.value)}) and actual ({last}) feature count." - ) + raise InferenceError(f"Mismatched expected ({len(imp.value)}) and actual ({last}) feature count.") return {"Y": t} - op_type = OpType("Imputer", "ai.onnx.ml", 1) attrs: Attributes inputs: Inputs outputs: Outputs - class _LabelEncoder(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -268,7 +257,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _LinearClassifier(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -294,7 +282,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _LinearRegressor(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -311,7 +298,7 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): Y: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: if not self.inputs.fully_typed: return {} sim = self.inputs.X.unwrap_tensor().shape @@ -324,14 +311,12 @@ def infer_output_types(self) -> dict[str, Type]: return {"Y": Tensor(np.float32, (1, 1))} else: raise InferenceError("Input shape must be at most a matrix.") - op_type = OpType("LinearRegressor", "ai.onnx.ml", 1) attrs: Attributes inputs: Inputs outputs: Outputs - class _Normalizer(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -345,20 +330,16 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): Y: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: if self.attrs.norm.value not in ("MAX", "L1", "L2"): - raise InferenceError( - f"Unknown normalisation method `{self.attrs.norm.value}`" - ) + raise InferenceError(f"Unknown normalisation method `{self.attrs.norm.value}`") return {"Y": self.inputs.X.type} if self.inputs.X.type is not None else {} - op_type = OpType("Normalizer", "ai.onnx.ml", 1) attrs: Attributes inputs: Inputs outputs: Outputs - class _OneHotEncoder(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -374,7 +355,7 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): Y: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: if not self.inputs.fully_typed: return {} if self.attrs.cats_int64s: @@ -386,15 +367,15 @@ def infer_output_types(self) -> dict[str, Type]: "Either `cats_int64s` or `cats_strings` attributes must be set." 
) shape = (*self.inputs.X.unwrap_tensor().shape, n_encodings) # type: ignore - return {"Y": Tensor(dtype=np.float32, shape=shape)} - + return { + "Y": Tensor(dtype=np.float32, shape=shape) + } op_type = OpType("OneHotEncoder", "ai.onnx.ml", 1) attrs: Attributes inputs: Inputs outputs: Outputs - class _SVMClassifier(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -425,7 +406,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _SVMRegressor(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -452,7 +432,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Scaler(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -467,7 +446,7 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): Y: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: if self.inputs.X.type is None: return {} sc, off = self.attrs.scale, self.attrs.offset @@ -477,22 +456,16 @@ def infer_output_types(self) -> dict[str, Type]: # If the number of features is known (last row, we can check this here) last = t.shape[-1] if t.shape else 1 if isinstance(last, int) and len(sc.value) not in {1, last}: - raise InferenceError( - f"Mismatched expected ({len(sc.value)}) and actual ({last}) feature count for scale." - ) + raise InferenceError(f"Mismatched expected ({len(sc.value)}) and actual ({last}) feature count for scale.") if isinstance(last, int) and len(off.value) not in {1, last}: - raise InferenceError( - f"Mismatched expected ({len(off.value)}) and actual ({last}) feature count for offset." - ) + raise InferenceError(f"Mismatched expected ({len(off.value)}) and actual ({last}) feature count for offset.") return {"Y": Tensor(np.float32, t.shape)} - op_type = OpType("Scaler", "ai.onnx.ml", 1) attrs: Attributes inputs: Inputs outputs: Outputs - class _TreeEnsembleClassifier(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -527,7 +500,7 @@ class Outputs(BaseOutputs): Y: VarInfo Z: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: e = ( len(self.attrs.class_ids.value) if self.attrs.class_ids is not None @@ -543,21 +516,19 @@ def infer_output_types(self) -> dict[str, Type]: ) if self.inputs.fully_typed: shape = self.inputs.X.unwrap_tensor().shape - assert shape is not None # already checked with fully_typed + assert shape is not None # already checked with fully_typed if len(shape) != 2: raise InferenceError("Expected input to be a matrix.") n = shape[0] else: n = None return {"Y": Tensor(y_type, (n,)), "Z": Tensor(np.float32, (n, e))} - op_type = OpType("TreeEnsembleClassifier", "ai.onnx.ml", 3) attrs: Attributes inputs: Inputs outputs: Outputs - class _TreeEnsembleRegressor(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -591,7 +562,7 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): Y: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: if self.inputs.fully_typed: shape = self.inputs.X.unwrap_tensor().shape assert shape is not None # already checked with fully_typed @@ -603,14 +574,12 @@ def infer_output_types(self) -> dict[str, Type]: n = None e = self.attrs.n_targets.value if self.attrs.n_targets is not None else None return {"Y": Tensor(np.float32, (n, e))} - op_type = OpType("TreeEnsembleRegressor", "ai.onnx.ml", 3) attrs: Attributes inputs: Inputs outputs: Outputs - class 
_ZipMap(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -631,1518 +600,1137 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs +def array_feature_extractor(X: Var, Y: Var, ) -> Var: + r""" +Select elements of the input tensor based on the indices passed. The +indices are applied to the last axes of the tensor. + +Parameters +========== +X + Type T. + Data to be selected +Y + Type tensor(int64). + The indices, based on 0 as the first index of any dimension. + +Returns +======= +Z : Var + Type T. + Selected output data as an array + +Notes +===== +Signature: ``ai.onnx.ml@1::ArrayFeatureExtractor``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` + """ + return _ArrayFeatureExtractor( + _ArrayFeatureExtractor.Attributes( + ), _ArrayFeatureExtractor.Inputs( + X=unwrap_vars(X), Y=unwrap_vars(Y), ), ).get_output_vars( + X=get_value(X), Y=get_value(Y), ).Z + -def array_feature_extractor( - X: Var, - Y: Var, -) -> Var: +def binarizer(X: Var, *, threshold: float = 0.0, ) -> Var: r""" - Select elements of the input tensor based on the indices passed. The - indices are applied to the last axes of the tensor. - - Parameters - ========== - X - Type T. - Data to be selected - Y - Type tensor(int64). - The indices, based on 0 as the first index of any dimension. - - Returns - ======= - Z : Var - Type T. - Selected output data as an array - - Notes - ===== - Signature: ``ai.onnx.ml@1::ArrayFeatureExtractor``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` +Maps the values of the input tensor to either 0 or 1, element-wise, +based on the outcome of a comparison against a threshold value. + +Parameters +========== +X + Type T. + Data to be binarized +threshold + Attribute. + Values greater than this are mapped to 1, others to 0. + +Returns +======= +Y : Var + Type T. + Binarized output data + +Notes +===== +Signature: ``ai.onnx.ml@1::Binarizer``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return ( - _ArrayFeatureExtractor( - _ArrayFeatureExtractor.Attributes(), - _ArrayFeatureExtractor.Inputs( - X=unwrap_vars(X), - Y=unwrap_vars(Y), - ), - ) - .get_output_vars( - X=get_value(X), - Y=get_value(Y), - ) - .Z - ) + return _Binarizer( + _Binarizer.Attributes( + threshold=AttrFloat32(threshold, name="threshold"), + ), _Binarizer.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y -def binarizer( - X: Var, - *, - threshold: float = 0.0, -) -> Var: +def cast_map(X: Var, *, cast_to: str = "TO_FLOAT", map_form: str = "DENSE", max_map: int = 1, ) -> Var: r""" - Maps the values of the input tensor to either 0 or 1, element-wise, - based on the outcome of a comparison against a threshold value. - - Parameters - ========== - X - Type T. - Data to be binarized - threshold - Attribute. - Values greater than this are mapped to 1, others to 0. - - Returns - ======= - Y : Var - Type T. - Binarized output data - - Notes - ===== - Signature: ``ai.onnx.ml@1::Binarizer``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` +Converts a map to a tensor.The map key must be an int64 and the values +will be ordered in ascending order based on this key.The operator +supports dense packing or sparse packing. If using sparse packing, the +key cannot exceed the max_map-1 value. + +Parameters +========== +X + Type T1. 
+ The input map that is to be cast to a tensor +cast_to + Attribute. + A string indicating the desired element type of the output tensor, one + of 'TO_FLOAT', 'TO_STRING', 'TO_INT64'. +map_form + Attribute. + Indicates whether to only output as many values as are in the input + (dense), or position the input based on using the key of the map as the + index of the output (sparse).One of 'DENSE', 'SPARSE'. +max_map + Attribute. + If the value of map_form is 'SPARSE,' this attribute indicates the total + length of the output tensor. + +Returns +======= +Y : Var + Type T2. + A tensor representing the same data as the input map, ordered by their + keys + +Notes +===== +Signature: ``ai.onnx.ml@1::CastMap``. + +Type constraints: + - T1: `map(int64,tensor(float))`, `map(int64,tensor(string))` + - T2: `tensor(float)`, `tensor(int64)`, `tensor(string)` """ - return ( - _Binarizer( - _Binarizer.Attributes( - threshold=AttrFloat32(threshold, name="threshold"), - ), - _Binarizer.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) + return _CastMap( + _CastMap.Attributes( + cast_to=AttrString(cast_to, name="cast_to"), + map_form=AttrString(map_form, name="map_form"), + max_map=AttrInt64(max_map, name="max_map"), + ), _CastMap.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y -def cast_map( - X: Var, - *, - cast_to: str = "TO_FLOAT", - map_form: str = "DENSE", - max_map: int = 1, -) -> Var: +def category_mapper(X: Var, *, cats_int64s: Optional[Iterable[int]] = None, cats_strings: Optional[Iterable[str]] = None, default_int64: int = -1, default_string: str = "_Unused", ) -> Var: r""" - Converts a map to a tensor.The map key must be an int64 and the values - will be ordered in ascending order based on this key.The operator - supports dense packing or sparse packing. If using sparse packing, the - key cannot exceed the max_map-1 value. - - Parameters - ========== - X - Type T1. - The input map that is to be cast to a tensor - cast_to - Attribute. - A string indicating the desired element type of the output tensor, one - of 'TO_FLOAT', 'TO_STRING', 'TO_INT64'. - map_form - Attribute. - Indicates whether to only output as many values as are in the input - (dense), or position the input based on using the key of the map as the - index of the output (sparse).One of 'DENSE', 'SPARSE'. - max_map - Attribute. - If the value of map_form is 'SPARSE,' this attribute indicates the total - length of the output tensor. - - Returns - ======= - Y : Var - Type T2. - A tensor representing the same data as the input map, ordered by their - keys - - Notes - ===== - Signature: ``ai.onnx.ml@1::CastMap``. - - Type constraints: - - T1: `map(int64,tensor(float))`, `map(int64,tensor(string))` - - T2: `tensor(float)`, `tensor(int64)`, `tensor(string)` +Converts strings to integers and vice versa. Two sequences of equal +length are used to map between integers and strings, with strings and +integers at the same index detailing the mapping. Each operator converts +either integers to strings or strings to integers, depending on which +default value attribute is provided. Only one default value attribute +should be defined. If the string default value is set, it will convert +integers to strings. If the int default value is set, it will convert +strings to integers. + +Parameters +========== +X + Type T1. + Input data +cats_int64s + Attribute. + The integers of the map. This sequence must be the same length as the + 'cats_strings' sequence. +cats_strings + Attribute. 
+ The strings of the map. This sequence must be the same length as the + 'cats_int64s' sequence +default_int64 + Attribute. + An integer to use when an input string value is not found in the map.One + and only one of the 'default\_\*' attributes must be defined. +default_string + Attribute. + A string to use when an input integer value is not found in the map.One + and only one of the 'default\_\*' attributes must be defined. + +Returns +======= +Y : Var + Type T2. + Output data. If strings are input, the output values are integers, and + vice versa. + +Notes +===== +Signature: ``ai.onnx.ml@1::CategoryMapper``. + +Type constraints: + - T1: `tensor(int64)`, `tensor(string)` + - T2: `tensor(int64)`, `tensor(string)` """ - return ( - _CastMap( - _CastMap.Attributes( - cast_to=AttrString(cast_to, name="cast_to"), - map_form=AttrString(map_form, name="map_form"), - max_map=AttrInt64(max_map, name="max_map"), - ), - _CastMap.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def category_mapper( - X: Var, - *, - cats_int64s: Optional[Iterable[int]] = None, - cats_strings: Optional[Iterable[str]] = None, - default_int64: int = -1, - default_string: str = "_Unused", -) -> Var: + return _CategoryMapper( + _CategoryMapper.Attributes( + cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), + cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), + default_int64=AttrInt64(default_int64, name="default_int64"), + default_string=AttrString(default_string, name="default_string"), + ), _CategoryMapper.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def dict_vectorizer(X: Var, *, int64_vocabulary: Optional[Iterable[int]] = None, string_vocabulary: Optional[Iterable[str]] = None, ) -> Var: r""" - Converts strings to integers and vice versa. Two sequences of equal - length are used to map between integers and strings, with strings and - integers at the same index detailing the mapping. Each operator converts - either integers to strings or strings to integers, depending on which - default value attribute is provided. Only one default value attribute - should be defined. If the string default value is set, it will convert - integers to strings. If the int default value is set, it will convert - strings to integers. - - Parameters - ========== - X - Type T1. - Input data - cats_int64s - Attribute. - The integers of the map. This sequence must be the same length as the - 'cats_strings' sequence. - cats_strings - Attribute. - The strings of the map. This sequence must be the same length as the - 'cats_int64s' sequence - default_int64 - Attribute. - An integer to use when an input string value is not found in the map.One - and only one of the 'default\_\*' attributes must be defined. - default_string - Attribute. - A string to use when an input integer value is not found in the map.One - and only one of the 'default\_\*' attributes must be defined. - - Returns - ======= - Y : Var - Type T2. - Output data. If strings are input, the output values are integers, and - vice versa. - - Notes - ===== - Signature: ``ai.onnx.ml@1::CategoryMapper``. - - Type constraints: - - T1: `tensor(int64)`, `tensor(string)` - - T2: `tensor(int64)`, `tensor(string)` +Uses an index mapping to convert a dictionary to an array. Given a +dictionary, each key is looked up in the vocabulary attribute +corresponding to the key type. 
The index into the vocabulary array at +which the key is found is then used to index the output 1-D tensor 'Y' +and insert into it the value found in the dictionary 'X'. The key type +of the input map must correspond to the element type of the defined +vocabulary attribute. Therefore, the output array will be equal in +length to the index mapping vector parameter. All keys in the input +dictionary must be present in the index mapping vector. For each item in +the input dictionary, insert its value in the output array. Any keys not +present in the input dictionary, will be zero in the output array. For +example: if the ``string_vocabulary`` parameter is set to +``["a", "c", "b", "z"]``, then an input of ``{"a": 4, "c": 8}`` will +produce an output of ``[4, 8, 0, 0]``. + +Parameters +========== +X + Type T1. + A dictionary. +int64_vocabulary + Attribute. + An integer vocabulary array.One and only one of the vocabularies must be + defined. +string_vocabulary + Attribute. + A string vocabulary array.One and only one of the vocabularies must be + defined. + +Returns +======= +Y : Var + Type T2. + A 1-D tensor holding values from the input dictionary. + +Notes +===== +Signature: ``ai.onnx.ml@1::DictVectorizer``. + +Type constraints: + - T1: `map(int64,tensor(double))`, `map(int64,tensor(float))`, `map(int64,tensor(string))`, `map(string,tensor(double))`, `map(string,tensor(float))`, `map(string,tensor(int64))` + - T2: `tensor(double)`, `tensor(float)`, `tensor(int64)`, `tensor(string)` """ - return ( - _CategoryMapper( - _CategoryMapper.Attributes( - cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), - cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), - default_int64=AttrInt64(default_int64, name="default_int64"), - default_string=AttrString(default_string, name="default_string"), - ), - _CategoryMapper.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) + return _DictVectorizer( + _DictVectorizer.Attributes( + int64_vocabulary=AttrInt64s.maybe(int64_vocabulary, name="int64_vocabulary"), + string_vocabulary=AttrStrings.maybe(string_vocabulary, name="string_vocabulary"), + ), _DictVectorizer.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y -def dict_vectorizer( - X: Var, - *, - int64_vocabulary: Optional[Iterable[int]] = None, - string_vocabulary: Optional[Iterable[str]] = None, -) -> Var: +def feature_vectorizer(X: Sequence[Var], *, inputdimensions: Optional[Iterable[int]] = None, ) -> Var: r""" - Uses an index mapping to convert a dictionary to an array. Given a - dictionary, each key is looked up in the vocabulary attribute - corresponding to the key type. The index into the vocabulary array at - which the key is found is then used to index the output 1-D tensor 'Y' - and insert into it the value found in the dictionary 'X'. The key type - of the input map must correspond to the element type of the defined - vocabulary attribute. Therefore, the output array will be equal in - length to the index mapping vector parameter. All keys in the input - dictionary must be present in the index mapping vector. For each item in - the input dictionary, insert its value in the output array. Any keys not - present in the input dictionary, will be zero in the output array. For - example: if the ``string_vocabulary`` parameter is set to - ``["a", "c", "b", "z"]``, then an input of ``{"a": 4, "c": 8}`` will - produce an output of ``[4, 8, 0, 0]``. - - Parameters - ========== - X - Type T1. - A dictionary. 
- int64_vocabulary - Attribute. - An integer vocabulary array.One and only one of the vocabularies must be - defined. - string_vocabulary - Attribute. - A string vocabulary array.One and only one of the vocabularies must be - defined. - - Returns - ======= - Y : Var - Type T2. - A 1-D tensor holding values from the input dictionary. - - Notes - ===== - Signature: ``ai.onnx.ml@1::DictVectorizer``. - - Type constraints: - - T1: `map(int64,tensor(double))`, `map(int64,tensor(float))`, `map(int64,tensor(string))`, `map(string,tensor(double))`, `map(string,tensor(float))`, `map(string,tensor(int64))` - - T2: `tensor(double)`, `tensor(float)`, `tensor(int64)`, `tensor(string)` +Concatenates input tensors into one continuous output. All input shapes +are 2-D and are concatenated along the second dimension. 1-D tensors are +treated as [1,C]. Inputs are copied to the output maintaining the order +of the input arguments. All inputs must be integers or floats, while the +output will be all floating point values. + +Parameters +========== +X + Type T1. + An ordered collection of tensors, all with the same element type. +inputdimensions + Attribute. + The size of each input in the input list + +Returns +======= +Y : Var + Type tensor(float). + The output array, elements ordered as the inputs. + +Notes +===== +Signature: ``ai.onnx.ml@1::FeatureVectorizer``. + +Type constraints: + - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return ( - _DictVectorizer( - _DictVectorizer.Attributes( - int64_vocabulary=AttrInt64s.maybe( - int64_vocabulary, name="int64_vocabulary" - ), - string_vocabulary=AttrStrings.maybe( - string_vocabulary, name="string_vocabulary" - ), - ), - _DictVectorizer.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) + return _FeatureVectorizer( + _FeatureVectorizer.Attributes( + inputdimensions=AttrInt64s.maybe(inputdimensions, name="inputdimensions"), + ), _FeatureVectorizer.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y -def feature_vectorizer( - X: Sequence[Var], - *, - inputdimensions: Optional[Iterable[int]] = None, -) -> Var: +def imputer(X: Var, *, imputed_value_floats: Optional[Iterable[float]] = None, imputed_value_int64s: Optional[Iterable[int]] = None, replaced_value_float: float = 0.0, replaced_value_int64: int = 0, ) -> Var: r""" - Concatenates input tensors into one continuous output. All input shapes - are 2-D and are concatenated along the second dimension. 1-D tensors are - treated as [1,C]. Inputs are copied to the output maintaining the order - of the input arguments. All inputs must be integers or floats, while the - output will be all floating point values. - - Parameters - ========== - X - Type T1. - An ordered collection of tensors, all with the same element type. - inputdimensions - Attribute. - The size of each input in the input list - - Returns - ======= - Y : Var - Type tensor(float). - The output array, elements ordered as the inputs. - - Notes - ===== - Signature: ``ai.onnx.ml@1::FeatureVectorizer``. - - Type constraints: - - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` +Replaces inputs that equal one value with another, leaving all other +elements alone. This operator is typically used to replace missing +values in situations where they have a canonical representation, such as +-1, 0, NaN, or some extreme value. 
One and only one of +imputed_value_floats or imputed_value_int64s should be defined -- floats +if the input tensor holds floats, integers if the input tensor holds +integers. The imputed values must all fit within the width of the tensor +element type. One and only one of the replaced_value_float or +replaced_value_int64 should be defined, which one depends on whether +floats or integers are being processed. The imputed_value attribute +length can be 1 element, or it can have one element per input feature.In +other words, if the input tensor has the shape [\*,F], then the length +of the attribute array may be 1 or F. If it is 1, then it is broadcast +along the last dimension and applied to each feature. + +Parameters +========== +X + Type T. + Data to be processed. +imputed_value_floats + Attribute. + Value(s) to change to +imputed_value_int64s + Attribute. + Value(s) to change to. +replaced_value_float + Attribute. + A value that needs replacing. +replaced_value_int64 + Attribute. + A value that needs replacing. + +Returns +======= +Y : Var + Type T. + Imputed output data + +Notes +===== +Signature: ``ai.onnx.ml@1::Imputer``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return ( - _FeatureVectorizer( - _FeatureVectorizer.Attributes( - inputdimensions=AttrInt64s.maybe( - inputdimensions, name="inputdimensions" - ), - ), - _FeatureVectorizer.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def imputer( - X: Var, - *, - imputed_value_floats: Optional[Iterable[float]] = None, - imputed_value_int64s: Optional[Iterable[int]] = None, - replaced_value_float: float = 0.0, - replaced_value_int64: int = 0, -) -> Var: + return _Imputer( + _Imputer.Attributes( + imputed_value_floats=AttrFloat32s.maybe(imputed_value_floats, name="imputed_value_floats"), + imputed_value_int64s=AttrInt64s.maybe(imputed_value_int64s, name="imputed_value_int64s"), + replaced_value_float=AttrFloat32(replaced_value_float, name="replaced_value_float"), + replaced_value_int64=AttrInt64(replaced_value_int64, name="replaced_value_int64"), + ), _Imputer.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def label_encoder(X: Var, *, default_float: float = -0.0, default_int64: int = -1, default_string: str = "_Unused", keys_floats: Optional[Iterable[float]] = None, keys_int64s: Optional[Iterable[int]] = None, keys_strings: Optional[Iterable[str]] = None, values_floats: Optional[Iterable[float]] = None, values_int64s: Optional[Iterable[int]] = None, values_strings: Optional[Iterable[str]] = None, ) -> Var: r""" - Replaces inputs that equal one value with another, leaving all other - elements alone. This operator is typically used to replace missing - values in situations where they have a canonical representation, such as - -1, 0, NaN, or some extreme value. One and only one of - imputed_value_floats or imputed_value_int64s should be defined -- floats - if the input tensor holds floats, integers if the input tensor holds - integers. The imputed values must all fit within the width of the tensor - element type. One and only one of the replaced_value_float or - replaced_value_int64 should be defined, which one depends on whether - floats or integers are being processed. The imputed_value attribute - length can be 1 element, or it can have one element per input feature.In - other words, if the input tensor has the shape [\*,F], then the length - of the attribute array may be 1 or F. 
If it is 1, then it is broadcast - along the last dimension and applied to each feature. - - Parameters - ========== - X - Type T. - Data to be processed. - imputed_value_floats - Attribute. - Value(s) to change to - imputed_value_int64s - Attribute. - Value(s) to change to. - replaced_value_float - Attribute. - A value that needs replacing. - replaced_value_int64 - Attribute. - A value that needs replacing. - - Returns - ======= - Y : Var - Type T. - Imputed output data - - Notes - ===== - Signature: ``ai.onnx.ml@1::Imputer``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` +Maps each element in the input tensor to another value. The mapping is +determined by the two parallel attributes, 'keys\_\ *' and 'values\_*' +attribute. The i-th value in the specified 'keys\_\ *' attribute would +be mapped to the i-th value in the specified 'values\_*' attribute. It +implies that input's element type and the element type of the specified +'keys\_\ *' should be identical while the output type is identical to +the specified 'values\_*' attribute. If an input element can not be +found in the specified 'keys\_\ *' attribute, the 'default\_*' that +matches the specified 'values\_\ *' attribute may be used as its output +value. Let's consider an example which maps a string tensor to an +integer tensor. Assume and 'keys_strings' is ["Amy", "Sally"], +'values_int64s' is [5, 6], and 'default_int64' is '-1'. The input +["Dori", "Amy", "Amy", "Sally", "Sally"] would be mapped to [-1, 5, 5, +6, 6]. Since this operator is an one-to-one mapping, its input and +output shapes are the same. Notice that only one of +'keys\_*'/'values\_\ *' can be set. For key look-up, bit-wise comparison +is used so even a float NaN can be mapped to a value in 'values\_*' +attribute. + +Parameters +========== +X + Type T1. + Input data. It can be either tensor or scalar. +default_float + Attribute. + A float. +default_int64 + Attribute. + An integer. +default_string + Attribute. + A string. +keys_floats + Attribute. + A list of floats. +keys_int64s + Attribute. + A list of ints. +keys_strings + Attribute. + A list of strings. One and only one of 'keys\_\*'s should be set. +values_floats + Attribute. + A list of floats. +values_int64s + Attribute. + A list of ints. +values_strings + Attribute. + A list of strings. One and only one of 'value\_\*'s should be set. + +Returns +======= +Y : Var + Type T2. + Output data. + +Notes +===== +Signature: ``ai.onnx.ml@2::LabelEncoder``. 
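# Given the signature documented above, a minimal usage sketch for the generated
# label_encoder constructor, reusing the Amy/Sally example from the docstring
# (the shape name "N" is illustrative):
import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.ml.v3 as ml

x = argument(Tensor(np.str_, ("N",)))
y = ml.label_encoder(
    x,
    keys_strings=["Amy", "Sally"],
    values_int64s=[5, 6],
    default_int64=-1,
)
# Per the docstring: ["Dori", "Amy", "Amy", "Sally", "Sally"] maps to
# [-1, 5, 5, 6, 6]; the output element type follows the 'values_*' attribute,
# so y should be an int64 tensor of the same shape as x.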
+ +Type constraints: + - T1: `tensor(float)`, `tensor(int64)`, `tensor(string)` + - T2: `tensor(float)`, `tensor(int64)`, `tensor(string)` """ - return ( - _Imputer( - _Imputer.Attributes( - imputed_value_floats=AttrFloat32s.maybe( - imputed_value_floats, name="imputed_value_floats" - ), - imputed_value_int64s=AttrInt64s.maybe( - imputed_value_int64s, name="imputed_value_int64s" - ), - replaced_value_float=AttrFloat32( - replaced_value_float, name="replaced_value_float" - ), - replaced_value_int64=AttrInt64( - replaced_value_int64, name="replaced_value_int64" - ), - ), - _Imputer.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def label_encoder( - X: Var, - *, - default_float: float = -0.0, - default_int64: int = -1, - default_string: str = "_Unused", - keys_floats: Optional[Iterable[float]] = None, - keys_int64s: Optional[Iterable[int]] = None, - keys_strings: Optional[Iterable[str]] = None, - values_floats: Optional[Iterable[float]] = None, - values_int64s: Optional[Iterable[int]] = None, - values_strings: Optional[Iterable[str]] = None, -) -> Var: + return _LabelEncoder( + _LabelEncoder.Attributes( + default_float=AttrFloat32(default_float, name="default_float"), + default_int64=AttrInt64(default_int64, name="default_int64"), + default_string=AttrString(default_string, name="default_string"), + keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), + keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), + keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), + values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), + values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), + values_strings=AttrStrings.maybe(values_strings, name="values_strings"), + ), _LabelEncoder.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def linear_classifier(X: Var, *, classlabels_ints: Optional[Iterable[int]] = None, classlabels_strings: Optional[Iterable[str]] = None, coefficients: Iterable[float], intercepts: Optional[Iterable[float]] = None, multi_class: int = 0, post_transform: str = "NONE", ) -> tuple[Var, Var]: r""" - Maps each element in the input tensor to another value. The mapping is - determined by the two parallel attributes, 'keys\_\ *' and 'values\_*' - attribute. The i-th value in the specified 'keys\_\ *' attribute would - be mapped to the i-th value in the specified 'values\_*' attribute. It - implies that input's element type and the element type of the specified - 'keys\_\ *' should be identical while the output type is identical to - the specified 'values\_*' attribute. If an input element can not be - found in the specified 'keys\_\ *' attribute, the 'default\_*' that - matches the specified 'values\_\ *' attribute may be used as its output - value. Let's consider an example which maps a string tensor to an - integer tensor. Assume and 'keys_strings' is ["Amy", "Sally"], - 'values_int64s' is [5, 6], and 'default_int64' is '-1'. The input - ["Dori", "Amy", "Amy", "Sally", "Sally"] would be mapped to [-1, 5, 5, - 6, 6]. Since this operator is an one-to-one mapping, its input and - output shapes are the same. Notice that only one of - 'keys\_*'/'values\_\ *' can be set. For key look-up, bit-wise comparison - is used so even a float NaN can be mapped to a value in 'values\_*' - attribute. - - Parameters - ========== - X - Type T1. - Input data. It can be either tensor or scalar. - default_float - Attribute. - A float. - default_int64 - Attribute. - An integer. 
- default_string - Attribute. - A string. - keys_floats - Attribute. - A list of floats. - keys_int64s - Attribute. - A list of ints. - keys_strings - Attribute. - A list of strings. One and only one of 'keys\_\*'s should be set. - values_floats - Attribute. - A list of floats. - values_int64s - Attribute. - A list of ints. - values_strings - Attribute. - A list of strings. One and only one of 'value\_\*'s should be set. - - Returns - ======= - Y : Var - Type T2. - Output data. - - Notes - ===== - Signature: ``ai.onnx.ml@2::LabelEncoder``. - - Type constraints: - - T1: `tensor(float)`, `tensor(int64)`, `tensor(string)` - - T2: `tensor(float)`, `tensor(int64)`, `tensor(string)` +Linear classifier + +Parameters +========== +X + Type T1. + Data to be classified. +classlabels_ints + Attribute. + Class labels when using integer labels. One and only one 'classlabels' + attribute must be defined. +classlabels_strings + Attribute. + Class labels when using string labels. One and only one 'classlabels' + attribute must be defined. +coefficients + Attribute. + A collection of weights of the model(s). +intercepts + Attribute. + A collection of intercepts. +multi_class + Attribute. + Indicates whether to do OvR or multinomial (0=OvR is the default). +post_transform + Attribute. + Indicates the transform to apply to the scores vector.One of 'NONE,' + 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' + +Returns +======= +Y : Var + Type T2. + Classification outputs (one class per example). +Z : Var + Type tensor(float). + Classification scores ([N,E] - one score for each class and example + +Notes +===== +Signature: ``ai.onnx.ml@1::LinearClassifier``. + +Type constraints: + - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` + - T2: `tensor(int64)`, `tensor(string)` """ - return ( - _LabelEncoder( - _LabelEncoder.Attributes( - default_float=AttrFloat32(default_float, name="default_float"), - default_int64=AttrInt64(default_int64, name="default_int64"), - default_string=AttrString(default_string, name="default_string"), - keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), - keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), - keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), - values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), - values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), - values_strings=AttrStrings.maybe(values_strings, name="values_strings"), - ), - _LabelEncoder.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def linear_classifier( - X: Var, - *, - classlabels_ints: Optional[Iterable[int]] = None, - classlabels_strings: Optional[Iterable[str]] = None, - coefficients: Iterable[float], - intercepts: Optional[Iterable[float]] = None, - multi_class: int = 0, - post_transform: str = "NONE", -) -> tuple[Var, Var]: + return _LinearClassifier( + _LinearClassifier.Attributes( + classlabels_ints=AttrInt64s.maybe(classlabels_ints, name="classlabels_ints"), + classlabels_strings=AttrStrings.maybe(classlabels_strings, name="classlabels_strings"), + coefficients=AttrFloat32s(coefficients, name="coefficients"), + intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), + multi_class=AttrInt64(multi_class, name="multi_class"), + post_transform=AttrString(post_transform, name="post_transform"), + ), _LinearClassifier.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), )._unpack_to_any() + + +def linear_regressor(X: Var, *, coefficients: 
Optional[Iterable[float]] = None, intercepts: Optional[Iterable[float]] = None, post_transform: str = "NONE", targets: int = 1, ) -> Var: r""" - Linear classifier - - Parameters - ========== - X - Type T1. - Data to be classified. - classlabels_ints - Attribute. - Class labels when using integer labels. One and only one 'classlabels' - attribute must be defined. - classlabels_strings - Attribute. - Class labels when using string labels. One and only one 'classlabels' - attribute must be defined. - coefficients - Attribute. - A collection of weights of the model(s). - intercepts - Attribute. - A collection of intercepts. - multi_class - Attribute. - Indicates whether to do OvR or multinomial (0=OvR is the default). - post_transform - Attribute. - Indicates the transform to apply to the scores vector.One of 'NONE,' - 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' - - Returns - ======= - Y : Var - Type T2. - Classification outputs (one class per example). - Z : Var - Type tensor(float). - Classification scores ([N,E] - one score for each class and example - - Notes - ===== - Signature: ``ai.onnx.ml@1::LinearClassifier``. - - Type constraints: - - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` - - T2: `tensor(int64)`, `tensor(string)` +Generalized linear regression evaluation. If targets is set to 1 +(default) then univariate regression is performed. If targets is set to +M then M sets of coefficients must be passed in as a sequence and M +results will be output for each input n in N. The coefficients array is +of length n, and the coefficients for each target are contiguous. +Intercepts are optional but if provided must match the number of +targets. + +Parameters +========== +X + Type T. + Data to be regressed. +coefficients + Attribute. + Weights of the model(s). +intercepts + Attribute. + Weights of the intercepts, if used. +post_transform + Attribute. + Indicates the transform to apply to the regression output vector.One of + 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' +targets + Attribute. + The total number of regression targets, 1 if not defined. + +Returns +======= +Y : Var + Type tensor(float). + Regression outputs (one per target, per example). + +Notes +===== +Signature: ``ai.onnx.ml@1::LinearRegressor``. 
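# A small usage sketch for the linear_regressor constructor documented above
# (the coefficient and intercept values are illustrative):
import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.ml.v3 as ml

x = argument(Tensor(np.float32, ("N", 3)))
y = ml.linear_regressor(x, coefficients=[0.5, -1.0, 2.0], intercepts=[0.1])
# Univariate regression (targets defaults to 1); the custom type inference of
# _LinearRegressor above should yield Y as float32 of shape (N, 1).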
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return ( - _LinearClassifier( - _LinearClassifier.Attributes( - classlabels_ints=AttrInt64s.maybe( - classlabels_ints, name="classlabels_ints" - ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" - ), - coefficients=AttrFloat32s(coefficients, name="coefficients"), - intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), - multi_class=AttrInt64(multi_class, name="multi_class"), - post_transform=AttrString(post_transform, name="post_transform"), - ), - _LinearClassifier.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - ._unpack_to_any() - ) - - -def linear_regressor( - X: Var, - *, - coefficients: Optional[Iterable[float]] = None, - intercepts: Optional[Iterable[float]] = None, - post_transform: str = "NONE", - targets: int = 1, -) -> Var: + return _LinearRegressor( + _LinearRegressor.Attributes( + coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), + intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), + post_transform=AttrString(post_transform, name="post_transform"), + targets=AttrInt64(targets, name="targets"), + ), _LinearRegressor.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def normalizer(X: Var, *, norm: str = "MAX", ) -> Var: r""" - Generalized linear regression evaluation. If targets is set to 1 - (default) then univariate regression is performed. If targets is set to - M then M sets of coefficients must be passed in as a sequence and M - results will be output for each input n in N. The coefficients array is - of length n, and the coefficients for each target are contiguous. - Intercepts are optional but if provided must match the number of - targets. - - Parameters - ========== - X - Type T. - Data to be regressed. - coefficients - Attribute. - Weights of the model(s). - intercepts - Attribute. - Weights of the intercepts, if used. - post_transform - Attribute. - Indicates the transform to apply to the regression output vector.One of - 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' - targets - Attribute. - The total number of regression targets, 1 if not defined. - - Returns - ======= - Y : Var - Type tensor(float). - Regression outputs (one per target, per example). - - Notes - ===== - Signature: ``ai.onnx.ml@1::LinearRegressor``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` +Normalize the input. There are three normalization modes, which have the +corresponding formulas, defined using element-wise infix operators '/' +and '^' and tensor-wide functions 'max' and 'sum': Max: Y = X / max(X) +L1: Y = X / sum(X) L2: Y = sqrt(X^2 / sum(X^2)} In all modes, if the +divisor is zero, Y == X. For batches, that is, [N,C] tensors, +normalization is done along the C axis. In other words, each row of the +batch is normalized independently. + +Parameters +========== +X + Type T. + Data to be encoded, a tensor of shape [N,C] or [C] +norm + Attribute. + One of 'MAX,' 'L1,' 'L2' + +Returns +======= +Y : Var + Type tensor(float). + Encoded output data + +Notes +===== +Signature: ``ai.onnx.ml@1::Normalizer``. 
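# A usage sketch for the normalizer constructor documented above:
import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.ml.v3 as ml

x = argument(Tensor(np.float32, ("N", 4)))
y = ml.normalizer(x, norm="L2")
# Each row of the [N, C] input is normalized independently; an unrecognized
# `norm` value raises InferenceError in the _Normalizer inference shown earlier.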
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return ( - _LinearRegressor( - _LinearRegressor.Attributes( - coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), - intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), - post_transform=AttrString(post_transform, name="post_transform"), - targets=AttrInt64(targets, name="targets"), - ), - _LinearRegressor.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) + return _Normalizer( + _Normalizer.Attributes( + norm=AttrString(norm, name="norm"), + ), _Normalizer.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y -def normalizer( - X: Var, - *, - norm: str = "MAX", -) -> Var: +def one_hot_encoder(X: Var, *, cats_int64s: Optional[Iterable[int]] = None, cats_strings: Optional[Iterable[str]] = None, zeros: int = 1, ) -> Var: r""" - Normalize the input. There are three normalization modes, which have the - corresponding formulas, defined using element-wise infix operators '/' - and '^' and tensor-wide functions 'max' and 'sum': Max: Y = X / max(X) - L1: Y = X / sum(X) L2: Y = sqrt(X^2 / sum(X^2)} In all modes, if the - divisor is zero, Y == X. For batches, that is, [N,C] tensors, - normalization is done along the C axis. In other words, each row of the - batch is normalized independently. - - Parameters - ========== - X - Type T. - Data to be encoded, a tensor of shape [N,C] or [C] - norm - Attribute. - One of 'MAX,' 'L1,' 'L2' - - Returns - ======= - Y : Var - Type tensor(float). - Encoded output data - - Notes - ===== - Signature: ``ai.onnx.ml@1::Normalizer``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` +Replace each input element with an array of ones and zeros, where a +single one is placed at the index of the category that was passed in. +The total category count will determine the size of the extra dimension +of the output array Y. For example, if we pass a tensor with a single +value of 4, and a category count of 8, the output will be a tensor with +``[0,0,0,0,1,0,0,0]``. This operator assumes every input feature is from +the same set of categories. If the input is a tensor of float, int32, or +double, the data will be cast to integers and the cats_int64s category +list will be used for the lookups. + +Parameters +========== +X + Type T. + Data to be encoded. +cats_int64s + Attribute. + List of categories, ints.One and only one of the 'cats\_\*' attributes + must be defined. +cats_strings + Attribute. + List of categories, strings.One and only one of the 'cats\_\*' + attributes must be defined. +zeros + Attribute. + If true and category is not present, will return all zeros; if false and + a category if not found, the operator will fail. + +Returns +======= +Y : Var + Type tensor(float). + Encoded output data, having one more dimension than X. + +Notes +===== +Signature: ``ai.onnx.ml@1::OneHotEncoder``. 
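# A usage sketch for the one_hot_encoder constructor documented above, tying it
# to the custom type inference of _OneHotEncoder earlier in this file:
import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.ml.v3 as ml

x = argument(Tensor(np.int64, ("N",)))
y = ml.one_hot_encoder(x, cats_int64s=[0, 1, 2, 3, 4, 5, 6, 7])
# The category count is appended as a new trailing dimension, so the inferred
# type of y should be float32 of shape (N, 8).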
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` """ - return ( - _Normalizer( - _Normalizer.Attributes( - norm=AttrString(norm, name="norm"), - ), - _Normalizer.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) + return _OneHotEncoder( + _OneHotEncoder.Attributes( + cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), + cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), + zeros=AttrInt64(zeros, name="zeros"), + ), _OneHotEncoder.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y -def one_hot_encoder( - X: Var, - *, - cats_int64s: Optional[Iterable[int]] = None, - cats_strings: Optional[Iterable[str]] = None, - zeros: int = 1, -) -> Var: +def svmclassifier(X: Var, *, classlabels_ints: Optional[Iterable[int]] = None, classlabels_strings: Optional[Iterable[str]] = None, coefficients: Optional[Iterable[float]] = None, kernel_params: Optional[Iterable[float]] = None, kernel_type: str = "LINEAR", post_transform: str = "NONE", prob_a: Optional[Iterable[float]] = None, prob_b: Optional[Iterable[float]] = None, rho: Optional[Iterable[float]] = None, support_vectors: Optional[Iterable[float]] = None, vectors_per_class: Optional[Iterable[int]] = None, ) -> tuple[Var, Var]: r""" - Replace each input element with an array of ones and zeros, where a - single one is placed at the index of the category that was passed in. - The total category count will determine the size of the extra dimension - of the output array Y. For example, if we pass a tensor with a single - value of 4, and a category count of 8, the output will be a tensor with - ``[0,0,0,0,1,0,0,0]``. This operator assumes every input feature is from - the same set of categories. If the input is a tensor of float, int32, or - double, the data will be cast to integers and the cats_int64s category - list will be used for the lookups. - - Parameters - ========== - X - Type T. - Data to be encoded. - cats_int64s - Attribute. - List of categories, ints.One and only one of the 'cats\_\*' attributes - must be defined. - cats_strings - Attribute. - List of categories, strings.One and only one of the 'cats\_\*' - attributes must be defined. - zeros - Attribute. - If true and category is not present, will return all zeros; if false and - a category if not found, the operator will fail. - - Returns - ======= - Y : Var - Type tensor(float). - Encoded output data, having one more dimension than X. - - Notes - ===== - Signature: ``ai.onnx.ml@1::OneHotEncoder``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` +Support Vector Machine classifier + +Parameters +========== +X + Type T1. + Data to be classified. +classlabels_ints + Attribute. + Class labels if using integer labels.One and only one of the + 'classlabels\_\*' attributes must be defined. +classlabels_strings + Attribute. + Class labels if using string labels.One and only one of the + 'classlabels\_\*' attributes must be defined. +coefficients + Attribute. + +kernel_params + Attribute. + List of 3 elements containing gamma, coef0, and degree, in that order. + Zero if unused for the kernel. +kernel_type + Attribute. + The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'. +post_transform + Attribute. + Indicates the transform to apply to the score. One of 'NONE,' 'SOFTMAX,' + 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' +prob_a + Attribute. + First set of probability coefficients. 
+prob_b + Attribute. + Second set of probability coefficients. This array must be same size as + prob_a.If these are provided then output Z are probability estimates, + otherwise they are raw scores. +rho + Attribute. + +support_vectors + Attribute. + +vectors_per_class + Attribute. + + +Returns +======= +Y : Var + Type T2. + Classification outputs (one class per example). +Z : Var + Type tensor(float). + Class scores (one per class per example), if prob_a and prob_b are + provided they are probabilities for each class, otherwise they are raw + scores. + +Notes +===== +Signature: ``ai.onnx.ml@1::SVMClassifier``. + +Type constraints: + - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` + - T2: `tensor(int64)`, `tensor(string)` """ - return ( - _OneHotEncoder( - _OneHotEncoder.Attributes( - cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), - cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), - zeros=AttrInt64(zeros, name="zeros"), - ), - _OneHotEncoder.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def svmclassifier( - X: Var, - *, - classlabels_ints: Optional[Iterable[int]] = None, - classlabels_strings: Optional[Iterable[str]] = None, - coefficients: Optional[Iterable[float]] = None, - kernel_params: Optional[Iterable[float]] = None, - kernel_type: str = "LINEAR", - post_transform: str = "NONE", - prob_a: Optional[Iterable[float]] = None, - prob_b: Optional[Iterable[float]] = None, - rho: Optional[Iterable[float]] = None, - support_vectors: Optional[Iterable[float]] = None, - vectors_per_class: Optional[Iterable[int]] = None, -) -> tuple[Var, Var]: + return _SVMClassifier( + _SVMClassifier.Attributes( + classlabels_ints=AttrInt64s.maybe(classlabels_ints, name="classlabels_ints"), + classlabels_strings=AttrStrings.maybe(classlabels_strings, name="classlabels_strings"), + coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), + kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), + kernel_type=AttrString(kernel_type, name="kernel_type"), + post_transform=AttrString(post_transform, name="post_transform"), + prob_a=AttrFloat32s.maybe(prob_a, name="prob_a"), + prob_b=AttrFloat32s.maybe(prob_b, name="prob_b"), + rho=AttrFloat32s.maybe(rho, name="rho"), + support_vectors=AttrFloat32s.maybe(support_vectors, name="support_vectors"), + vectors_per_class=AttrInt64s.maybe(vectors_per_class, name="vectors_per_class"), + ), _SVMClassifier.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), )._unpack_to_any() + + +def svmregressor(X: Var, *, coefficients: Optional[Iterable[float]] = None, kernel_params: Optional[Iterable[float]] = None, kernel_type: str = "LINEAR", n_supports: int = 0, one_class: int = 0, post_transform: str = "NONE", rho: Optional[Iterable[float]] = None, support_vectors: Optional[Iterable[float]] = None, ) -> Var: r""" - Support Vector Machine classifier - - Parameters - ========== - X - Type T1. - Data to be classified. - classlabels_ints - Attribute. - Class labels if using integer labels.One and only one of the - 'classlabels\_\*' attributes must be defined. - classlabels_strings - Attribute. - Class labels if using string labels.One and only one of the - 'classlabels\_\*' attributes must be defined. - coefficients - Attribute. - - kernel_params - Attribute. - List of 3 elements containing gamma, coef0, and degree, in that order. - Zero if unused for the kernel. - kernel_type - Attribute. 
- The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'. - post_transform - Attribute. - Indicates the transform to apply to the score. One of 'NONE,' 'SOFTMAX,' - 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' - prob_a - Attribute. - First set of probability coefficients. - prob_b - Attribute. - Second set of probability coefficients. This array must be same size as - prob_a.If these are provided then output Z are probability estimates, - otherwise they are raw scores. - rho - Attribute. - - support_vectors - Attribute. - - vectors_per_class - Attribute. - - - Returns - ======= - Y : Var - Type T2. - Classification outputs (one class per example). - Z : Var - Type tensor(float). - Class scores (one per class per example), if prob_a and prob_b are - provided they are probabilities for each class, otherwise they are raw - scores. - - Notes - ===== - Signature: ``ai.onnx.ml@1::SVMClassifier``. - - Type constraints: - - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` - - T2: `tensor(int64)`, `tensor(string)` +Support Vector Machine regression prediction and one-class SVM anomaly +detection. + +Parameters +========== +X + Type T. + Data to be regressed. +coefficients + Attribute. + Support vector coefficients. +kernel_params + Attribute. + List of 3 elements containing gamma, coef0, and degree, in that order. + Zero if unused for the kernel. +kernel_type + Attribute. + The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'. +n_supports + Attribute. + The number of support vectors. +one_class + Attribute. + Flag indicating whether the regression is a one-class SVM or not. +post_transform + Attribute. + Indicates the transform to apply to the score. One of 'NONE,' 'SOFTMAX,' + 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.' +rho + Attribute. + +support_vectors + Attribute. + Chosen support vectors + +Returns +======= +Y : Var + Type tensor(float). + Regression outputs (one score per target per example). + +Notes +===== +Signature: ``ai.onnx.ml@1::SVMRegressor``. 
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return ( - _SVMClassifier( - _SVMClassifier.Attributes( - classlabels_ints=AttrInt64s.maybe( - classlabels_ints, name="classlabels_ints" - ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" - ), - coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), - kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), - kernel_type=AttrString(kernel_type, name="kernel_type"), - post_transform=AttrString(post_transform, name="post_transform"), - prob_a=AttrFloat32s.maybe(prob_a, name="prob_a"), - prob_b=AttrFloat32s.maybe(prob_b, name="prob_b"), - rho=AttrFloat32s.maybe(rho, name="rho"), - support_vectors=AttrFloat32s.maybe( - support_vectors, name="support_vectors" - ), - vectors_per_class=AttrInt64s.maybe( - vectors_per_class, name="vectors_per_class" - ), - ), - _SVMClassifier.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - ._unpack_to_any() - ) - - -def svmregressor( - X: Var, - *, - coefficients: Optional[Iterable[float]] = None, - kernel_params: Optional[Iterable[float]] = None, - kernel_type: str = "LINEAR", - n_supports: int = 0, - one_class: int = 0, - post_transform: str = "NONE", - rho: Optional[Iterable[float]] = None, - support_vectors: Optional[Iterable[float]] = None, -) -> Var: + return _SVMRegressor( + _SVMRegressor.Attributes( + coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), + kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), + kernel_type=AttrString(kernel_type, name="kernel_type"), + n_supports=AttrInt64(n_supports, name="n_supports"), + one_class=AttrInt64(one_class, name="one_class"), + post_transform=AttrString(post_transform, name="post_transform"), + rho=AttrFloat32s.maybe(rho, name="rho"), + support_vectors=AttrFloat32s.maybe(support_vectors, name="support_vectors"), + ), _SVMRegressor.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def scaler(X: Var, *, offset: Optional[Iterable[float]] = None, scale: Optional[Iterable[float]] = None, ) -> Var: r""" - Support Vector Machine regression prediction and one-class SVM anomaly - detection. - - Parameters - ========== - X - Type T. - Data to be regressed. - coefficients - Attribute. - Support vector coefficients. - kernel_params - Attribute. - List of 3 elements containing gamma, coef0, and degree, in that order. - Zero if unused for the kernel. - kernel_type - Attribute. - The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'. - n_supports - Attribute. - The number of support vectors. - one_class - Attribute. - Flag indicating whether the regression is a one-class SVM or not. - post_transform - Attribute. - Indicates the transform to apply to the score. One of 'NONE,' 'SOFTMAX,' - 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.' - rho - Attribute. - - support_vectors - Attribute. - Chosen support vectors - - Returns - ======= - Y : Var - Type tensor(float). - Regression outputs (one score per target per example). - - Notes - ===== - Signature: ``ai.onnx.ml@1::SVMRegressor``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` +Rescale input data, for example to standardize features by removing the +mean and scaling to unit variance. + +Parameters +========== +X + Type T. + Data to be scaled. +offset + Attribute. 
+ First, offset by this.Can be length of features in an [N,F] tensor or + length 1, in which case it applies to all features, regardless of + dimension count. +scale + Attribute. + Second, multiply by this.Can be length of features in an [N,F] tensor or + length 1, in which case it applies to all features, regardless of + dimension count.Must be same length as 'offset' + +Returns +======= +Y : Var + Type tensor(float). + Scaled output data. + +Notes +===== +Signature: ``ai.onnx.ml@1::Scaler``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return ( - _SVMRegressor( - _SVMRegressor.Attributes( - coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), - kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), - kernel_type=AttrString(kernel_type, name="kernel_type"), - n_supports=AttrInt64(n_supports, name="n_supports"), - one_class=AttrInt64(one_class, name="one_class"), - post_transform=AttrString(post_transform, name="post_transform"), - rho=AttrFloat32s.maybe(rho, name="rho"), - support_vectors=AttrFloat32s.maybe( - support_vectors, name="support_vectors" - ), - ), - _SVMRegressor.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) + return _Scaler( + _Scaler.Attributes( + offset=AttrFloat32s.maybe(offset, name="offset"), + scale=AttrFloat32s.maybe(scale, name="scale"), + ), _Scaler.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y -def scaler( - X: Var, - *, - offset: Optional[Iterable[float]] = None, - scale: Optional[Iterable[float]] = None, -) -> Var: +def tree_ensemble_classifier(X: Var, *, base_values: Optional[Iterable[float]] = None, base_values_as_tensor: Optional[np.ndarray] = None, class_ids: Optional[Iterable[int]] = None, class_nodeids: Optional[Iterable[int]] = None, class_treeids: Optional[Iterable[int]] = None, class_weights: Optional[Iterable[float]] = None, class_weights_as_tensor: Optional[np.ndarray] = None, classlabels_int64s: Optional[Iterable[int]] = None, classlabels_strings: Optional[Iterable[str]] = None, nodes_falsenodeids: Optional[Iterable[int]] = None, nodes_featureids: Optional[Iterable[int]] = None, nodes_hitrates: Optional[Iterable[float]] = None, nodes_hitrates_as_tensor: Optional[np.ndarray] = None, nodes_missing_value_tracks_true: Optional[Iterable[int]] = None, nodes_modes: Optional[Iterable[str]] = None, nodes_nodeids: Optional[Iterable[int]] = None, nodes_treeids: Optional[Iterable[int]] = None, nodes_truenodeids: Optional[Iterable[int]] = None, nodes_values: Optional[Iterable[float]] = None, nodes_values_as_tensor: Optional[np.ndarray] = None, post_transform: str = "NONE", ) -> tuple[Var, Var]: r""" - Rescale input data, for example to standardize features by removing the - mean and scaling to unit variance. - - Parameters - ========== - X - Type T. - Data to be scaled. - offset - Attribute. - First, offset by this.Can be length of features in an [N,F] tensor or - length 1, in which case it applies to all features, regardless of - dimension count. - scale - Attribute. - Second, multiply by this.Can be length of features in an [N,F] tensor or - length 1, in which case it applies to all features, regardless of - dimension count.Must be same length as 'offset' - - Returns - ======= - Y : Var - Type tensor(float). - Scaled output data. - - Notes - ===== - Signature: ``ai.onnx.ml@1::Scaler``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` +Tree Ensemble classifier. 
Returns the top class for each of N inputs. +The attributes named 'nodes_X' form a sequence of tuples, associated by +index into the sequences, which must all be of equal length. These +tuples define the nodes. Similarly, all fields prefixed with 'class\_' +are tuples of votes at the leaves. A leaf may have multiple votes, where +each vote is weighted by the associated class_weights index. One and +only one of classlabels_strings or classlabels_int64s will be defined. +The class_ids are indices into this list. All fields ending with +\_as_tensor can be used instead of the same parameter without the suffix +if the element type is double and not float. + +Parameters +========== +X + Type T1. + Input of shape [N,F] +base_values + Attribute. + Base values for classification, added to final class score; the size + must be the same as the classes or can be left unassigned (assumed 0) +base_values_as_tensor + Attribute. + Base values for classification, added to final class score; the size + must be the same as the classes or can be left unassigned (assumed 0) +class_ids + Attribute. + The index of the class list that each weight is for. +class_nodeids + Attribute. + node id that this weight is for. +class_treeids + Attribute. + The id of the tree that this node is in. +class_weights + Attribute. + The weight for the class in class_id. +class_weights_as_tensor + Attribute. + The weight for the class in class_id. +classlabels_int64s + Attribute. + Class labels if using integer labels.One and only one of the + 'classlabels\_\*' attributes must be defined. +classlabels_strings + Attribute. + Class labels if using string labels.One and only one of the + 'classlabels\_\*' attributes must be defined. +nodes_falsenodeids + Attribute. + Child node if expression is false. +nodes_featureids + Attribute. + Feature id for each node. +nodes_hitrates + Attribute. + Popularity of each node, used for performance and may be omitted. +nodes_hitrates_as_tensor + Attribute. + Popularity of each node, used for performance and may be omitted. +nodes_missing_value_tracks_true + Attribute. + For each node, define what to do in the presence of a missing value: if + a value is missing (NaN), use the 'true' or 'false' branch based on the + value in this array.This attribute may be left undefined, and the + default value is false (0) for all nodes. +nodes_modes + Attribute. + The node kind, that is, the comparison to make at the node. There is no + comparison to make at a leaf node.One of 'BRANCH_LEQ', 'BRANCH_LT', + 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', 'LEAF' +nodes_nodeids + Attribute. + Node id for each node. Ids may restart at zero for each tree, but it not + required to. +nodes_treeids + Attribute. + Tree id for each node. +nodes_truenodeids + Attribute. + Child node if expression is true. +nodes_values + Attribute. + Thresholds to do the splitting on for each node. +nodes_values_as_tensor + Attribute. + Thresholds to do the splitting on for each node. +post_transform + Attribute. + Indicates the transform to apply to the score. One of 'NONE,' 'SOFTMAX,' + 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.' + +Returns +======= +Y : Var + Type T2. + N, Top class for each point +Z : Var + Type tensor(float). + The class score for each class, for each point, a tensor of shape [N,E]. + +Notes +===== +Signature: ``ai.onnx.ml@3::TreeEnsembleClassifier``. 
+ +Type constraints: + - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` + - T2: `tensor(int64)`, `tensor(string)` """ - return ( - _Scaler( - _Scaler.Attributes( - offset=AttrFloat32s.maybe(offset, name="offset"), - scale=AttrFloat32s.maybe(scale, name="scale"), - ), - _Scaler.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def tree_ensemble_classifier( - X: Var, - *, - base_values: Optional[Iterable[float]] = None, - base_values_as_tensor: Optional[np.ndarray] = None, - class_ids: Optional[Iterable[int]] = None, - class_nodeids: Optional[Iterable[int]] = None, - class_treeids: Optional[Iterable[int]] = None, - class_weights: Optional[Iterable[float]] = None, - class_weights_as_tensor: Optional[np.ndarray] = None, - classlabels_int64s: Optional[Iterable[int]] = None, - classlabels_strings: Optional[Iterable[str]] = None, - nodes_falsenodeids: Optional[Iterable[int]] = None, - nodes_featureids: Optional[Iterable[int]] = None, - nodes_hitrates: Optional[Iterable[float]] = None, - nodes_hitrates_as_tensor: Optional[np.ndarray] = None, - nodes_missing_value_tracks_true: Optional[Iterable[int]] = None, - nodes_modes: Optional[Iterable[str]] = None, - nodes_nodeids: Optional[Iterable[int]] = None, - nodes_treeids: Optional[Iterable[int]] = None, - nodes_truenodeids: Optional[Iterable[int]] = None, - nodes_values: Optional[Iterable[float]] = None, - nodes_values_as_tensor: Optional[np.ndarray] = None, - post_transform: str = "NONE", -) -> tuple[Var, Var]: + return _TreeEnsembleClassifier( + _TreeEnsembleClassifier.Attributes( + base_values=AttrFloat32s.maybe(base_values, name="base_values"), + base_values_as_tensor=AttrTensor.maybe(base_values_as_tensor, name="base_values_as_tensor"), + class_ids=AttrInt64s.maybe(class_ids, name="class_ids"), + class_nodeids=AttrInt64s.maybe(class_nodeids, name="class_nodeids"), + class_treeids=AttrInt64s.maybe(class_treeids, name="class_treeids"), + class_weights=AttrFloat32s.maybe(class_weights, name="class_weights"), + class_weights_as_tensor=AttrTensor.maybe(class_weights_as_tensor, name="class_weights_as_tensor"), + classlabels_int64s=AttrInt64s.maybe(classlabels_int64s, name="classlabels_int64s"), + classlabels_strings=AttrStrings.maybe(classlabels_strings, name="classlabels_strings"), + nodes_falsenodeids=AttrInt64s.maybe(nodes_falsenodeids, name="nodes_falsenodeids"), + nodes_featureids=AttrInt64s.maybe(nodes_featureids, name="nodes_featureids"), + nodes_hitrates=AttrFloat32s.maybe(nodes_hitrates, name="nodes_hitrates"), + nodes_hitrates_as_tensor=AttrTensor.maybe(nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor"), + nodes_missing_value_tracks_true=AttrInt64s.maybe(nodes_missing_value_tracks_true, name="nodes_missing_value_tracks_true"), + nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), + nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), + nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), + nodes_truenodeids=AttrInt64s.maybe(nodes_truenodeids, name="nodes_truenodeids"), + nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), + nodes_values_as_tensor=AttrTensor.maybe(nodes_values_as_tensor, name="nodes_values_as_tensor"), + post_transform=AttrString(post_transform, name="post_transform"), + ), _TreeEnsembleClassifier.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), )._unpack_to_any() + + +def tree_ensemble_regressor(X: Var, *, aggregate_function: str = "SUM", base_values: 
Optional[Iterable[float]] = None, base_values_as_tensor: Optional[np.ndarray] = None, n_targets: Optional[int] = None, nodes_falsenodeids: Optional[Iterable[int]] = None, nodes_featureids: Optional[Iterable[int]] = None, nodes_hitrates: Optional[Iterable[float]] = None, nodes_hitrates_as_tensor: Optional[np.ndarray] = None, nodes_missing_value_tracks_true: Optional[Iterable[int]] = None, nodes_modes: Optional[Iterable[str]] = None, nodes_nodeids: Optional[Iterable[int]] = None, nodes_treeids: Optional[Iterable[int]] = None, nodes_truenodeids: Optional[Iterable[int]] = None, nodes_values: Optional[Iterable[float]] = None, nodes_values_as_tensor: Optional[np.ndarray] = None, post_transform: str = "NONE", target_ids: Optional[Iterable[int]] = None, target_nodeids: Optional[Iterable[int]] = None, target_treeids: Optional[Iterable[int]] = None, target_weights: Optional[Iterable[float]] = None, target_weights_as_tensor: Optional[np.ndarray] = None, ) -> Var: r""" - Tree Ensemble classifier. Returns the top class for each of N inputs. - The attributes named 'nodes_X' form a sequence of tuples, associated by - index into the sequences, which must all be of equal length. These - tuples define the nodes. Similarly, all fields prefixed with 'class\_' - are tuples of votes at the leaves. A leaf may have multiple votes, where - each vote is weighted by the associated class_weights index. One and - only one of classlabels_strings or classlabels_int64s will be defined. - The class_ids are indices into this list. All fields ending with - \_as_tensor can be used instead of the same parameter without the suffix - if the element type is double and not float. - - Parameters - ========== - X - Type T1. - Input of shape [N,F] - base_values - Attribute. - Base values for classification, added to final class score; the size - must be the same as the classes or can be left unassigned (assumed 0) - base_values_as_tensor - Attribute. - Base values for classification, added to final class score; the size - must be the same as the classes or can be left unassigned (assumed 0) - class_ids - Attribute. - The index of the class list that each weight is for. - class_nodeids - Attribute. - node id that this weight is for. - class_treeids - Attribute. - The id of the tree that this node is in. - class_weights - Attribute. - The weight for the class in class_id. - class_weights_as_tensor - Attribute. - The weight for the class in class_id. - classlabels_int64s - Attribute. - Class labels if using integer labels.One and only one of the - 'classlabels\_\*' attributes must be defined. - classlabels_strings - Attribute. - Class labels if using string labels.One and only one of the - 'classlabels\_\*' attributes must be defined. - nodes_falsenodeids - Attribute. - Child node if expression is false. - nodes_featureids - Attribute. - Feature id for each node. - nodes_hitrates - Attribute. - Popularity of each node, used for performance and may be omitted. - nodes_hitrates_as_tensor - Attribute. - Popularity of each node, used for performance and may be omitted. - nodes_missing_value_tracks_true - Attribute. - For each node, define what to do in the presence of a missing value: if - a value is missing (NaN), use the 'true' or 'false' branch based on the - value in this array.This attribute may be left undefined, and the - default value is false (0) for all nodes. - nodes_modes - Attribute. - The node kind, that is, the comparison to make at the node. 
There is no - comparison to make at a leaf node.One of 'BRANCH_LEQ', 'BRANCH_LT', - 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', 'LEAF' - nodes_nodeids - Attribute. - Node id for each node. Ids may restart at zero for each tree, but it not - required to. - nodes_treeids - Attribute. - Tree id for each node. - nodes_truenodeids - Attribute. - Child node if expression is true. - nodes_values - Attribute. - Thresholds to do the splitting on for each node. - nodes_values_as_tensor - Attribute. - Thresholds to do the splitting on for each node. - post_transform - Attribute. - Indicates the transform to apply to the score. One of 'NONE,' 'SOFTMAX,' - 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.' - - Returns - ======= - Y : Var - Type T2. - N, Top class for each point - Z : Var - Type tensor(float). - The class score for each class, for each point, a tensor of shape [N,E]. - - Notes - ===== - Signature: ``ai.onnx.ml@3::TreeEnsembleClassifier``. - - Type constraints: - - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` - - T2: `tensor(int64)`, `tensor(string)` +Tree Ensemble regressor. Returns the regressed values for each input in +N. All args with nodes\_ are fields of a tuple of tree nodes, and it is +assumed they are the same length, and an index i will decode the tuple +across these inputs. Each node id can appear only once for each tree id. +All fields prefixed with target\_ are tuples of votes at the leaves. A +leaf may have multiple votes, where each vote is weighted by the +associated target_weights index. All fields ending with \_as_tensor can +be used instead of the same parameter without the suffix if the element +type is double and not float. All trees must have their node ids start +at 0 and increment by 1. Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, +BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF + +Parameters +========== +X + Type T. + Input of shape [N,F] +aggregate_function + Attribute. + Defines how to aggregate leaf values within a target. One of 'AVERAGE,' + 'SUM,' 'MIN,' 'MAX.' +base_values + Attribute. + Base values for regression, added to final prediction after applying + aggregate_function; the size must be the same as the classes or can be + left unassigned (assumed 0) +base_values_as_tensor + Attribute. + Base values for regression, added to final prediction after applying + aggregate_function; the size must be the same as the classes or can be + left unassigned (assumed 0) +n_targets + Attribute. + The total number of targets. +nodes_falsenodeids + Attribute. + Child node if expression is false +nodes_featureids + Attribute. + Feature id for each node. +nodes_hitrates + Attribute. + Popularity of each node, used for performance and may be omitted. +nodes_hitrates_as_tensor + Attribute. + Popularity of each node, used for performance and may be omitted. +nodes_missing_value_tracks_true + Attribute. + For each node, define what to do in the presence of a NaN: use the + 'true' (if the attribute value is 1) or 'false' (if the attribute value + is 0) branch based on the value in this array.This attribute may be left + undefined and the default value is false (0) for all nodes. +nodes_modes + Attribute. + The node kind, that is, the comparison to make at the node. There is no + comparison to make at a leaf node.One of 'BRANCH_LEQ', 'BRANCH_LT', + 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', 'LEAF' +nodes_nodeids + Attribute. + Node id for each node. Node ids must restart at zero for each tree and + increase sequentially. 
+nodes_treeids + Attribute. + Tree id for each node. +nodes_truenodeids + Attribute. + Child node if expression is true +nodes_values + Attribute. + Thresholds to do the splitting on for each node. +nodes_values_as_tensor + Attribute. + Thresholds to do the splitting on for each node. +post_transform + Attribute. + Indicates the transform to apply to the score. One of 'NONE,' 'SOFTMAX,' + 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' +target_ids + Attribute. + The index of the target that each weight is for +target_nodeids + Attribute. + The node id of each weight +target_treeids + Attribute. + The id of the tree that each node is in. +target_weights + Attribute. + The weight for each target +target_weights_as_tensor + Attribute. + The weight for each target + +Returns +======= +Y : Var + Type tensor(float). + N classes + +Notes +===== +Signature: ``ai.onnx.ml@3::TreeEnsembleRegressor``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return ( - _TreeEnsembleClassifier( - _TreeEnsembleClassifier.Attributes( - base_values=AttrFloat32s.maybe(base_values, name="base_values"), - base_values_as_tensor=AttrTensor.maybe( - base_values_as_tensor, name="base_values_as_tensor" - ), - class_ids=AttrInt64s.maybe(class_ids, name="class_ids"), - class_nodeids=AttrInt64s.maybe(class_nodeids, name="class_nodeids"), - class_treeids=AttrInt64s.maybe(class_treeids, name="class_treeids"), - class_weights=AttrFloat32s.maybe(class_weights, name="class_weights"), - class_weights_as_tensor=AttrTensor.maybe( - class_weights_as_tensor, name="class_weights_as_tensor" - ), - classlabels_int64s=AttrInt64s.maybe( - classlabels_int64s, name="classlabels_int64s" - ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" - ), - nodes_falsenodeids=AttrInt64s.maybe( - nodes_falsenodeids, name="nodes_falsenodeids" - ), - nodes_featureids=AttrInt64s.maybe( - nodes_featureids, name="nodes_featureids" - ), - nodes_hitrates=AttrFloat32s.maybe( - nodes_hitrates, name="nodes_hitrates" - ), - nodes_hitrates_as_tensor=AttrTensor.maybe( - nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor" - ), - nodes_missing_value_tracks_true=AttrInt64s.maybe( - nodes_missing_value_tracks_true, - name="nodes_missing_value_tracks_true", - ), - nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), - nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), - nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), - nodes_truenodeids=AttrInt64s.maybe( - nodes_truenodeids, name="nodes_truenodeids" - ), - nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), - nodes_values_as_tensor=AttrTensor.maybe( - nodes_values_as_tensor, name="nodes_values_as_tensor" - ), - post_transform=AttrString(post_transform, name="post_transform"), - ), - _TreeEnsembleClassifier.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - ._unpack_to_any() - ) - - -def tree_ensemble_regressor( - X: Var, - *, - aggregate_function: str = "SUM", - base_values: Optional[Iterable[float]] = None, - base_values_as_tensor: Optional[np.ndarray] = None, - n_targets: Optional[int] = None, - nodes_falsenodeids: Optional[Iterable[int]] = None, - nodes_featureids: Optional[Iterable[int]] = None, - nodes_hitrates: Optional[Iterable[float]] = None, - nodes_hitrates_as_tensor: Optional[np.ndarray] = None, - nodes_missing_value_tracks_true: Optional[Iterable[int]] = None, - nodes_modes: Optional[Iterable[str]] = None, - nodes_nodeids: 
Optional[Iterable[int]] = None, - nodes_treeids: Optional[Iterable[int]] = None, - nodes_truenodeids: Optional[Iterable[int]] = None, - nodes_values: Optional[Iterable[float]] = None, - nodes_values_as_tensor: Optional[np.ndarray] = None, - post_transform: str = "NONE", - target_ids: Optional[Iterable[int]] = None, - target_nodeids: Optional[Iterable[int]] = None, - target_treeids: Optional[Iterable[int]] = None, - target_weights: Optional[Iterable[float]] = None, - target_weights_as_tensor: Optional[np.ndarray] = None, -) -> Var: - r""" - Tree Ensemble regressor. Returns the regressed values for each input in - N. All args with nodes\_ are fields of a tuple of tree nodes, and it is - assumed they are the same length, and an index i will decode the tuple - across these inputs. Each node id can appear only once for each tree id. - All fields prefixed with target\_ are tuples of votes at the leaves. A - leaf may have multiple votes, where each vote is weighted by the - associated target_weights index. All fields ending with \_as_tensor can - be used instead of the same parameter without the suffix if the element - type is double and not float. All trees must have their node ids start - at 0 and increment by 1. Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, - BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF - - Parameters - ========== - X - Type T. - Input of shape [N,F] - aggregate_function - Attribute. - Defines how to aggregate leaf values within a target. One of 'AVERAGE,' - 'SUM,' 'MIN,' 'MAX.' - base_values - Attribute. - Base values for regression, added to final prediction after applying - aggregate_function; the size must be the same as the classes or can be - left unassigned (assumed 0) - base_values_as_tensor - Attribute. - Base values for regression, added to final prediction after applying - aggregate_function; the size must be the same as the classes or can be - left unassigned (assumed 0) - n_targets - Attribute. - The total number of targets. - nodes_falsenodeids - Attribute. - Child node if expression is false - nodes_featureids - Attribute. - Feature id for each node. - nodes_hitrates - Attribute. - Popularity of each node, used for performance and may be omitted. - nodes_hitrates_as_tensor - Attribute. - Popularity of each node, used for performance and may be omitted. - nodes_missing_value_tracks_true - Attribute. - For each node, define what to do in the presence of a NaN: use the - 'true' (if the attribute value is 1) or 'false' (if the attribute value - is 0) branch based on the value in this array.This attribute may be left - undefined and the default value is false (0) for all nodes. - nodes_modes - Attribute. - The node kind, that is, the comparison to make at the node. There is no - comparison to make at a leaf node.One of 'BRANCH_LEQ', 'BRANCH_LT', - 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', 'LEAF' - nodes_nodeids - Attribute. - Node id for each node. Node ids must restart at zero for each tree and - increase sequentially. - nodes_treeids - Attribute. - Tree id for each node. - nodes_truenodeids - Attribute. - Child node if expression is true - nodes_values - Attribute. - Thresholds to do the splitting on for each node. - nodes_values_as_tensor - Attribute. - Thresholds to do the splitting on for each node. - post_transform - Attribute. - Indicates the transform to apply to the score. One of 'NONE,' 'SOFTMAX,' - 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT' - target_ids - Attribute. - The index of the target that each weight is for - target_nodeids - Attribute. 
- The node id of each weight - target_treeids - Attribute. - The id of the tree that each node is in. - target_weights - Attribute. - The weight for each target - target_weights_as_tensor - Attribute. - The weight for each target - - Returns - ======= - Y : Var - Type tensor(float). - N classes - - Notes - ===== - Signature: ``ai.onnx.ml@3::TreeEnsembleRegressor``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` - """ - return ( - _TreeEnsembleRegressor( - _TreeEnsembleRegressor.Attributes( - aggregate_function=AttrString( - aggregate_function, name="aggregate_function" - ), - base_values=AttrFloat32s.maybe(base_values, name="base_values"), - base_values_as_tensor=AttrTensor.maybe( - base_values_as_tensor, name="base_values_as_tensor" - ), - n_targets=AttrInt64.maybe(n_targets, name="n_targets"), - nodes_falsenodeids=AttrInt64s.maybe( - nodes_falsenodeids, name="nodes_falsenodeids" - ), - nodes_featureids=AttrInt64s.maybe( - nodes_featureids, name="nodes_featureids" - ), - nodes_hitrates=AttrFloat32s.maybe( - nodes_hitrates, name="nodes_hitrates" - ), - nodes_hitrates_as_tensor=AttrTensor.maybe( - nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor" - ), - nodes_missing_value_tracks_true=AttrInt64s.maybe( - nodes_missing_value_tracks_true, - name="nodes_missing_value_tracks_true", - ), - nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), - nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), - nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), - nodes_truenodeids=AttrInt64s.maybe( - nodes_truenodeids, name="nodes_truenodeids" - ), - nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), - nodes_values_as_tensor=AttrTensor.maybe( - nodes_values_as_tensor, name="nodes_values_as_tensor" - ), - post_transform=AttrString(post_transform, name="post_transform"), - target_ids=AttrInt64s.maybe(target_ids, name="target_ids"), - target_nodeids=AttrInt64s.maybe(target_nodeids, name="target_nodeids"), - target_treeids=AttrInt64s.maybe(target_treeids, name="target_treeids"), - target_weights=AttrFloat32s.maybe( - target_weights, name="target_weights" - ), - target_weights_as_tensor=AttrTensor.maybe( - target_weights_as_tensor, name="target_weights_as_tensor" - ), - ), - _TreeEnsembleRegressor.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def zip_map( - X: Var, - *, - classlabels_int64s: Optional[Iterable[int]] = None, - classlabels_strings: Optional[Iterable[str]] = None, -) -> Var: + return _TreeEnsembleRegressor( + _TreeEnsembleRegressor.Attributes( + aggregate_function=AttrString(aggregate_function, name="aggregate_function"), + base_values=AttrFloat32s.maybe(base_values, name="base_values"), + base_values_as_tensor=AttrTensor.maybe(base_values_as_tensor, name="base_values_as_tensor"), + n_targets=AttrInt64.maybe(n_targets, name="n_targets"), + nodes_falsenodeids=AttrInt64s.maybe(nodes_falsenodeids, name="nodes_falsenodeids"), + nodes_featureids=AttrInt64s.maybe(nodes_featureids, name="nodes_featureids"), + nodes_hitrates=AttrFloat32s.maybe(nodes_hitrates, name="nodes_hitrates"), + nodes_hitrates_as_tensor=AttrTensor.maybe(nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor"), + nodes_missing_value_tracks_true=AttrInt64s.maybe(nodes_missing_value_tracks_true, name="nodes_missing_value_tracks_true"), + nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), + nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), + 
nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), + nodes_truenodeids=AttrInt64s.maybe(nodes_truenodeids, name="nodes_truenodeids"), + nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), + nodes_values_as_tensor=AttrTensor.maybe(nodes_values_as_tensor, name="nodes_values_as_tensor"), + post_transform=AttrString(post_transform, name="post_transform"), + target_ids=AttrInt64s.maybe(target_ids, name="target_ids"), + target_nodeids=AttrInt64s.maybe(target_nodeids, name="target_nodeids"), + target_treeids=AttrInt64s.maybe(target_treeids, name="target_treeids"), + target_weights=AttrFloat32s.maybe(target_weights, name="target_weights"), + target_weights_as_tensor=AttrTensor.maybe(target_weights_as_tensor, name="target_weights_as_tensor"), + ), _TreeEnsembleRegressor.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def zip_map(X: Var, *, classlabels_int64s: Optional[Iterable[int]] = None, classlabels_strings: Optional[Iterable[str]] = None, ) -> Var: r""" - Creates a map from the input and the attributes. The values are provided - by the input tensor, while the keys are specified by the attributes. - Must provide keys in either classlabels_strings or classlabels_int64s - (but not both). The columns of the tensor correspond one-by-one to the - keys specified by the attributes. There must be as many columns as keys. - - Parameters - ========== - X - Type tensor(float). - The input values - classlabels_int64s - Attribute. - The keys when using int keys.One and only one of the 'classlabels\_\*' - attributes must be defined. - classlabels_strings - Attribute. - The keys when using string keys.One and only one of the - 'classlabels\_\*' attributes must be defined. - - Returns - ======= - Z : Var - Type T. - The output map - - Notes - ===== - Signature: ``ai.onnx.ml@1::ZipMap``. - - Type constraints: - - T: `seq(map(int64,tensor(float)))`, `seq(map(string,tensor(float)))` +Creates a map from the input and the attributes. The values are provided +by the input tensor, while the keys are specified by the attributes. +Must provide keys in either classlabels_strings or classlabels_int64s +(but not both). The columns of the tensor correspond one-by-one to the +keys specified by the attributes. There must be as many columns as keys. + +Parameters +========== +X + Type tensor(float). + The input values +classlabels_int64s + Attribute. + The keys when using int keys.One and only one of the 'classlabels\_\*' + attributes must be defined. +classlabels_strings + Attribute. + The keys when using string keys.One and only one of the + 'classlabels\_\*' attributes must be defined. + +Returns +======= +Z : Var + Type T. + The output map + +Notes +===== +Signature: ``ai.onnx.ml@1::ZipMap``. 
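Editor's note: as a usage sketch for ``zip_map``, given an [N, C] float tensor of per-class scores, the constructor below produces a sequence of maps keyed by the string labels, one map per row. The labels here are placeholders, and per the docstring exactly one of the ``classlabels_*`` attributes may be set.

import numpy as np
from spox import Tensor, argument, build
import spox.opset.ai.onnx.ml.v3 as ml

scores = argument(Tensor(np.float32, ("N", 3)))
maps = ml.zip_map(scores, classlabels_strings=["cat", "dog", "bird"])
model = build(inputs={"scores": scores}, outputs={"maps": maps})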
+ +Type constraints: + - T: `seq(map(int64,tensor(float)))`, `seq(map(string,tensor(float)))` """ - return ( - _ZipMap( - _ZipMap.Attributes( - classlabels_int64s=AttrInt64s.maybe( - classlabels_int64s, name="classlabels_int64s" - ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" - ), - ), - _ZipMap.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Z - ) + return _ZipMap( + _ZipMap.Attributes( + classlabels_int64s=AttrInt64s.maybe(classlabels_int64s, name="classlabels_int64s"), + classlabels_strings=AttrStrings.maybe(classlabels_strings, name="classlabels_strings"), + ), _ZipMap.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Z _OPERATORS = { @@ -2187,4 +1775,4 @@ def zip_map( "ZipMap": zip_map, } -__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] +__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] diff --git a/src/spox/opset/ai/onnx/ml/v4.py b/src/spox/opset/ai/onnx/ml/v4.py index 2e4871c..998b6af 100644 --- a/src/spox/opset/ai/onnx/ml/v4.py +++ b/src/spox/opset/ai/onnx/ml/v4.py @@ -1,66 +1,58 @@ -# Copyright (c) QuantCo 2023-2024 -# SPDX-License-Identifier: BSD-3-Clause - # ruff: noqa: E741 -- Allow ambiguous variable name -from collections.abc import Iterable +import typing +import warnings from dataclasses import dataclass +from collections.abc import Iterable, Sequence from typing import ( + Any, + Callable, Optional, + Union, ) +from typing import cast as typing_cast import numpy as np +import numpy.typing as npt +from spox._var import Var, VarInfo, result_type, unwrap_vars, get_value +from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._attributes import ( + AttrDtype, AttrFloat32, AttrFloat32s, + AttrGraph, AttrInt64, AttrInt64s, AttrString, AttrStrings, AttrTensor, + AttrType, ) -from spox._fields import BaseAttributes, BaseInputs, BaseOutputs +from spox._graph import Graph, subgraph +from spox._internal_op import intro from spox._node import OpType -from spox._standard import StandardNode -from spox._var import Var, VarInfo, get_value, unwrap_vars -from spox.opset.ai.onnx.ml.v3 import ( - _ArrayFeatureExtractor, - _Binarizer, - _CastMap, - _CategoryMapper, - _DictVectorizer, - _FeatureVectorizer, - _Imputer, - _LinearClassifier, - _LinearRegressor, - _Normalizer, - _OneHotEncoder, - _Scaler, - _SVMClassifier, - _SVMRegressor, - _TreeEnsembleClassifier, - _TreeEnsembleRegressor, - _ZipMap, - array_feature_extractor, - binarizer, - cast_map, - category_mapper, - dict_vectorizer, - feature_vectorizer, - imputer, - linear_classifier, - linear_regressor, - normalizer, - one_hot_encoder, - scaler, - svmclassifier, - svmregressor, - tree_ensemble_classifier, - tree_ensemble_regressor, - zip_map, -) - - +from spox._standard import InferenceError, StandardNode +from spox._type_system import Tensor, Type, Sequence as SpoxSequence +from spox._value_prop import PropValueType + + +from spox.opset.ai.onnx.ml.v3 import _ArrayFeatureExtractor, array_feature_extractor +from spox.opset.ai.onnx.ml.v3 import _Binarizer, binarizer +from spox.opset.ai.onnx.ml.v3 import _CastMap, cast_map +from spox.opset.ai.onnx.ml.v3 import _CategoryMapper, category_mapper +from spox.opset.ai.onnx.ml.v3 import _DictVectorizer, dict_vectorizer +from spox.opset.ai.onnx.ml.v3 import _FeatureVectorizer, feature_vectorizer +from spox.opset.ai.onnx.ml.v3 import _Imputer, imputer +from spox.opset.ai.onnx.ml.v3 import _LinearClassifier, linear_classifier +from spox.opset.ai.onnx.ml.v3 
import _LinearRegressor, linear_regressor +from spox.opset.ai.onnx.ml.v3 import _Normalizer, normalizer +from spox.opset.ai.onnx.ml.v3 import _OneHotEncoder, one_hot_encoder +from spox.opset.ai.onnx.ml.v3 import _SVMClassifier, svmclassifier +from spox.opset.ai.onnx.ml.v3 import _SVMRegressor, svmregressor +from spox.opset.ai.onnx.ml.v3 import _Scaler, scaler +from spox.opset.ai.onnx.ml.v3 import _TreeEnsembleClassifier, tree_ensemble_classifier +from spox.opset.ai.onnx.ml.v3 import _TreeEnsembleRegressor, tree_ensemble_regressor +from spox.opset.ai.onnx.ml.v3 import _ZipMap, zip_map class _LabelEncoder(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -91,131 +83,107 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - -def label_encoder( - X: Var, - *, - default_float: float = -0.0, - default_int64: int = -1, - default_string: str = "_Unused", - default_tensor: Optional[np.ndarray] = None, - keys_floats: Optional[Iterable[float]] = None, - keys_int64s: Optional[Iterable[int]] = None, - keys_strings: Optional[Iterable[str]] = None, - keys_tensor: Optional[np.ndarray] = None, - values_floats: Optional[Iterable[float]] = None, - values_int64s: Optional[Iterable[int]] = None, - values_strings: Optional[Iterable[str]] = None, - values_tensor: Optional[np.ndarray] = None, -) -> Var: +def label_encoder(X: Var, *, default_float: float = -0.0, default_int64: int = -1, default_string: str = "_Unused", default_tensor: Optional[np.ndarray] = None, keys_floats: Optional[Iterable[float]] = None, keys_int64s: Optional[Iterable[int]] = None, keys_strings: Optional[Iterable[str]] = None, keys_tensor: Optional[np.ndarray] = None, values_floats: Optional[Iterable[float]] = None, values_int64s: Optional[Iterable[int]] = None, values_strings: Optional[Iterable[str]] = None, values_tensor: Optional[np.ndarray] = None, ) -> Var: r""" - Maps each element in the input tensor to another value. The mapping is - determined by the two parallel attributes, 'keys\_\ *' and 'values\_*' - attribute. The i-th value in the specified 'keys\_\ *' attribute would - be mapped to the i-th value in the specified 'values\_*' attribute. It - implies that input's element type and the element type of the specified - 'keys\_\ *' should be identical while the output type is identical to - the specified 'values\_*' attribute. Note that the 'keys\_\ *' and - 'values\_*' attributes must have the same length. If an input element - can not be found in the specified 'keys\_\ *' attribute, the - 'default\_*' that matches the specified 'values\_\ *' attribute may be - used as its output value. The type of the 'default\_*' attribute must - match the 'values\_\ *' attribute chosen. Let's consider an example - which maps a string tensor to an integer tensor. Assume and - 'keys_strings' is ["Amy", "Sally"], 'values_int64s' is [5, 6], and - 'default_int64' is '-1'. The input ["Dori", "Amy", "Amy", "Sally", - "Sally"] would be mapped to [-1, 5, 5, 6, 6]. Since this operator is an - one-to-one mapping, its input and output shapes are the same. Notice - that only one of 'keys\_*'/'values\_\*' can be set. Float keys with - value 'NaN' match any input 'NaN' value regardless of bit value. If a - key is repeated, the last key takes precedence. - - Parameters - ========== - X - Type T1. - Input data. It must have the same element type as the keys\_\* attribute - set. - default_float - Attribute. - A float. - default_int64 - Attribute. - An integer. - default_string - Attribute. - A string. - default_tensor - Attribute. 
- A default tensor. {"*Unused"} if values*\ \* has string type, {-1} if - values\_\* has integral type, and {-0.f} if values\_\* has float type. - keys_floats - Attribute. - A list of floats. - keys_int64s - Attribute. - A list of ints. - keys_strings - Attribute. - A list of strings. - keys_tensor - Attribute. - Keys encoded as a 1D tensor. One and only one of 'keys\_\*'s should be - set. - values_floats - Attribute. - A list of floats. - values_int64s - Attribute. - A list of ints. - values_strings - Attribute. - A list of strings. - values_tensor - Attribute. - Values encoded as a 1D tensor. One and only one of 'values\_\*'s should - be set. - - Returns - ======= - Y : Var - Type T2. - Output data. This tensor's element type is based on the values\_\* - attribute set. - - Notes - ===== - Signature: ``ai.onnx.ml@4::LabelEncoder``. - - Type constraints: - - T1: `tensor(double)`, `tensor(float)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` - - T2: `tensor(double)`, `tensor(float)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` +Maps each element in the input tensor to another value. The mapping is +determined by the two parallel attributes, 'keys\_\ *' and 'values\_*' +attribute. The i-th value in the specified 'keys\_\ *' attribute would +be mapped to the i-th value in the specified 'values\_*' attribute. It +implies that input's element type and the element type of the specified +'keys\_\ *' should be identical while the output type is identical to +the specified 'values\_*' attribute. Note that the 'keys\_\ *' and +'values\_*' attributes must have the same length. If an input element +can not be found in the specified 'keys\_\ *' attribute, the +'default\_*' that matches the specified 'values\_\ *' attribute may be +used as its output value. The type of the 'default\_*' attribute must +match the 'values\_\ *' attribute chosen. Let's consider an example +which maps a string tensor to an integer tensor. Assume and +'keys_strings' is ["Amy", "Sally"], 'values_int64s' is [5, 6], and +'default_int64' is '-1'. The input ["Dori", "Amy", "Amy", "Sally", +"Sally"] would be mapped to [-1, 5, 5, 6, 6]. Since this operator is an +one-to-one mapping, its input and output shapes are the same. Notice +that only one of 'keys\_*'/'values\_\*' can be set. Float keys with +value 'NaN' match any input 'NaN' value regardless of bit value. If a +key is repeated, the last key takes precedence. + +Parameters +========== +X + Type T1. + Input data. It must have the same element type as the keys\_\* attribute + set. +default_float + Attribute. + A float. +default_int64 + Attribute. + An integer. +default_string + Attribute. + A string. +default_tensor + Attribute. + A default tensor. {"*Unused"} if values*\ \* has string type, {-1} if + values\_\* has integral type, and {-0.f} if values\_\* has float type. +keys_floats + Attribute. + A list of floats. +keys_int64s + Attribute. + A list of ints. +keys_strings + Attribute. + A list of strings. +keys_tensor + Attribute. + Keys encoded as a 1D tensor. One and only one of 'keys\_\*'s should be + set. +values_floats + Attribute. + A list of floats. +values_int64s + Attribute. + A list of ints. +values_strings + Attribute. + A list of strings. +values_tensor + Attribute. + Values encoded as a 1D tensor. One and only one of 'values\_\*'s should + be set. + +Returns +======= +Y : Var + Type T2. + Output data. This tensor's element type is based on the values\_\* + attribute set. 
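Editor's note: the worked example in the docstring above translates directly into a call to this constructor. The sketch below maps string inputs through the ``["Amy", "Sally"] -> [5, 6]`` table with ``-1`` as the fallback, using the v4 module this hunk touches; the ``np.str_`` element type for the string tensor is an assumption about spox's ``Tensor`` dtype handling.

import numpy as np
from spox import Tensor, argument, build
import spox.opset.ai.onnx.ml.v4 as ml

names = argument(Tensor(np.str_, ("N",)))
ids = ml.label_encoder(
    names,
    keys_strings=["Amy", "Sally"],
    values_int64s=[5, 6],
    default_int64=-1,  # e.g. "Dori" is not a key, so it maps to -1
)
model = build(inputs={"names": names}, outputs={"ids": ids})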
+ +Notes +===== +Signature: ``ai.onnx.ml@4::LabelEncoder``. + +Type constraints: + - T1: `tensor(double)`, `tensor(float)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` + - T2: `tensor(double)`, `tensor(float)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` """ - return ( - _LabelEncoder( - _LabelEncoder.Attributes( - default_float=AttrFloat32(default_float, name="default_float"), - default_int64=AttrInt64(default_int64, name="default_int64"), - default_string=AttrString(default_string, name="default_string"), - default_tensor=AttrTensor.maybe(default_tensor, name="default_tensor"), - keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), - keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), - keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), - keys_tensor=AttrTensor.maybe(keys_tensor, name="keys_tensor"), - values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), - values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), - values_strings=AttrStrings.maybe(values_strings, name="values_strings"), - values_tensor=AttrTensor.maybe(values_tensor, name="values_tensor"), - ), - _LabelEncoder.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) + return _LabelEncoder( + _LabelEncoder.Attributes( + default_float=AttrFloat32(default_float, name="default_float"), + default_int64=AttrInt64(default_int64, name="default_int64"), + default_string=AttrString(default_string, name="default_string"), + default_tensor=AttrTensor.maybe(default_tensor, name="default_tensor"), + keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), + keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), + keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), + keys_tensor=AttrTensor.maybe(keys_tensor, name="keys_tensor"), + values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), + values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), + values_strings=AttrStrings.maybe(values_strings, name="values_strings"), + values_tensor=AttrTensor.maybe(values_tensor, name="values_tensor"), + ), _LabelEncoder.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y _OPERATORS = { @@ -260,4 +228,4 @@ def label_encoder( "ZipMap": zip_map, } -__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] +__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] diff --git a/src/spox/opset/ai/onnx/ml/v5.py b/src/spox/opset/ai/onnx/ml/v5.py index c35f6c9..c1c90c3 100644 --- a/src/spox/opset/ai/onnx/ml/v5.py +++ b/src/spox/opset/ai/onnx/ml/v5.py @@ -1,60 +1,57 @@ -# Copyright (c) QuantCo 2023-2024 -# SPDX-License-Identifier: BSD-3-Clause - # ruff: noqa: E741 -- Allow ambiguous variable name -from collections.abc import Iterable +import typing +import warnings from dataclasses import dataclass +from collections.abc import Iterable, Sequence from typing import ( + Any, + Callable, Optional, + Union, ) +from typing import cast as typing_cast import numpy as np +import numpy.typing as npt +from spox._var import Var, VarInfo, result_type, unwrap_vars, get_value +from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._attributes import ( + AttrDtype, + AttrFloat32, + AttrFloat32s, + AttrGraph, AttrInt64, AttrInt64s, + AttrString, + AttrStrings, AttrTensor, + AttrType, ) -from spox._fields import BaseAttributes, BaseInputs, BaseOutputs +from spox._graph import Graph, subgraph +from spox._internal_op import intro from 
spox._node import OpType -from spox._standard import StandardNode -from spox._var import Var, VarInfo, get_value, unwrap_vars -from spox.opset.ai.onnx.ml.v4 import ( - _ArrayFeatureExtractor, - _Binarizer, - _CastMap, - _CategoryMapper, - _DictVectorizer, - _FeatureVectorizer, - _Imputer, - _LabelEncoder, - _LinearClassifier, - _LinearRegressor, - _Normalizer, - _OneHotEncoder, - _Scaler, - _SVMClassifier, - _SVMRegressor, - _ZipMap, - array_feature_extractor, - binarizer, - cast_map, - category_mapper, - dict_vectorizer, - feature_vectorizer, - imputer, - label_encoder, - linear_classifier, - linear_regressor, - normalizer, - one_hot_encoder, - scaler, - svmclassifier, - svmregressor, - zip_map, -) +from spox._standard import InferenceError, StandardNode +from spox._type_system import Tensor, Type, Sequence as SpoxSequence +from spox._value_prop import PropValueType +from spox.opset.ai.onnx.ml.v4 import _ArrayFeatureExtractor, array_feature_extractor +from spox.opset.ai.onnx.ml.v4 import _Binarizer, binarizer +from spox.opset.ai.onnx.ml.v4 import _CastMap, cast_map +from spox.opset.ai.onnx.ml.v4 import _CategoryMapper, category_mapper +from spox.opset.ai.onnx.ml.v4 import _DictVectorizer, dict_vectorizer +from spox.opset.ai.onnx.ml.v4 import _FeatureVectorizer, feature_vectorizer +from spox.opset.ai.onnx.ml.v4 import _Imputer, imputer +from spox.opset.ai.onnx.ml.v4 import _LabelEncoder, label_encoder +from spox.opset.ai.onnx.ml.v4 import _LinearClassifier, linear_classifier +from spox.opset.ai.onnx.ml.v4 import _LinearRegressor, linear_regressor +from spox.opset.ai.onnx.ml.v4 import _Normalizer, normalizer +from spox.opset.ai.onnx.ml.v4 import _OneHotEncoder, one_hot_encoder +from spox.opset.ai.onnx.ml.v4 import _SVMClassifier, svmclassifier +from spox.opset.ai.onnx.ml.v4 import _SVMRegressor, svmregressor +from spox.opset.ai.onnx.ml.v4 import _Scaler, scaler +from spox.opset.ai.onnx.ml.v4 import _ZipMap, zip_map class _TreeEnsemble(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -89,181 +86,142 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - -def tree_ensemble( - X: Var, - *, - aggregate_function: int = 1, - leaf_targetids: Iterable[int], - leaf_weights: np.ndarray, - membership_values: Optional[np.ndarray] = None, - n_targets: Optional[int] = None, - nodes_falseleafs: Iterable[int], - nodes_falsenodeids: Iterable[int], - nodes_featureids: Iterable[int], - nodes_hitrates: Optional[np.ndarray] = None, - nodes_missing_value_tracks_true: Optional[Iterable[int]] = None, - nodes_modes: np.ndarray, - nodes_splits: np.ndarray, - nodes_trueleafs: Iterable[int], - nodes_truenodeids: Iterable[int], - post_transform: int = 0, - tree_roots: Iterable[int], -) -> Var: +def tree_ensemble(X: Var, *, aggregate_function: int = 1, leaf_targetids: Iterable[int], leaf_weights: np.ndarray, membership_values: Optional[np.ndarray] = None, n_targets: Optional[int] = None, nodes_falseleafs: Iterable[int], nodes_falsenodeids: Iterable[int], nodes_featureids: Iterable[int], nodes_hitrates: Optional[np.ndarray] = None, nodes_missing_value_tracks_true: Optional[Iterable[int]] = None, nodes_modes: np.ndarray, nodes_splits: np.ndarray, nodes_trueleafs: Iterable[int], nodes_truenodeids: Iterable[int], post_transform: int = 0, tree_roots: Iterable[int], ) -> Var: r""" - Tree Ensemble operator. Returns the regressed values for each input in a - batch. Inputs have dimensions ``[N, F]`` where ``N`` is the input batch - size and ``F`` is the number of input features. 
Outputs have dimensions - ``[N, num_targets]`` where ``N`` is the batch size and ``num_targets`` - is the number of targets, which is a configurable attribute. +Tree Ensemble operator. Returns the regressed values for each input in a +batch. Inputs have dimensions ``[N, F]`` where ``N`` is the input batch +size and ``F`` is the number of input features. Outputs have dimensions +``[N, num_targets]`` where ``N`` is the batch size and ``num_targets`` +is the number of targets, which is a configurable attribute. - :: +:: - The encoding of this attribute is split along interior nodes and the leaves of the trees. Notably, attributes with the prefix `nodes_*` are associated with interior nodes, and attributes with the prefix `leaf_*` are associated with leaves. - The attributes `nodes_*` must all have the same length and encode a sequence of tuples, as defined by taking all the `nodes_*` fields at a given position. + The encoding of this attribute is split along interior nodes and the leaves of the trees. Notably, attributes with the prefix `nodes_*` are associated with interior nodes, and attributes with the prefix `leaf_*` are associated with leaves. + The attributes `nodes_*` must all have the same length and encode a sequence of tuples, as defined by taking all the `nodes_*` fields at a given position. - All fields prefixed with `leaf_*` represent tree leaves, and similarly define tuples of leaves and must have identical length. + All fields prefixed with `leaf_*` represent tree leaves, and similarly define tuples of leaves and must have identical length. - This operator can be used to implement both the previous `TreeEnsembleRegressor` and `TreeEnsembleClassifier` nodes. - The `TreeEnsembleRegressor` node maps directly to this node and requires changing how the nodes are represented. - The `TreeEnsembleClassifier` node can be implemented by adding a `ArgMax` node after this node to determine the top class. - To encode class labels, a `LabelEncoder` or `GatherND` operator may be used. + This operator can be used to implement both the previous `TreeEnsembleRegressor` and `TreeEnsembleClassifier` nodes. + The `TreeEnsembleRegressor` node maps directly to this node and requires changing how the nodes are represented. + The `TreeEnsembleClassifier` node can be implemented by adding a `ArgMax` node after this node to determine the top class. + To encode class labels, a `LabelEncoder` or `GatherND` operator may be used. - Parameters - ========== - X - Type T. - Input of shape [Batch Size, Number of Features] - aggregate_function - Attribute. - Defines how to aggregate leaf values within a target. One of 'AVERAGE' - (0) 'SUM' (1) 'MIN' (2) 'MAX (3) defaults to 'SUM' (1) - leaf_targetids - Attribute. - The index of the target that this leaf contributes to (this must be in - range ``[0, n_targets)``). - leaf_weights - Attribute. - The weight for each leaf. - membership_values - Attribute. - Members to test membership of for each set membership node. List all of - the members to test again in the order that the 'BRANCH_MEMBER' mode - appears in ``node_modes``, delimited by ``NaN``\ s. Will have the same - number of sets of values as nodes with mode 'BRANCH_MEMBER'. This may be - omitted if the node doesn't contain any 'BRANCH_MEMBER' nodes. - n_targets - Attribute. - The total number of targets. - nodes_falseleafs - Attribute. - 1 if false branch is leaf for each node and 0 if an interior node. 
To - represent a tree that is a leaf (only has one node), one can do so by - having a single ``nodes_*`` entry with true and false branches - referencing the same ``leaf_*`` entry - nodes_falsenodeids - Attribute. - If ``nodes_falseleafs`` is false at an entry, this represents the - position of the false branch node. This position can be used to index - into a ``nodes_*`` entry. If ``nodes_falseleafs`` is false, it is an - index into the leaf\_\* attributes. - nodes_featureids - Attribute. - Feature id for each node. - nodes_hitrates - Attribute. - Popularity of each node, used for performance and may be omitted. - nodes_missing_value_tracks_true - Attribute. - For each node, define whether to follow the true branch (if attribute - value is 1) or false branch (if attribute value is 0) in the presence of - a NaN input feature. This attribute may be left undefined and the - default value is false (0) for all nodes. - nodes_modes - Attribute. - The comparison operation performed by the node. This is encoded as an - enumeration of 0 ('BRANCH_LEQ'), 1 ('BRANCH_LT'), 2 ('BRANCH_GTE'), 3 - ('BRANCH_GT'), 4 ('BRANCH_EQ'), 5 ('BRANCH_NEQ'), and 6 - ('BRANCH_MEMBER'). Note this is a tensor of type uint8. - nodes_splits - Attribute. - Thresholds to do the splitting on for each node with mode that is not - 'BRANCH_MEMBER'. - nodes_trueleafs - Attribute. - 1 if true branch is leaf for each node and 0 an interior node. To - represent a tree that is a leaf (only has one node), one can do so by - having a single ``nodes_*`` entry with true and false branches - referencing the same ``leaf_*`` entry - nodes_truenodeids - Attribute. - If ``nodes_trueleafs`` is false at an entry, this represents the - position of the true branch node. This position can be used to index - into a ``nodes_*`` entry. If ``nodes_trueleafs`` is false, it is an - index into the leaf\_\* attributes. - post_transform - Attribute. - Indicates the transform to apply to the score. One of 'NONE' (0), - 'SOFTMAX' (1), 'LOGISTIC' (2), 'SOFTMAX_ZERO' (3) or 'PROBIT' (4), - defaults to 'NONE' (0) - tree_roots - Attribute. - Index into ``nodes_*`` for the root of each tree. The tree structure is - derived from the branching of each node. +Parameters +========== +X + Type T. + Input of shape [Batch Size, Number of Features] +aggregate_function + Attribute. + Defines how to aggregate leaf values within a target. One of 'AVERAGE' + (0) 'SUM' (1) 'MIN' (2) 'MAX (3) defaults to 'SUM' (1) +leaf_targetids + Attribute. + The index of the target that this leaf contributes to (this must be in + range ``[0, n_targets)``). +leaf_weights + Attribute. + The weight for each leaf. +membership_values + Attribute. + Members to test membership of for each set membership node. List all of + the members to test again in the order that the 'BRANCH_MEMBER' mode + appears in ``node_modes``, delimited by ``NaN``\ s. Will have the same + number of sets of values as nodes with mode 'BRANCH_MEMBER'. This may be + omitted if the node doesn't contain any 'BRANCH_MEMBER' nodes. +n_targets + Attribute. + The total number of targets. +nodes_falseleafs + Attribute. + 1 if false branch is leaf for each node and 0 if an interior node. To + represent a tree that is a leaf (only has one node), one can do so by + having a single ``nodes_*`` entry with true and false branches + referencing the same ``leaf_*`` entry +nodes_falsenodeids + Attribute. + If ``nodes_falseleafs`` is false at an entry, this represents the + position of the false branch node. 
This position can be used to index + into a ``nodes_*`` entry. If ``nodes_falseleafs`` is false, it is an + index into the leaf\_\* attributes. +nodes_featureids + Attribute. + Feature id for each node. +nodes_hitrates + Attribute. + Popularity of each node, used for performance and may be omitted. +nodes_missing_value_tracks_true + Attribute. + For each node, define whether to follow the true branch (if attribute + value is 1) or false branch (if attribute value is 0) in the presence of + a NaN input feature. This attribute may be left undefined and the + default value is false (0) for all nodes. +nodes_modes + Attribute. + The comparison operation performed by the node. This is encoded as an + enumeration of 0 ('BRANCH_LEQ'), 1 ('BRANCH_LT'), 2 ('BRANCH_GTE'), 3 + ('BRANCH_GT'), 4 ('BRANCH_EQ'), 5 ('BRANCH_NEQ'), and 6 + ('BRANCH_MEMBER'). Note this is a tensor of type uint8. +nodes_splits + Attribute. + Thresholds to do the splitting on for each node with mode that is not + 'BRANCH_MEMBER'. +nodes_trueleafs + Attribute. + 1 if true branch is leaf for each node and 0 an interior node. To + represent a tree that is a leaf (only has one node), one can do so by + having a single ``nodes_*`` entry with true and false branches + referencing the same ``leaf_*`` entry +nodes_truenodeids + Attribute. + If ``nodes_trueleafs`` is false at an entry, this represents the + position of the true branch node. This position can be used to index + into a ``nodes_*`` entry. If ``nodes_trueleafs`` is false, it is an + index into the leaf\_\* attributes. +post_transform + Attribute. + Indicates the transform to apply to the score. One of 'NONE' (0), + 'SOFTMAX' (1), 'LOGISTIC' (2), 'SOFTMAX_ZERO' (3) or 'PROBIT' (4), + defaults to 'NONE' (0) +tree_roots + Attribute. + Index into ``nodes_*`` for the root of each tree. The tree structure is + derived from the branching of each node. - Returns - ======= - Y : Var - Type T. - Output of shape [Batch Size, Number of targets] +Returns +======= +Y : Var + Type T. + Output of shape [Batch Size, Number of targets] - Notes - ===== - Signature: ``ai.onnx.ml@5::TreeEnsemble``. +Notes +===== +Signature: ``ai.onnx.ml@5::TreeEnsemble``. 
- Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _TreeEnsemble( - _TreeEnsemble.Attributes( - aggregate_function=AttrInt64( - aggregate_function, name="aggregate_function" - ), - leaf_targetids=AttrInt64s(leaf_targetids, name="leaf_targetids"), - leaf_weights=AttrTensor(leaf_weights, name="leaf_weights"), - membership_values=AttrTensor.maybe( - membership_values, name="membership_values" - ), - n_targets=AttrInt64.maybe(n_targets, name="n_targets"), - nodes_falseleafs=AttrInt64s(nodes_falseleafs, name="nodes_falseleafs"), - nodes_falsenodeids=AttrInt64s( - nodes_falsenodeids, name="nodes_falsenodeids" - ), - nodes_featureids=AttrInt64s(nodes_featureids, name="nodes_featureids"), - nodes_hitrates=AttrTensor.maybe(nodes_hitrates, name="nodes_hitrates"), - nodes_missing_value_tracks_true=AttrInt64s.maybe( - nodes_missing_value_tracks_true, - name="nodes_missing_value_tracks_true", - ), - nodes_modes=AttrTensor(nodes_modes, name="nodes_modes"), - nodes_splits=AttrTensor(nodes_splits, name="nodes_splits"), - nodes_trueleafs=AttrInt64s(nodes_trueleafs, name="nodes_trueleafs"), - nodes_truenodeids=AttrInt64s( - nodes_truenodeids, name="nodes_truenodeids" - ), - post_transform=AttrInt64(post_transform, name="post_transform"), - tree_roots=AttrInt64s(tree_roots, name="tree_roots"), - ), - _TreeEnsemble.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) + return _TreeEnsemble( + _TreeEnsemble.Attributes( + aggregate_function=AttrInt64(aggregate_function, name="aggregate_function"), + leaf_targetids=AttrInt64s(leaf_targetids, name="leaf_targetids"), + leaf_weights=AttrTensor(leaf_weights, name="leaf_weights"), + membership_values=AttrTensor.maybe(membership_values, name="membership_values"), + n_targets=AttrInt64.maybe(n_targets, name="n_targets"), + nodes_falseleafs=AttrInt64s(nodes_falseleafs, name="nodes_falseleafs"), + nodes_falsenodeids=AttrInt64s(nodes_falsenodeids, name="nodes_falsenodeids"), + nodes_featureids=AttrInt64s(nodes_featureids, name="nodes_featureids"), + nodes_hitrates=AttrTensor.maybe(nodes_hitrates, name="nodes_hitrates"), + nodes_missing_value_tracks_true=AttrInt64s.maybe(nodes_missing_value_tracks_true, name="nodes_missing_value_tracks_true"), + nodes_modes=AttrTensor(nodes_modes, name="nodes_modes"), + nodes_splits=AttrTensor(nodes_splits, name="nodes_splits"), + nodes_trueleafs=AttrInt64s(nodes_trueleafs, name="nodes_trueleafs"), + nodes_truenodeids=AttrInt64s(nodes_truenodeids, name="nodes_truenodeids"), + post_transform=AttrInt64(post_transform, name="post_transform"), + tree_roots=AttrInt64s(tree_roots, name="tree_roots"), + ), _TreeEnsemble.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y _OPERATORS = { @@ -306,4 +264,4 @@ def tree_ensemble( "ZipMap": zip_map, } -__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] +__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] diff --git a/src/spox/opset/ai/onnx/v17.py b/src/spox/opset/ai/onnx/v17.py index cc4ffc6..023e5da 100644 --- a/src/spox/opset/ai/onnx/v17.py +++ b/src/spox/opset/ai/onnx/v17.py @@ -1,18 +1,21 @@ -# Copyright (c) QuantCo 2023-2024 -# SPDX-License-Identifier: BSD-3-Clause - # ruff: noqa: E741 -- Allow ambiguous variable name -from collections.abc import Iterable, Sequence +import typing +import warnings from dataclasses import dataclass +from collections.abc import Iterable, Sequence from typing import ( + Any, 
Callable, Optional, + Union, ) from typing import cast as typing_cast import numpy as np import numpy.typing as npt +from spox._var import Var, VarInfo, result_type, unwrap_vars, get_value +from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._attributes import ( AttrDtype, AttrFloat32, @@ -25,14 +28,12 @@ AttrTensor, AttrType, ) -from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._graph import Graph, subgraph +from spox._internal_op import intro from spox._node import OpType from spox._standard import InferenceError, StandardNode -from spox._type_system import Sequence as SpoxSequence -from spox._type_system import Tensor, Type +from spox._type_system import Tensor, Type, Sequence as SpoxSequence from spox._value_prop import PropValueType -from spox._var import Var, VarInfo, get_value, unwrap_vars class _Abs(StandardNode): @@ -54,7 +55,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Acos(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -74,7 +74,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Acosh(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -94,7 +93,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Add(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -115,7 +113,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _And(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -136,7 +133,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ArgMax(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -158,7 +154,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ArgMin(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -180,7 +175,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Asin(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -200,7 +194,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Asinh(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -220,7 +213,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Atan(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -240,7 +232,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Atanh(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -260,7 +251,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _AveragePool(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -285,7 +275,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _BatchNormalization(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -313,7 +302,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Bernoulli(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -334,7 +322,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _BitShift(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -355,7 +342,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _BlackmanWindow(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -376,7 +362,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Cast(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -396,7 +381,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _CastLike(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -417,7 
+401,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Ceil(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -437,7 +420,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Celu(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -457,7 +439,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Clip(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -479,7 +460,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Compress(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -494,39 +474,30 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): output: VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: self.infer_output_types_onnx() - inp, cond = ( - self.inputs.input.unwrap_tensor(), - self.inputs.condition.unwrap_tensor(), - ) + inp, cond = self.inputs.input.unwrap_tensor(), self.inputs.condition.unwrap_tensor() if not inp.shape: - return {"output": Tensor(inp.dtype, None)} + return {'output': Tensor(inp.dtype, None)} if cond.dtype != np.dtype(bool): raise InferenceError("Compress input 'condition' must be a boolean dtype.") if cond.shape and len(cond.shape) != 1: - raise InferenceError( - "Compress input 'condition' must be a vector (of rank 1)." - ) + raise InferenceError("Compress input 'condition' must be a vector (of rank 1).") if self.attrs.axis is not None: shape = list(inp.shape) axis = self.attrs.axis.value if not (-len(shape) <= axis < len(shape)): - raise InferenceError( - f"Compress attribute 'axis' must in range [-rank, rank-1] (rank={len(shape)})." - ) + raise InferenceError(f"Compress attribute 'axis' must in range [-rank, rank-1] (rank={len(shape)}).") shape[axis] = None else: shape = [None] - return {"output": Tensor(inp.dtype, tuple(shape))} - + return {'output': Tensor(inp.dtype, tuple(shape))} op_type = OpType("Compress", "", 11) attrs: Attributes inputs: Inputs outputs: Outputs - class _Concat(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -546,7 +517,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ConcatFromSequence(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -567,7 +537,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Constant(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -586,9 +555,7 @@ class Outputs(BaseOutputs): output: VarInfo def propagate_values(self, initializers) -> dict[str, PropValueType]: - ((key, raw),) = ( - (k, v.value) for k, v in self.attrs.get_fields().items() if v is not None - ) + ((key, raw),) = ((k, v.value) for k, v in self.attrs.get_fields().items() if v is not None) if key == "value": value = raw elif key == "value_float": @@ -606,18 +573,14 @@ def propagate_values(self, initializers) -> dict[str, PropValueType]: elif key == "sparse_value": return {} else: - raise RuntimeError( - f"Could not extract the set Constant value attribute, got: {key}" - ) + raise RuntimeError(f"Could not extract the set Constant value attribute, got: {key}") return {"output": value} - op_type = OpType("Constant", "", 13) attrs: Attributes inputs: BaseInputs outputs: Outputs - class _ConstantOfShape(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -637,7 +600,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Conv(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -664,7 +626,6 @@ class Outputs(BaseOutputs): 
inputs: Inputs outputs: Outputs - class _ConvInteger(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -692,7 +653,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ConvTranspose(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -721,7 +681,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Cos(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -741,7 +700,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Cosh(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -761,7 +719,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _CumSum(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -783,7 +740,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _DFT(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -806,7 +762,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _DepthToSpace(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -827,7 +782,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _DequantizeLinear(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -849,7 +803,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Det(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -869,7 +822,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Div(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -890,7 +842,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Dropout(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -913,7 +864,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _DynamicQuantizeLinear(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -935,7 +885,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Einsum(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -955,7 +904,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Elu(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -975,7 +923,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Equal(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -996,7 +943,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Erf(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1016,7 +962,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Exp(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1036,7 +981,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Expand(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1057,7 +1001,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _EyeLike(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1078,7 +1021,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Flatten(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1098,7 +1040,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Floor(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1118,7 +1059,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GRU(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1151,7 +1091,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Gather(StandardNode): @dataclass class 
Attributes(BaseAttributes): @@ -1172,7 +1111,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GatherElements(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1193,7 +1131,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GatherND(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1214,7 +1151,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Gemm(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1239,7 +1175,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GlobalAveragePool(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1259,7 +1194,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GlobalLpPool(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1279,7 +1213,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GlobalMaxPool(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1299,7 +1232,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Greater(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1320,7 +1252,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GreaterOrEqual(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1341,7 +1272,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GridSample(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1364,7 +1294,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _HammingWindow(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1385,7 +1314,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _HannWindow(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1406,7 +1334,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _HardSigmoid(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1427,7 +1354,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _HardSwish(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1447,7 +1373,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Hardmax(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1467,7 +1392,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Identity(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1487,7 +1411,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _If(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1508,7 +1431,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _InstanceNormalization(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1530,7 +1452,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _IsInf(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1551,7 +1472,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _IsNaN(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1571,7 +1491,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _LRN(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1594,7 +1513,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _LSTM(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1630,7 +1548,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _LayerNormalization(StandardNode): @dataclass 
class Attributes(BaseAttributes): @@ -1656,7 +1573,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _LeakyRelu(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1676,7 +1592,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Less(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1697,7 +1612,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _LessOrEqual(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1718,7 +1632,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Log(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1738,7 +1651,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _LogSoftmax(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1758,7 +1670,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Loop(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1774,7 +1685,7 @@ class Inputs(BaseInputs): class Outputs(BaseOutputs): v_final_and_scan_outputs: Sequence[VarInfo] - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: output_types = super().infer_output_types() body = self.attrs.body.value @@ -1787,14 +1698,12 @@ def infer_output_types(self) -> dict[str, Type]: output_types[name] = typ return output_types - op_type = OpType("Loop", "", 16) attrs: Attributes inputs: Inputs outputs: Outputs - class _LpNormalization(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1815,7 +1724,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _LpPool(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1839,7 +1747,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _MatMul(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1860,7 +1767,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _MatMulInteger(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1883,7 +1789,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Max(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1903,7 +1808,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _MaxPool(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1930,7 +1834,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _MaxRoiPool(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1952,7 +1855,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _MaxUnpool(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1976,7 +1878,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Mean(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -1996,7 +1897,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _MeanVarianceNormalization(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2016,7 +1916,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _MelWeightMatrix(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2040,7 +1939,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Min(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2060,7 +1958,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Mod(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2081,7 +1978,6 @@ class 
Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Mul(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2102,7 +1998,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Multinomial(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2124,7 +2019,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Neg(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2144,7 +2038,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _NegativeLogLikelihoodLoss(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2167,7 +2060,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _NonMaxSuppression(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2191,7 +2083,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _NonZero(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2211,7 +2102,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Not(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2231,7 +2121,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _OneHot(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2253,7 +2142,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Optional(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2273,7 +2161,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _OptionalGetElement(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2293,7 +2180,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _OptionalHasElement(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2313,7 +2199,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Or(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2334,7 +2219,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _PRelu(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2355,7 +2239,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Pad(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2377,7 +2260,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Pow(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2398,7 +2280,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _QLinearConv(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2431,7 +2312,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _QLinearMatMul(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2458,7 +2338,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _QuantizeLinear(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2480,7 +2359,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _RNN(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2512,7 +2390,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _RandomNormal(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2534,7 +2411,6 @@ class Outputs(BaseOutputs): inputs: BaseInputs outputs: Outputs - class _RandomNormalLike(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2557,7 +2433,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _RandomUniform(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2579,7 +2454,6 
@@ class Outputs(BaseOutputs): inputs: BaseInputs outputs: Outputs - class _RandomUniformLike(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2602,7 +2476,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Range(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2624,7 +2497,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Reciprocal(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2644,7 +2516,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceL1(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2665,7 +2536,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceL2(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2686,7 +2556,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceLogSum(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2707,7 +2576,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceLogSumExp(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2728,7 +2596,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceMax(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2749,7 +2616,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceMean(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2770,7 +2636,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceMin(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2791,7 +2656,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceProd(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2812,7 +2676,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceSum(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2834,7 +2697,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceSumSquare(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2855,7 +2717,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Relu(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2875,7 +2736,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Reshape(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2896,7 +2756,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Resize(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2924,7 +2783,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReverseSequence(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2946,7 +2804,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _RoiAlign(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2973,7 +2830,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Round(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -2993,7 +2849,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _STFT(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3016,7 +2871,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Scan(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3041,7 +2895,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ScatterElements(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3064,7 +2917,6 
@@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ScatterND(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3086,7 +2938,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Selu(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3107,7 +2958,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _SequenceAt(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3128,7 +2978,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _SequenceConstruct(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3148,7 +2997,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _SequenceEmpty(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3166,7 +3014,6 @@ class Outputs(BaseOutputs): inputs: BaseInputs outputs: Outputs - class _SequenceErase(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3187,7 +3034,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _SequenceInsert(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3209,7 +3055,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _SequenceLength(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3229,7 +3074,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _SequenceMap(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3250,7 +3094,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Shape(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3271,7 +3114,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Shrink(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3292,7 +3134,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Sigmoid(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3312,7 +3153,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Sign(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3332,7 +3172,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Sin(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3352,7 +3191,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Sinh(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3372,7 +3210,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Size(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3392,7 +3229,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Slice(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3416,7 +3252,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Softmax(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3436,7 +3271,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _SoftmaxCrossEntropyLoss(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3460,7 +3294,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Softplus(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3480,7 +3313,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Softsign(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3500,7 +3332,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _SpaceToDepth(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3520,7 +3351,6 @@ 
class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Split(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3541,7 +3371,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _SplitToSequence(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3563,7 +3392,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Sqrt(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3583,7 +3411,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Squeeze(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3604,7 +3431,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _StringNormalizer(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3627,7 +3453,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Sub(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3648,7 +3473,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Sum(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3668,7 +3492,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Tan(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3688,7 +3511,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Tanh(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3708,7 +3530,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _TfIdfVectorizer(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3736,7 +3557,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ThresholdedRelu(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3756,7 +3576,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Tile(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3777,7 +3596,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _TopK(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3801,7 +3619,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Transpose(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3821,7 +3638,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Trilu(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3842,7 +3658,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Unique(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3866,7 +3681,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Unsqueeze(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3887,7 +3701,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Where(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3909,7 +3722,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Xor(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -3930,12781 +3742,10266 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - -def abs( - X: Var, -) -> Var: +def abs(X: Var, ) -> Var: r""" - Absolute takes one input data (Tensor) and produces one output data - (Tensor) where absolute value, y = abs(x), is applied to the tensor - elementwise. - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@13::Abs``. 
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Abs( - _Abs.Attributes(), - _Abs.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def acos( - input: Var, -) -> Var: - r""" - Calculates the arccosine (inverse of cosine) of the given input tensor, - element-wise. +Absolute takes one input data (Tensor) and produces one output data +(Tensor) where absolute value, y = abs(x), is applied to the tensor +elementwise. - Parameters - ========== - input - Type T. - Input tensor +Parameters +========== +X + Type T. + Input tensor - Returns - ======= - output : Var - Type T. - The arccosine of the input tensor computed element-wise +Returns +======= +Y : Var + Type T. + Output tensor - Notes - ===== - Signature: ``ai.onnx@7::Acos``. +Notes +===== +Signature: ``ai.onnx@13::Abs``. - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Acos( - _Acos.Attributes(), - _Acos.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _Abs( + _Abs.Attributes( + ), _Abs.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y -def acosh( - input: Var, -) -> Var: +def acos(input: Var, ) -> Var: r""" - Calculates the hyperbolic arccosine of the given input tensor - element-wise. +Calculates the arccosine (inverse of cosine) of the given input tensor, +element-wise. - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The hyperbolic arccosine values of the input tensor computed - element-wise - - Notes - ===== - Signature: ``ai.onnx@9::Acosh``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Acosh( - _Acosh.Attributes(), - _Acosh.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) +Parameters +========== +input + Type T. + Input tensor +Returns +======= +output : Var + Type T. + The arccosine of the input tensor computed element-wise -def add( - A: Var, - B: Var, -) -> Var: - r""" - Performs element-wise binary addition (with Numpy-style broadcasting - support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - (Opset 14 change): Extend supported types to include uint8, int8, - uint16, and int16. - - Parameters - ========== - A - Type T. - First operand. - B - Type T. - Second operand. - - Returns - ======= - C : Var - Type T. - Result, has same element type as two inputs - - Notes - ===== - Signature: ``ai.onnx@14::Add``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Notes +===== +Signature: ``ai.onnx@7::Acos``. 
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _Add( - _Add.Attributes(), - _Add.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) + return _Acos( + _Acos.Attributes( + ), _Acos.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def and_( - A: Var, - B: Var, -) -> Var: +def acosh(input: Var, ) -> Var: r""" - Returns the tensor resulted from performing the ``and`` logical - operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the logical operator. - B - Type T. - Second input operand for the logical operator. - - Returns - ======= - C : Var - Type T1. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@7::And``. - - Type constraints: - - T: `tensor(bool)` - - T1: `tensor(bool)` - """ - return ( - _And( - _And.Attributes(), - _And.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) +Calculates the hyperbolic arccosine of the given input tensor +element-wise. +Parameters +========== +input + Type T. + Input tensor -def arg_max( - data: Var, - *, - axis: int = 0, - keepdims: int = 1, - select_last_index: int = 0, -) -> Var: - r""" - Computes the indices of the max elements of the input tensor's element - along the provided axis. The resulting tensor has the same rank as the - input if keepdims equals 1. If keepdims equals 0, then the resulting - tensor has the reduced dimension pruned. If select_last_index is True - (default False), the index of the last occurrence of the max is selected - if the max appears more than once in the input. Otherwise the index of - the first occurrence is selected. The type of the output tensor is - integer. - - Parameters - ========== - data - Type T. - An input tensor. - axis - Attribute. - The axis in which to compute the arg indices. Accepted range is [-r, - r-1] where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - select_last_index - Attribute. - Whether to select the last index or the first index if the {name} - appears in multiple indices, default is False (first index). - - Returns - ======= - reduced : Var - Type tensor(int64). - Reduced output tensor with integer data type. - - Notes - ===== - Signature: ``ai.onnx@13::ArgMax``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Returns +======= +output : Var + Type T. + The hyperbolic arccosine values of the input tensor computed + element-wise + +Notes +===== +Signature: ``ai.onnx@9::Acosh``. 
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _ArgMax( - _ArgMax.Attributes( - axis=AttrInt64(axis, name="axis"), - keepdims=AttrInt64(keepdims, name="keepdims"), - select_last_index=AttrInt64( - select_last_index, name="select_last_index" - ), - ), - _ArgMax.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) + return _Acosh( + _Acosh.Attributes( + ), _Acosh.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def arg_min( - data: Var, - *, - axis: int = 0, - keepdims: int = 1, - select_last_index: int = 0, -) -> Var: - r""" - Computes the indices of the min elements of the input tensor's element - along the provided axis. The resulting tensor has the same rank as the - input if keepdims equals 1. If keepdims equals 0, then the resulting - tensor has the reduced dimension pruned. If select_last_index is True - (default False), the index of the last occurrence of the min is selected - if the min appears more than once in the input. Otherwise the index of - the first occurrence is selected. The type of the output tensor is - integer. - - Parameters - ========== - data - Type T. - An input tensor. - axis - Attribute. - The axis in which to compute the arg indices. Accepted range is [-r, - r-1] where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - select_last_index - Attribute. - Whether to select the last index or the first index if the {name} - appears in multiple indices, default is False (first index). - - Returns - ======= - reduced : Var - Type tensor(int64). - Reduced output tensor with integer data type. - - Notes - ===== - Signature: ``ai.onnx@13::ArgMin``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +def add(A: Var, B: Var, ) -> Var: + r""" +Performs element-wise binary addition (with Numpy-style broadcasting +support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +(Opset 14 change): Extend supported types to include uint8, int8, +uint16, and int16. + +Parameters +========== +A + Type T. + First operand. +B + Type T. + Second operand. + +Returns +======= +C : Var + Type T. + Result, has same element type as two inputs + +Notes +===== +Signature: ``ai.onnx@14::Add``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Add( + _Add.Attributes( + ), _Add.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def and_(A: Var, B: Var, ) -> Var: + r""" +Returns the tensor resulted from performing the ``and`` logical +operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the logical operator. +B + Type T. + Second input operand for the logical operator. + +Returns +======= +C : Var + Type T1. + Result tensor. 
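# Editor's note -- a short end-to-end sketch, not part of the generated code
# being diffed, showing the and_ constructor documented above in use. Only the
# public spox API (argument, build, Tensor) is assumed.
import numpy as np

import spox.opset.ai.onnx.v17 as op
from spox import Tensor, argument, build

a = argument(Tensor(np.bool_, ("N",)))
b = argument(Tensor(np.bool_, ("N",)))
both = op.and_(a, b)  # elementwise logical AND with Numpy-style broadcasting
and_model = build(inputs={"a": a, "b": b}, outputs={"both": both})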
+ +Notes +===== +Signature: ``ai.onnx@7::And``. + +Type constraints: + - T: `tensor(bool)` + - T1: `tensor(bool)` + """ + return _And( + _And.Attributes( + ), _And.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def arg_max(data: Var, *, axis: int = 0, keepdims: int = 1, select_last_index: int = 0, ) -> Var: + r""" +Computes the indices of the max elements of the input tensor's element +along the provided axis. The resulting tensor has the same rank as the +input if keepdims equals 1. If keepdims equals 0, then the resulting +tensor has the reduced dimension pruned. If select_last_index is True +(default False), the index of the last occurrence of the max is selected +if the max appears more than once in the input. Otherwise the index of +the first occurrence is selected. The type of the output tensor is +integer. + +Parameters +========== +data + Type T. + An input tensor. +axis + Attribute. + The axis in which to compute the arg indices. Accepted range is [-r, + r-1] where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +select_last_index + Attribute. + Whether to select the last index or the first index if the {name} + appears in multiple indices, default is False (first index). + +Returns +======= +reduced : Var + Type tensor(int64). + Reduced output tensor with integer data type. + +Notes +===== +Signature: ``ai.onnx@13::ArgMax``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _ArgMax( + _ArgMax.Attributes( + axis=AttrInt64(axis, name="axis"), + keepdims=AttrInt64(keepdims, name="keepdims"), + select_last_index=AttrInt64(select_last_index, name="select_last_index"), + ), _ArgMax.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def arg_min(data: Var, *, axis: int = 0, keepdims: int = 1, select_last_index: int = 0, ) -> Var: + r""" +Computes the indices of the min elements of the input tensor's element +along the provided axis. The resulting tensor has the same rank as the +input if keepdims equals 1. If keepdims equals 0, then the resulting +tensor has the reduced dimension pruned. If select_last_index is True +(default False), the index of the last occurrence of the min is selected +if the min appears more than once in the input. Otherwise the index of +the first occurrence is selected. The type of the output tensor is +integer. + +Parameters +========== +data + Type T. + An input tensor. +axis + Attribute. + The axis in which to compute the arg indices. Accepted range is [-r, + r-1] where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +select_last_index + Attribute. + Whether to select the last index or the first index if the {name} + appears in multiple indices, default is False (first index). + +Returns +======= +reduced : Var + Type tensor(int64). + Reduced output tensor with integer data type. + +Notes +===== +Signature: ``ai.onnx@13::ArgMin``. 
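To make the ``ArgMax``/``ArgMin`` attributes above concrete, a small numpy sketch of the ``keepdims`` and ``select_last_index`` semantics (numpy itself only returns the first occurrence, so the last occurrence is emulated by flipping):

::

    import numpy as np

    data = np.array([[1.0, 3.0, 3.0]])
    axis = 1

    first = np.argmax(data, axis=axis)  # [1]  -- select_last_index=0 (default)
    last = data.shape[axis] - 1 - np.argmax(np.flip(data, axis), axis=axis)  # [2]
    kept = np.expand_dims(first, axis)  # keepdims=1 keeps the reduced axis: shape (1, 1)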
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _ArgMin( + _ArgMin.Attributes( + axis=AttrInt64(axis, name="axis"), + keepdims=AttrInt64(keepdims, name="keepdims"), + select_last_index=AttrInt64(select_last_index, name="select_last_index"), + ), _ArgMin.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def asin(input: Var, ) -> Var: + r""" +Calculates the arcsine (inverse of sine) of the given input tensor, +element-wise. + +Parameters +========== +input + Type T. + Input tensor + +Returns +======= +output : Var + Type T. + The arcsine of the input tensor computed element-wise + +Notes +===== +Signature: ``ai.onnx@7::Asin``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Asin( + _Asin.Attributes( + ), _Asin.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def asinh(input: Var, ) -> Var: + r""" +Calculates the hyperbolic arcsine of the given input tensor +element-wise. + +Parameters +========== +input + Type T. + Input tensor + +Returns +======= +output : Var + Type T. + The hyperbolic arcsine values of the input tensor computed element-wise + +Notes +===== +Signature: ``ai.onnx@9::Asinh``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _ArgMin( - _ArgMin.Attributes( - axis=AttrInt64(axis, name="axis"), - keepdims=AttrInt64(keepdims, name="keepdims"), - select_last_index=AttrInt64( - select_last_index, name="select_last_index" - ), - ), - _ArgMin.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) + return _Asinh( + _Asinh.Attributes( + ), _Asinh.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def asin( - input: Var, -) -> Var: +def atan(input: Var, ) -> Var: r""" - Calculates the arcsine (inverse of sine) of the given input tensor, - element-wise. +Calculates the arctangent (inverse of tangent) of the given input +tensor, element-wise. - Parameters - ========== - input - Type T. - Input tensor +Parameters +========== +input + Type T. + Input tensor - Returns - ======= - output : Var - Type T. - The arcsine of the input tensor computed element-wise +Returns +======= +output : Var + Type T. + The arctangent of the input tensor computed element-wise - Notes - ===== - Signature: ``ai.onnx@7::Asin``. +Notes +===== +Signature: ``ai.onnx@7::Atan``. - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _Asin( - _Asin.Attributes(), - _Asin.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _Atan( + _Atan.Attributes( + ), _Atan.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def asinh( - input: Var, -) -> Var: +def atanh(input: Var, ) -> Var: r""" - Calculates the hyperbolic arcsine of the given input tensor - element-wise. +Calculates the hyperbolic arctangent of the given input tensor +element-wise. - Parameters - ========== - input - Type T. - Input tensor +Parameters +========== +input + Type T. + Input tensor - Returns - ======= - output : Var - Type T. 
- The hyperbolic arcsine values of the input tensor computed element-wise +Returns +======= +output : Var + Type T. + The hyperbolic arctangent values of the input tensor computed + element-wise - Notes - ===== - Signature: ``ai.onnx@9::Asinh``. +Notes +===== +Signature: ``ai.onnx@9::Atanh``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Atanh( + _Atanh.Attributes( + ), _Atanh.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def average_pool(X: Var, *, auto_pad: str = "NOTSET", ceil_mode: int = 0, count_include_pad: int = 0, kernel_shape: Iterable[int], pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: + r""" +AveragePool consumes an input tensor X and applies average pooling +across the tensor according to kernel sizes, stride sizes, and pad +lengths. average pooling consisting of computing the average on all +values of a subset of the input tensor according to the kernel size and +downsampling the data into the output tensor Y for further processing. +The output spatial shape will be following: + +:: + + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) + +or + +:: + + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) + +if ceil_mode is enabled + +:: + + * pad_shape[i] is sum of pads along axis i + +``auto_pad`` is a DEPRECATED attribute. If you are using them currently, +the output spatial shape will be following when ceil_mode is enabled: + +:: + + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + +or when ceil_mode is disabled: + +:: + + VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor(input_spatial_shape[i] / strides_spatial_shape[i]) + +And pad shape will be following if ``SAME_UPPER`` or ``SAME_LOWER``: + +:: + + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] + +The output of each pooling window is divided by the number of elements +(exclude pad when attribute count_include_pad is zero). + +Parameters +========== +X + Type T. + Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. +auto_pad + Attribute. + auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where + default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that + ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis + ``i``. 
The padding is split between the two sides equally or almost + equally (depending on whether it is even or odd). In case the padding is + an odd number, the extra padding is added at the end for SAME_UPPER and + at the beginning for SAME_LOWER. +ceil_mode + Attribute. + Whether to use ceil or floor (default) to compute the output shape. +count_include_pad + Attribute. + Whether include pad pixels when calculating values for the edges. + Default is 0, doesn't count include pad. +kernel_shape + Attribute. + The size of the kernel along each axis. +pads + Attribute. + Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. ``pads`` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis ``i`` and xi_end, the number of pixels + added at the end of axis ``i``. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. +strides + Attribute. + Stride along each spatial axis. If not present, the stride defaults to 1 + along each spatial axis. + +Returns +======= +Y : Var + Type T. + Output data tensor from average or max pooling across the input tensor. + Dimensions will vary based on various kernel, stride, and pad sizes. + Floor value of the dimension is used + +Notes +===== +Signature: ``ai.onnx@11::AveragePool``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _AveragePool( + _AveragePool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + count_include_pad=AttrInt64(count_include_pad, name="count_include_pad"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _AveragePool.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def batch_normalization(X: Var, scale: Var, B: Var, input_mean: Var, input_var: Var, *, epsilon: float = 9.999999747378752e-06, momentum: float = 0.8999999761581421, training_mode: int = 0, ) -> tuple[Var, Var, Var]: + r""" +Carries out batch normalization as described in the paper +https://arxiv.org/abs/1502.03167. Depending on the mode it is being run, +There are five required inputs 'X', 'scale', 'B', 'input_mean' and +'input_var'. Note that 'input_mean' and 'input_var' are expected to be +the estimated statistics in inference mode (training_mode=False, +default), and the running statistics in training mode +(training_mode=True). There are multiple cases for the number of +outputs, which we list below: + +- Output case #1: Y, running_mean, running_var (training_mode=True) +- Output case #2: Y (training_mode=False) + +When training_mode=False, extra outputs are invalid. 
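A quick arithmetic check of the ``AveragePool`` output-shape formulas quoted above, for one spatial axis with illustrative values (dilation is taken as 1, as in this opset version):

::

    import math

    input_size, kernel, stride, dilation = 32, 3, 2, 1
    pad_total = 1 + 1  # sum of begin and end padding on this axis

    effective_kernel = (kernel - 1) * dilation + 1
    out_floor = math.floor((input_size + pad_total - effective_kernel) / stride + 1)  # 16 (ceil_mode=0)
    out_ceil = math.ceil((input_size + pad_total - effective_kernel) / stride + 1)    # 17 (ceil_mode=1)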
The outputs are +updated as follows when training_mode=True: + +:: + + running_mean = input_mean * momentum + current_mean * (1 - momentum) + running_var = input_var * momentum + current_var * (1 - momentum) + + Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B + +where: + +:: + + current_mean = ReduceMean(X, axis=all_except_channel_index) + current_var = ReduceVar(X, axis=all_except_channel_index) + +Notice that ``ReduceVar`` refers to the population variance, and it +equals to ``sum(sqrd(x_i - x_avg)) / N`` where ``N`` is the population +size (this formula does not use sample size ``N - 1``). + +The computation of ReduceMean and ReduceVar uses float to avoid overflow +for float16 inputs. + +When training_mode=False: + +:: + + Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B + +For previous (depreciated) non-spatial cases, implementors are suggested +to flatten the input shape to (N x C \* D1 \* D2 \* ... \* Dn) before a +BatchNormalization Op. This operator has **optional** inputs/outputs. +See `the doc `__ for +more details about the representation of optional arguments. An empty +string may be used in the place of an actual argument's name to indicate +a missing argument. Trailing optional arguments (those not followed by +an argument that is present) may also be simply omitted. + +Parameters +========== +X + Type T. + Input data tensor from the previous operator; dimensions are in the form + of (N x C x D1 x D2 ... Dn), where N is the batch size, C is the number + of channels. Statistics are computed for every channel of C over N and + D1 to Dn dimensions. For image data, input dimensions become (N x C x H + x W). The op also accepts single dimension input of size N in which case + C is assumed to be 1 +scale + Type T1. + Scale tensor of shape (C). +B + Type T1. + Bias tensor of shape (C). +input_mean + Type T2. + running (training) or estimated (testing) mean tensor of shape (C). +input_var + Type T2. + running (training) or estimated (testing) variance tensor of shape (C). +epsilon + Attribute. + The epsilon value to use to avoid division by zero. +momentum + Attribute. + Factor used in computing the running mean and variance.e.g., + running_mean = running_mean \* momentum + mean \* (1 - momentum). +training_mode + Attribute. + If set to true, it indicates BatchNormalization is being used for + training, and outputs 1 and 2 are to be computed. + +Returns +======= +Y : Var + Type T. + The output tensor of the same shape as X +running_mean : Var + Type T2. + The running mean after the BatchNormalization operator. +running_var : Var + Type T2. + The running variance after the BatchNormalization operator. This op uses + the population size (N) for calculating variance, and not the sample + size N-1. + +Notes +===== +Signature: ``ai.onnx@15::BatchNormalization``. 
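The inference-mode formula above (``training_mode=False``) transcribed into numpy, broadcasting the per-channel statistics over the remaining axes (shapes are illustrative):

::

    import numpy as np

    N, C, H, W = 2, 3, 4, 4
    X = np.random.randn(N, C, H, W).astype(np.float32)
    scale = np.ones(C, np.float32)
    B = np.zeros(C, np.float32)
    input_mean = np.zeros(C, np.float32)
    input_var = np.ones(C, np.float32)
    epsilon = 1e-5

    rs = (1, C, 1, 1)  # reshape per-channel vectors for broadcasting over N, H, W
    Y = (X - input_mean.reshape(rs)) / np.sqrt(input_var.reshape(rs) + epsilon) \
        * scale.reshape(rs) + B.reshape(rs)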
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _BatchNormalization( + _BatchNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + momentum=AttrFloat32(momentum, name="momentum"), + training_mode=AttrInt64(training_mode, name="training_mode"), + ), _BatchNormalization.Inputs( + X=unwrap_vars(X), scale=unwrap_vars(scale), B=unwrap_vars(B), input_mean=unwrap_vars(input_mean), input_var=unwrap_vars(input_var), ), ).get_output_vars( + X=get_value(X), scale=get_value(scale), B=get_value(B), input_mean=get_value(input_mean), input_var=get_value(input_var), )._unpack_to_any() + + +def bernoulli(input: Var, *, dtype: Optional[npt.DTypeLike] = None, seed: Optional[float] = None, ) -> Var: + r""" +Draws binary random numbers (0 or 1) from a Bernoulli distribution. The +input tensor should be a tensor containing probabilities p (a value in +the range [0,1]) to be used for drawing the binary random number, where +an output of 1 is produced with probability p and an output of 0 is +produced with probability (1-p). + +This operator is non-deterministic and may not produce the same values +in different implementations (even if a seed is specified). + +Parameters +========== +input + Type T1. + All values in input have to be in the range:[0, 1]. +dtype + Attribute. + The data type for the elements of the output tensor. if not specified, + we will use the data type of the input tensor. +seed + Attribute. + (Optional) Seed to the random generator, if not specified we will auto + generate one. + +Returns +======= +output : Var + Type T2. + The returned output tensor only has values 0 or 1, same shape as input + tensor. + +Notes +===== +Signature: ``ai.onnx@15::Bernoulli``. + +Type constraints: + - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Bernoulli( + _Bernoulli.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), _Bernoulli.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def bit_shift(X: Var, Y: Var, *, direction: str, ) -> Var: + r""" +Bitwise shift operator performs element-wise operation. For each input +element, if the attribute "direction" is "RIGHT", this operator moves +its binary representation toward the right side so that the input value +is effectively decreased. If the attribute "direction" is "LEFT", bits +of binary representation moves toward the left side, which results the +increase of its actual value. The input X is the tensor to be shifted +and another input Y specifies the amounts of shifting. For example, if +"direction" is "Right", X is [1, 4], and S is [1, 1], the corresponding +output Z would be [0, 2]. If "direction" is "LEFT" with X=[1, 2] and +S=[1, 2], the corresponding output Y would be [2, 8]. + +Because this operator supports Numpy-style broadcasting, X's and Y's +shapes are not necessarily identical. This operator supports +**multidirectional (i.e., Numpy-style) broadcasting**; for more details +please check `the +doc `__. + +Parameters +========== +X + Type T. 
+ First operand, input to be shifted. +Y + Type T. + Second operand, amounts of shift. +direction + Attribute. + Direction of moving bits. It can be either "RIGHT" (for right shift) or + "LEFT" (for left shift). + +Returns +======= +Z : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@11::BitShift``. + +Type constraints: + - T: `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _BitShift( + _BitShift.Attributes( + direction=AttrString(direction, name="direction"), + ), _BitShift.Inputs( + X=unwrap_vars(X), Y=unwrap_vars(Y), ), ).get_output_vars( + X=get_value(X), Y=get_value(Y), ).Z + + +def blackman_window(size: Var, *, output_datatype: int = 1, periodic: int = 1, ) -> Var: + r""" +Generates a Blackman window as described in the paper +https://ieeexplore.ieee.org/document/1455106. + +Parameters +========== +size + Type T1. + A scalar value indicating the length of the window. +output_datatype + Attribute. + The data type of the output tensor. Strictly must be one of the values + from DataType enum in TensorProto whose values correspond to T2. The + default value is 1 = FLOAT. +periodic + Attribute. + If 1, returns a window to be used as periodic function. If 0, return a + symmetric window. When 'periodic' is specified, hann computes a window + of length size + 1 and returns the first size points. The default value + is 1. + +Returns +======= +output : Var + Type T2. + A Blackman window with length: size. The output has the shape: [size]. + +Notes +===== +Signature: ``ai.onnx@17::BlackmanWindow``. + +Type constraints: + - T1: `tensor(int32)`, `tensor(int64)` + - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _BlackmanWindow( + _BlackmanWindow.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + periodic=AttrInt64(periodic, name="periodic"), + ), _BlackmanWindow.Inputs( + size=unwrap_vars(size), ), ).get_output_vars( + size=get_value(size), ).output + + +def cast(input: Var, *, to: npt.DTypeLike, ) -> Var: + r""" +The operator casts the elements of a given input tensor to a data type +specified by the 'to' argument and returns an output tensor of the same +size in the converted type. The 'to' argument must be one of the data +types specified in the 'DataType' enum field in the TensorProto message. + +Casting from string tensor in plain (e.g., "3.14" and "1000") and +scientific numeric representations (e.g., "1e-5" and "1E8") to float +types is supported. For example, converting string "100.5" to an integer +may yield result 100. There are some string literals reserved for +special floating-point values; "+INF" (and "INF"), "-INF", and "NaN" are +positive infinity, negative infinity, and not-a-number, respectively. +Any string which can exactly match "+INF" in a case-insensitive way +would be mapped to positive infinite. Similarly, this case-insensitive +rule is applied to "INF" and "NaN". When casting from numeric tensors to +string tensors, plain floating-point representation (such as +"314.15926") would be used. Converting non-numerical-literal string such +as "Hello World!" is an undefined behavior. Cases of converting string +representing floating-point arithmetic value, such as "2.718", to INT is +an undefined behavior. + +Conversion from a numerical type to any numerical type is always +allowed. 
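The ``BitShift`` examples given above, reproduced with numpy's element-wise shifts:

::

    import numpy as np

    X = np.array([1, 4], dtype=np.uint8)
    S = np.array([1, 1], dtype=np.uint8)
    np.right_shift(X, S)  # direction="RIGHT" -> [0, 2]

    np.left_shift(np.array([1, 2], np.uint8), np.array([1, 2], np.uint8))  # "LEFT" -> [2, 8]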
User must be aware of precision loss and value change caused by +range difference between two types. For example, a 64-bit float +3.1415926459 may be round to a 32-bit float 3.141592. Similarly, +converting an integer 36 to Boolean may produce 1 because we truncate +bits which can't be stored in the targeted type. + +In more detail, the conversion among numerical types should follow these +rules: + +- Casting from floating point to: + + - floating point: +/- infinity if OOR (out of range). + - fixed point: undefined if OOR. + - bool: +/- 0.0 to False; all else to True. + +- Casting from fixed point to: + + - floating point: +/- infinity if OOR. (+ infinity in the case of + uint) + - fixed point: when OOR, discard higher bits and reinterpret (with + respect to two's complement representation for signed types). For + example, 200 (int16) -> -56 (int8). + - bool: zero to False; nonzero to True. + +- Casting from bool to: + + - floating point: ``{1.0, 0.0}``. + - fixed point: ``{1, 0}``. + - bool: no change. + +Parameters +========== +input + Type T1. + Input tensor to be cast. +to + Attribute. + The data type to which the elements of the input tensor are cast. + Strictly must be one of the types from DataType enum in TensorProto + +Returns +======= +output : Var + Type T2. + Output tensor with the same shape as input with type specified by the + 'to' argument + +Notes +===== +Signature: ``ai.onnx@13::Cast``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Cast( + _Cast.Attributes( + to=AttrDtype(to, name="to"), + ), _Cast.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def cast_like(input: Var, target_type: Var, ) -> Var: + r""" +The operator casts the elements of a given input tensor (the first +input) to the same data type as the elements of the second input tensor. +See documentation of the Cast operator for further details. + +Parameters +========== +input + Type T1. + Input tensor to be cast. +target_type + Type T2. + The (first) input tensor will be cast to produce a tensor of the same + type as this (second input) tensor. + +Returns +======= +output : Var + Type T2. + Output tensor produced by casting the first input tensor to have the + same type as the second input tensor. + +Notes +===== +Signature: ``ai.onnx@15::CastLike``. 
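Two of the ``Cast`` behaviours described above, previewed with plain numpy conversions (numpy is only a stand-in here; the operator's corner cases such as out-of-range values are defined by the text above, not by numpy):

::

    import numpy as np

    np.array(["3.14", "1000", "1e-5", "1E8"]).astype(np.float32)  # string literals parse to float

    np.float64(3.1415926459).astype(np.float32)  # ~3.1415927: precision loss going to 32 bits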
+ +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _CastLike( + _CastLike.Attributes( + ), _CastLike.Inputs( + input=unwrap_vars(input), target_type=unwrap_vars(target_type), ), ).get_output_vars( + input=get_value(input), target_type=get_value(target_type), ).output + + +def ceil(X: Var, ) -> Var: + r""" +Ceil takes one input data (Tensor) and produces one output data +(Tensor) where the ceil is, y = ceil(x), is applied to the tensor +elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is +returned. + +Parameters +========== +X + Type T. + Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@13::Ceil``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Ceil( + _Ceil.Attributes( + ), _Ceil.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def celu(X: Var, *, alpha: float = 1.0, ) -> Var: + r""" +Continuously Differentiable Exponential Linear Units: Perform the linear +unit element-wise on the input tensor X using formula: + +:: + + max(0,x) + min(0,alpha*(exp(x/alpha)-1)) + +Parameters +========== +X + Type T. + Input tensor +alpha + Attribute. + The Alpha value in Celu formula which control the shape of the unit. The + default value is 1.0. + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@12::Celu``. + +Type constraints: + - T: `tensor(float)` + """ + return _Celu( + _Celu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), _Celu.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def clip(input: Var, min: Optional[Var] = None, max: Optional[Var] = None, ) -> Var: + r""" +Clip operator limits the given input within an interval. The interval is +specified by the inputs 'min' and 'max'. They default to +numeric_limits::lowest() and numeric_limits::max(), respectively. + +Parameters +========== +input + Type T. + Input tensor whose elements to be clipped +min + Type T. + Minimum value, under which element is replaced by min. It must be a + scalar(tensor of empty shape). +max + Type T. + Maximum value, above which element is replaced by max. It must be a + scalar(tensor of empty shape). + +Returns +======= +output : Var + Type T. + Output tensor with clipped input elements + +Notes +===== +Signature: ``ai.onnx@13::Clip``. 
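The ``Celu`` formula above as a short numpy function:

::

    import numpy as np

    def celu(x, alpha=1.0):
        # max(0, x) + min(0, alpha * (exp(x / alpha) - 1))
        return np.maximum(0.0, x) + np.minimum(0.0, alpha * (np.exp(x / alpha) - 1.0))

    celu(np.array([-2.0, 0.0, 3.0], dtype=np.float32))  # ~[-0.8647, 0.0, 3.0]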
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Clip( + _Clip.Attributes( + ), _Clip.Inputs( + input=unwrap_vars(input), min=unwrap_vars(min), max=unwrap_vars(max), ), ).get_output_vars( + input=get_value(input), min=get_value(min), max=get_value(max), ).output + + +def compress(input: Var, condition: Var, *, axis: Optional[int] = None, ) -> Var: + r""" +Selects slices from an input tensor along a given axis where condition +evaluates to True for each axis index. In case axis is not provided, +input is flattened before elements are selected. Compress behaves like +numpy.compress: +https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html + +Parameters +========== +input + Type T. + Tensor of rank r >= 1. +condition + Type T1. + Rank 1 tensor of booleans to indicate which slices or data elements to + be selected. Its length can be less than the input length along the axis + or the flattened input size if axis is not specified. In such cases data + slices or elements exceeding the condition length are discarded. +axis + Attribute. + (Optional) Axis along which to take slices. If not specified, input is + flattened before elements being selected. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). + +Returns +======= +output : Var + Type T. + Tensor of rank r if axis is specified. Otherwise output is a Tensor of + rank 1. + +Notes +===== +Signature: ``ai.onnx@11::Compress``. + +Type constraints: + - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(bool)` + """ + return _Compress( + _Compress.Attributes( + axis=AttrInt64.maybe(axis, name="axis"), + ), _Compress.Inputs( + input=unwrap_vars(input), condition=unwrap_vars(condition), ), ).get_output_vars( + input=get_value(input), condition=get_value(condition), ).output + + +def concat(inputs: Sequence[Var], *, axis: int, ) -> Var: + r""" +Concatenate a list of tensors into a single tensor. All input tensors +must have the same shape, except for the dimension size of the axis to +concatenate on. + +Parameters +========== +inputs + Type T. + List of tensors for concatenation +axis + Attribute. + Which axis to concat on. A negative value means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(inputs).. + +Returns +======= +concat_result : Var + Type T. + Concatenated tensor + +Notes +===== +Signature: ``ai.onnx@13::Concat``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Concat( + _Concat.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _Concat.Inputs( + inputs=unwrap_vars(inputs), ), ).get_output_vars( + inputs=get_value(inputs), ).concat_result + + +def concat_from_sequence(input_sequence: Var, *, axis: int, new_axis: int = 0, ) -> Var: + r""" +Concatenate a sequence of tensors into a single tensor. 
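Since ``Compress`` is documented above as behaving like ``numpy.compress``, the reference behaviour can be previewed directly:

::

    import numpy as np

    x = np.array([[1, 2], [3, 4], [5, 6]])
    np.compress([False, True, True], x, axis=0)  # keep rows 1 and 2 -> [[3, 4], [5, 6]]
    np.compress([False, True], x)                # no axis: flattened input, short condition -> [2]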
All input +tensors must have the same shape, except for the dimension size of the +axis to concatenate on. By default 'new_axis' is 0, the behavior is +similar to numpy.concatenate. When 'new_axis' is 1, the behavior is +similar to numpy.stack. + +Parameters +========== +input_sequence + Type S. + Sequence of tensors for concatenation +axis + Attribute. + Which axis to concat on. Accepted range in ``[-r, r - 1]``, where ``r`` + is the rank of input tensors. When ``new_axis`` is 1, accepted range is + ``[-r - 1, r]``. +new_axis + Attribute. + Insert and concatenate on a new axis or not, default 0 means do not + insert new axis. + +Returns +======= +concat_result : Var + Type T. + Concatenated tensor + +Notes +===== +Signature: ``ai.onnx@11::ConcatFromSequence``. + +Type constraints: + - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` + - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _ConcatFromSequence( + _ConcatFromSequence.Attributes( + axis=AttrInt64(axis, name="axis"), + new_axis=AttrInt64(new_axis, name="new_axis"), + ), _ConcatFromSequence.Inputs( + input_sequence=unwrap_vars(input_sequence), ), ).get_output_vars( + input_sequence=get_value(input_sequence), ).concat_result + + +def constant(*, value: Optional[np.ndarray] = None, value_float: Optional[float] = None, value_floats: Optional[Iterable[float]] = None, value_int: Optional[int] = None, value_ints: Optional[Iterable[int]] = None, value_string: Optional[str] = None, value_strings: Optional[Iterable[str]] = None, ) -> Var: + r""" +This operator produces a constant tensor. Exactly one of the provided +attributes, either value, sparse_value, or value\_\* must be specified. + +Parameters +========== +sparse_value + Attribute. + The value for the elements of the output tensor in sparse format. +value + Attribute. + The value for the elements of the output tensor. +value_float + Attribute. + The value for the sole element for the scalar, float32, output tensor. +value_floats + Attribute. + The values for the elements for the 1D, float32, output tensor. +value_int + Attribute. + The value for the sole element for the scalar, int64, output tensor. +value_ints + Attribute. + The values for the elements for the 1D, int64, output tensor. +value_string + Attribute. + The value for the sole element for the scalar, UTF-8 string, output + tensor. +value_strings + Attribute. + The values for the elements for the 1D, UTF-8 string, output tensor. + +Returns +======= +output : Var + Type T. + Output tensor containing the same value of the provided tensor. + +Notes +===== +Signature: ``ai.onnx@13::Constant``. 
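The ``new_axis`` distinction described above for ``ConcatFromSequence``, shown with its numpy analogues:

::

    import numpy as np

    seq = [np.array([[1, 2]]), np.array([[3, 4]])]
    np.concatenate(seq, axis=0)  # new_axis=0: like Concat        -> shape (2, 2)
    np.stack(seq, axis=0)        # new_axis=1: inserts a new axis -> shape (2, 1, 2)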
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Constant( + _Constant.Attributes( + value=AttrTensor.maybe(value, name="value"), + value_float=AttrFloat32.maybe(value_float, name="value_float"), + value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), + value_int=AttrInt64.maybe(value_int, name="value_int"), + value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), + value_string=AttrString.maybe(value_string, name="value_string"), + value_strings=AttrStrings.maybe(value_strings, name="value_strings"), + ), _Constant.Inputs( + ), ).get_output_vars( + ).output + + +def constant_of_shape(input: Var, *, value: Optional[np.ndarray] = None, ) -> Var: + r""" +Generate a tensor with given value and shape. + +Parameters +========== +input + Type T1. + 1D tensor. The shape of the expected output tensor. If empty tensor is + given, the output would be a scalar. All values must be >= 0. +value + Attribute. + (Optional) The value of the output elements.Should be a one-element + tensor. If not specified, it defaults to a tensor of value 0 and + datatype float32 + +Returns +======= +output : Var + Type T2. + Output tensor of shape specified by 'input'.If attribute 'value' is + specified, the value and datatype of the output tensor is taken from + 'value'.If attribute 'value' is not specified, the value in the output + defaults to 0, and the datatype defaults to float32. + +Notes +===== +Signature: ``ai.onnx@9::ConstantOfShape``. + +Type constraints: + - T1: `tensor(int64)` + - T2: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _ConstantOfShape( + _ConstantOfShape.Attributes( + value=AttrTensor.maybe(value, name="value"), + ), _ConstantOfShape.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def conv(X: Var, W: Var, B: Optional[Var] = None, *, auto_pad: str = "NOTSET", dilations: Optional[Iterable[int]] = None, group: int = 1, kernel_shape: Optional[Iterable[int]] = None, pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: + r""" +The convolution operator consumes an input tensor and a filter, and +computes the output. + +Parameters +========== +X + Type T. + Input data tensor from previous layer; has size (N x C x H x W), where N + is the batch size, C is the number of channels, and H and W are the + height and width. Note that this is for the 2D image. Otherwise the size + is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in + effect, the operation expects input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. +W + Type T. + The weight tensor that will be used in the convolutions; has size (M x + C/group x kH x kW), where C is the number of channels, and kH and kW are + the height and width of the kernel, and M is the number of feature maps. + For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x + k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. 
+ Optionally, if dimension denotation is in effect, the operation expects + the weight tensor to arrive with the dimension denotation of + [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL + ...]. Assuming zero based indices for the shape array, X.shape[1] == + (W.shape[1] \* group) == C and W.shape[0] mod G == 0. Or in other words + FILTER_IN_CHANNEL multiplied by the number of groups should be equal to + DATA_CHANNEL and the number of feature maps M should be a multiple of + the number of groups G. +B + Type T. + Optional 1D bias to be added to the convolution, has size of M. +auto_pad + Attribute. + auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where + default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that + ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis + ``i``. The padding is split between the two sides equally or almost + equally (depending on whether it is even or odd). In case the padding is + an odd number, the extra padding is added at the end for SAME_UPPER and + at the beginning for SAME_LOWER. +dilations + Attribute. + dilation value along each spatial axis of the filter. If not present, + the dilation defaults is 1 along each spatial axis. +group + Attribute. + number of groups input channels and output channels are divided into. +kernel_shape + Attribute. + The shape of the convolution kernel. If not present, should be inferred + from input W. +pads + Attribute. + Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. ``pads`` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis ``i`` and xi_end, the number of pixels + added at the end of axis ``i``. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. +strides + Attribute. + Stride along each spatial axis. If not present, the stride defaults is 1 + along each spatial axis. + +Returns +======= +Y : Var + Type T. + Output data tensor that contains the result of the convolution. The + output dimensions are functions of the kernel size, stride size, and pad + lengths. + +Notes +===== +Signature: ``ai.onnx@11::Conv``. 
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Conv( + _Conv.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _Conv.Inputs( + X=unwrap_vars(X), W=unwrap_vars(W), B=unwrap_vars(B), ), ).get_output_vars( + X=get_value(X), W=get_value(W), B=get_value(B), ).Y + + +def conv_integer(x: Var, w: Var, x_zero_point: Optional[Var] = None, w_zero_point: Optional[Var] = None, *, auto_pad: str = "NOTSET", dilations: Optional[Iterable[int]] = None, group: int = 1, kernel_shape: Optional[Iterable[int]] = None, pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: + r""" +The integer convolution operator consumes an input tensor, its +zero-point, a filter, and its zero-point, and computes the output. The +production MUST never overflow. The accumulation may overflow if and +only if in 32 bits. + +Parameters +========== +x + Type T1. + Input data tensor from previous layer; has size (N x C x H x W), where N + is the batch size, C is the number of channels, and H and W are the + height and width. Note that this is for the 2D image. Otherwise the size + is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in + effect, the operation expects input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. +w + Type T2. + The weight tensor that will be used in the convolutions; has size (M x + C/group x kH x kW), where C is the number of channels, and kH and kW are + the height and width of the kernel, and M is the number of feature maps. + For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x + k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. + Optionally, if dimension denotation is in effect, the operation expects + the weight tensor to arrive with the dimension denotation of + [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL + ...]. X.shape[1] == (W.shape[1] \* group) == C (assuming zero based + indices for the shape array). Or in other words FILTER_IN_CHANNEL should + be equal to DATA_CHANNEL. +x_zero_point + Type T1. + Zero point tensor for input 'x'. It's optional and default value is 0. + It's a scalar, which means a per-tensor/layer quantization. +w_zero_point + Type T2. + Zero point tensor for input 'w'. It's optional and default value is 0. + It could be a scalar or a 1-D tensor, which means a per-tensor/layer or + per output channel quantization. If it's a 1-D tensor, its number of + elements should be equal to the number of output channels (M) +auto_pad + Attribute. + auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where + default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that + ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis + ``i``. The padding is split between the two sides equally or almost + equally (depending on whether it is even or odd). In case the padding is + an odd number, the extra padding is added at the end for SAME_UPPER and + at the beginning for SAME_LOWER. +dilations + Attribute. + dilation value along each spatial axis of the filter. 
If not present, + the dilation defaults to 1 along each axis. +group + Attribute. + number of groups input channels and output channels are divided into. + default is 1. +kernel_shape + Attribute. + The shape of the convolution kernel. If not present, should be inferred + from input 'w'. +pads + Attribute. + Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0.The value represent the number + of pixels added to the beginning and end part of the corresponding + axis.\ ``pads`` format should be as follow [x1_begin, x2_begin...x1_end, + x2_end,...], where xi_begin the number ofpixels added at the beginning + of axis ``i`` and xi_end, the number of pixels added at the end of axis + ``i``.This attribute cannot be used simultaneously with auto_pad + attribute. If not present, the padding defaultsto 0 along start and end + of each spatial axis. +strides + Attribute. + Stride along each spatial axis. If not present, the stride defaults to 1 + along each axis. + +Returns +======= +y : Var + Type T3. + Output data tensor that contains the result of the convolution. The + output dimensions are functions of the kernel size, stride size, and pad + lengths. + +Notes +===== +Signature: ``ai.onnx@10::ConvInteger``. + +Type constraints: + - T1: `tensor(int8)`, `tensor(uint8)` + - T2: `tensor(int8)`, `tensor(uint8)` + - T3: `tensor(int32)` + """ + return _ConvInteger( + _ConvInteger.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _ConvInteger.Inputs( + x=unwrap_vars(x), w=unwrap_vars(w), x_zero_point=unwrap_vars(x_zero_point), w_zero_point=unwrap_vars(w_zero_point), ), ).get_output_vars( + x=get_value(x), w=get_value(w), x_zero_point=get_value(x_zero_point), w_zero_point=get_value(w_zero_point), ).y + + +def conv_transpose(X: Var, W: Var, B: Optional[Var] = None, *, auto_pad: str = "NOTSET", dilations: Optional[Iterable[int]] = None, group: int = 1, kernel_shape: Optional[Iterable[int]] = None, output_padding: Optional[Iterable[int]] = None, output_shape: Optional[Iterable[int]] = None, pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: + r""" +The convolution transpose operator consumes an input tensor and a +filter, and computes the output. + +If the pads parameter is provided the shape of the output is calculated +via the following equation: + +output_shape[i] = stride[i] \* (input_size[i] - 1) + output_padding[i] + +((kernel_shape[i] - 1) \* dilations[i] + 1) - pads[start_i] - +pads[end_i] + +output_shape can also be explicitly specified in which case pads values +are auto generated using these equations: + +total_padding[i] = stride[i] \* (input_size[i] - 1) + output_padding[i] ++ ((kernel_shape[i] - 1) \* dilations[i] + 1) - output_shape[i] If +(auto_pads == SAME_UPPER): pads[start_i] = total_padding[i]/2; +pads[end_i] = total_padding[i] - (total_padding[i]/2) Else: +pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = +(total_padding[i]/2). + +Parameters +========== +X + Type T. + Input data tensor from previous layer; has size (N x C x H x W), where N + is the batch size, C is the number of channels, and H and W are the + height and width. Note that this is for the 2D image. 
Otherwise the size + is (N x C x D1 x D2 ... x Dn) +W + Type T. + The weight tensor that will be used in the convolutions; has size (C x + M/group x kH x kW), where C is the number of channels, and kH and kW are + the height and width of the kernel, and M is the number of feature maps. + For more than 2 dimensions, the weight shape will be (C x M/group x k1 x + k2 x ... x kn), where (k1 x k2 x ... x kn) is the dimension of the + kernel. The number of channels in the output should be equal to + W.shape[1] \* group (assuming zero based indices of the shape array) +B + Type T. + Optional 1D bias to be added to the convolution, has size of M. +auto_pad + Attribute. + auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where + default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that + ``output_shape[i] = input_shape[i] * strides[i]`` for each axis ``i``. + The padding is split between the two sides equally or almost equally + (depending on whether it is even or odd). In case the padding is an odd + number, the extra padding is added at the end for SAME_UPPER and at the + beginning for SAME_LOWER. +dilations + Attribute. + dilation value along each spatial axis of the filter. If not present, + the dilation defaults to 1 along each spatial axis. +group + Attribute. + number of groups input channels and output channels are divided into. +kernel_shape + Attribute. + The shape of the convolution kernel. If not present, should be inferred + from input W. +output_padding + Attribute. + Additional elements added to the side with higher coordinate indices in + the output. Each padding value in "output_padding" must be less than the + corresponding stride/dilation dimension. By default, this attribute is a + zero vector. Note that this attribute doesn't directly affect the + computed output values. It only controls the selection of the computed + values, so changing this attribute only adds or removes output elements. + If "output_shape" is explicitly provided, "output_padding" does not + contribute additional size to "output_shape" but participates in the + computation of the needed padding amount. This is also called adjs or + adjustment in some frameworks. +output_shape + Attribute. + The shape of the output can be explicitly set which will cause pads + values to be auto generated. If output_shape is specified pads values + are ignored. See doc for details for equations to generate pads. Note + that the output_shape attribute value should not include dimensions for + batch size and channels, which are automatically inferred. +pads + Attribute. + Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. ``pads`` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis ``i`` and xi_end, the number of pixels + added at the end of axis ``i``. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. +strides + Attribute. + Stride along each spatial axis. If not present, the stride defaults to 1 + along each spatial axis. + +Returns +======= +Y : Var + Type T. + Output data tensor that contains the result of the convolution. 
The + output dimensions are functions of the kernel size, stride size, pad + lengths and group count. The number of channels in the output should be + equal to W.shape[1] \* group (assuming zero based indices of the shape + array) + +Notes +===== +Signature: ``ai.onnx@11::ConvTranspose``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _ConvTranspose( + _ConvTranspose.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + output_padding=AttrInt64s.maybe(output_padding, name="output_padding"), + output_shape=AttrInt64s.maybe(output_shape, name="output_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _ConvTranspose.Inputs( + X=unwrap_vars(X), W=unwrap_vars(W), B=unwrap_vars(B), ), ).get_output_vars( + X=get_value(X), W=get_value(W), B=get_value(B), ).Y + + +def cos(input: Var, ) -> Var: + r""" +Calculates the cosine of the given input tensor, element-wise. + +Parameters +========== +input + Type T. + Input tensor + +Returns +======= +output : Var + Type T. + The cosine of the input tensor computed element-wise + +Notes +===== +Signature: ``ai.onnx@7::Cos``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Cos( + _Cos.Attributes( + ), _Cos.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def cosh(input: Var, ) -> Var: + r""" +Calculates the hyperbolic cosine of the given input tensor element-wise. + +Parameters +========== +input + Type T. + Input tensor + +Returns +======= +output : Var + Type T. + The hyperbolic cosine values of the input tensor computed element-wise + +Notes +===== +Signature: ``ai.onnx@9::Cosh``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Cosh( + _Cosh.Attributes( + ), _Cosh.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def cumsum(x: Var, axis: Var, *, exclusive: int = 0, reverse: int = 0, ) -> Var: + r""" +Performs cumulative sum of the input elements along the given axis. By +default, it will do the sum inclusively meaning the first element is +copied as is. Through an ``exclusive`` attribute, this behavior can +change to exclude the first element. It can also perform summation in +the opposite direction of the axis. For that, set ``reverse`` attribute +to 1. + +Example: + +:: + + input_x = [1, 2, 3] + axis=0 + output = [1, 3, 6] + exclusive=1 + output = [0, 1, 3] + exclusive=0 + reverse=1 + output = [6, 5, 3] + exclusive=1 + reverse=1 + output = [5, 3, 0] + +Parameters +========== +x + Type T. + An input tensor that is to be processed. +axis + Type T2. + A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. Negative value + means counting dimensions from the back. +exclusive + Attribute. + If set to 1 will return exclusive sum in which the top element is not + included. In other terms, if set to 1, the j-th output element would be + the sum of the first (j-1) elements. Otherwise, it would be the sum of + the first j elements. +reverse + Attribute. + If set to 1 will perform the sums in reverse direction. + +Returns +======= +y : Var + Type T. + Output tensor of the same type as 'x' with cumulative sums of the x's + elements + +Notes +===== +Signature: ``ai.onnx@14::CumSum``. 
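The ``CumSum`` example above ([1, 2, 3] along axis 0) reproduced with numpy, deriving the ``exclusive`` and ``reverse`` variants from the inclusive sum:

::

    import numpy as np

    x = np.array([1, 2, 3])

    inclusive = np.cumsum(x)                  # [1, 3, 6]   exclusive=0, reverse=0
    exclusive = inclusive - x                 # [0, 1, 3]   exclusive=1
    reverse = np.flip(np.cumsum(np.flip(x)))  # [6, 5, 3]   reverse=1
    reverse_exclusive = reverse - x           # [5, 3, 0]   exclusive=1, reverse=1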
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + - T2: `tensor(int32)`, `tensor(int64)` + """ + return _CumSum( + _CumSum.Attributes( + exclusive=AttrInt64(exclusive, name="exclusive"), + reverse=AttrInt64(reverse, name="reverse"), + ), _CumSum.Inputs( + x=unwrap_vars(x), axis=unwrap_vars(axis), ), ).get_output_vars( + x=get_value(x), axis=get_value(axis), ).y + + +def dft(input: Var, dft_length: Optional[Var] = None, *, axis: int = 1, inverse: int = 0, onesided: int = 0, ) -> Var: + r""" +Computes the discrete Fourier transform of input. + +Parameters +========== +input + Type T1. + For real input, the following shape is expected: + [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][1]. For complex + input, the following shape is expected: + [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][2]. The first + dimension is the batch dimension. The following N dimensions correspond + to the signal's dimensions. The final dimension represents the real and + imaginary parts of the value in that order. +dft_length + Type T2. + The length of the signal as a scalar. If greater than the axis + dimension, the signal will be zero-padded up to dft_length. If less than + the axis dimension, only the first dft_length values will be used as the + signal. It's an optional value. +axis + Attribute. + The axis on which to perform the DFT. By default this value is set to 1, + which corresponds to the first dimension after the batch index. Negative + value means counting dimensions from the back. Accepted range is + :math:`[-r, -2] \cup [0, r-2]` where ``r = rank(input)``. The last + dimension is for representing complex numbers and thus is an invalid + axis. +inverse + Attribute. + Whether to perform the inverse discrete fourier transform. By default + this value is set to 0, which corresponds to false. +onesided + Attribute. + If onesided is 1, only values for w in [0, 1, 2, ..., floor(n_fft/2) + + 1] are returned because the real-to-complex Fourier transform satisfies + the conjugate symmetry, i.e., X[m, w] = X[m, n_fft-w]\*. Note if the + input or window tensors are complex, then onesided output is not + possible. Enabling onesided with real inputs performs a Real-valued fast + Fourier transform (RFFT). When invoked with real or complex valued + input, the default value is 0. Values can be 0 or 1. + +Returns +======= +output : Var + Type T1. + The Fourier Transform of the input vector. If onesided is 0, the + following shape is expected: + [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][2]. If axis=1 and + onesided is 1, the following shape is expected: + [batch_idx][floor(signal_dim1/2)+1][signal_dim2]...[signal_dimN][2]. If + axis=2 and onesided is 1, the following shape is expected: + [batch_idx][signal_dim1][floor(signal_dim2/2)+1]...[signal_dimN][2]. If + axis=N and onesided is 1, the following shape is expected: + [batch_idx][signal_dim1][signal_dim2]...[floor(signal_dimN/2)+1][2]. The + signal_dim at the specified axis is equal to the dft_length. + +Notes +===== +Signature: ``ai.onnx@17::DFT``. 
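A numpy check of the ``onesided`` shape and conjugate-symmetry claims above, using the real FFT (which returns exactly ``floor(n_fft/2) + 1`` bins):

::

    import numpy as np

    n = 8
    x = np.random.randn(n)
    full = np.fft.fft(x)                         # n complex bins
    half = np.fft.rfft(x)                        # floor(n/2) + 1 = 5 bins
    np.allclose(full[: n // 2 + 1], half)        # True: onesided output is the lower half
    np.allclose(full[n - 1], np.conj(full[1]))   # True: X[n - w] == conj(X[w]) for real input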
+ +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(int32)`, `tensor(int64)` + """ + return _DFT( + _DFT.Attributes( + axis=AttrInt64(axis, name="axis"), + inverse=AttrInt64(inverse, name="inverse"), + onesided=AttrInt64(onesided, name="onesided"), + ), _DFT.Inputs( + input=unwrap_vars(input), dft_length=unwrap_vars(dft_length), ), ).get_output_vars( + input=get_value(input), dft_length=get_value(dft_length), ).output + + +def depth_to_space(input: Var, *, blocksize: int, mode: str = "DCR", ) -> Var: + r""" +DepthToSpace rearranges (permutes) data from depth into blocks of +spatial data. This is the reverse transformation of SpaceToDepth. More +specifically, this op outputs a copy of the input tensor where values +from the depth dimension are moved in spatial blocks to the height and +width dimensions. By default, ``mode`` = ``DCR``. In the DCR mode, +elements along the depth dimension from the input tensor are rearranged +in the following order: depth, column, and then row. The output y is +computed from the input x as below: + +:: + + b, c, h, w = x.shape + tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w]) + tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2]) + y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize]) + +In the CRD mode, elements along the depth dimension from the input +tensor are rearranged in the following order: column, row, and the +depth. The output y is computed from the input x as below: + +:: + + b, c, h, w = x.shape + tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w]) + tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3]) + y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize]) + +Parameters +========== +input + Type T. + Input tensor of [N,C,H,W], where N is the batch axis, C is the channel + or depth, H is the height and W is the width. +blocksize + Attribute. + Blocks of [blocksize, blocksize] are moved. +mode + Attribute. + DCR (default) for depth-column-row order re-arrangement. Use CRD for + column-row-depth order. + +Returns +======= +output : Var + Type T. + Output tensor of [N, C/(blocksize \* blocksize), H \* blocksize, W \* + blocksize]. + +Notes +===== +Signature: ``ai.onnx@13::DepthToSpace``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _DepthToSpace( + _DepthToSpace.Attributes( + blocksize=AttrInt64(blocksize, name="blocksize"), + mode=AttrString(mode, name="mode"), + ), _DepthToSpace.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def dequantize_linear(x: Var, x_scale: Var, x_zero_point: Optional[Var] = None, *, axis: int = 1, ) -> Var: + r""" +The linear dequantization operator. It consumes a quantized tensor, a +scale, and a zero point to compute the full precision tensor. The +dequantization formula is ``y = (x - x_zero_point) * x_scale``. +``x_scale`` and ``x_zero_point`` must have same shape, and can be either +a scalar for per-tensor / per layer quantization, or a 1-D tensor for +per-axis quantization. ``x_zero_point`` and ``x`` must have same type. +``x`` and ``y`` must have same shape. 
In the case of dequantizing int32, +there's no zero point (zero point is supposed to be 0). + +Parameters +========== +x + Type T. + N-D quantized input tensor to be de-quantized. +x_scale + Type tensor(float). + Scale for input 'x'. It can be a scalar, which means a per-tensor/layer + dequantization, or a 1-D tensor for per-axis dequantization. +x_zero_point + Type T. + Zero point for input 'x'. Shape must match x_scale. It's optional. Zero + point is 0 when it's not specified. +axis + Attribute. + (Optional) The axis of the dequantizing dimension of the input tensor. + Ignored for per-tensor quantization. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). + +Returns +======= +y : Var + Type tensor(float). + N-D full precision output tensor. It has same shape as input 'x'. + +Notes +===== +Signature: ``ai.onnx@13::DequantizeLinear``. + +Type constraints: + - T: `tensor(int32)`, `tensor(int8)`, `tensor(uint8)` + """ + return _DequantizeLinear( + _DequantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _DequantizeLinear.Inputs( + x=unwrap_vars(x), x_scale=unwrap_vars(x_scale), x_zero_point=unwrap_vars(x_zero_point), ), ).get_output_vars( + x=get_value(x), x_scale=get_value(x_scale), x_zero_point=get_value(x_zero_point), ).y + + +def det(X: Var, ) -> Var: + r""" +Det calculates determinant of a square matrix or batches of square +matrices. Det takes one input tensor of shape ``[*, M, M]``, where ``*`` +is zero or more batch dimensions, and the inner-most 2 dimensions form +square matrices. The output is a tensor of shape ``[*]``, containing the +determinants of all input submatrices. e.g., When the input is 2-D, the +output is a scalar(shape is empty: ``[]``). + +Parameters +========== +X + Type T. + Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@11::Det``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Det( + _Det.Attributes( + ), _Det.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def div(A: Var, B: Var, ) -> Var: + r""" +Performs element-wise binary division (with Numpy-style broadcasting +support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +(Opset 14 change): Extend supported types to include uint8, int8, +uint16, and int16. + +Parameters +========== +A + Type T. + First operand. +B + Type T. + Second operand. + +Returns +======= +C : Var + Type T. + Result, has same element type as two inputs + +Notes +===== +Signature: ``ai.onnx@14::Div``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Div( + _Div.Attributes( + ), _Div.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def dropout(data: Var, ratio: Optional[Var] = None, training_mode: Optional[Var] = None, *, seed: Optional[int] = None, ) -> tuple[Var, Var]: + r""" +Dropout takes an input floating-point tensor, an optional input ratio +(floating-point scalar) and an optional input training_mode (boolean +scalar). It produces two tensor outputs, output (floating-point tensor) +and mask (optional ``Tensor``). 
If ``training_mode`` is true then +the output Y will be a random dropout; Note that this Dropout scales the +masked input data by the following equation, so to convert the trained +model into inference mode, the user can simply not pass +``training_mode`` input or set it to false. + +:: + + output = scale * data * mask, + +where + +:: + + scale = 1. / (1. - ratio). + +This operator has **optional** inputs/outputs. See `the +doc `__ for more +details about the representation of optional arguments. An empty string +may be used in the place of an actual argument's name to indicate a +missing argument. Trailing optional arguments (those not followed by an +argument that is present) may also be simply omitted. + +Parameters +========== +data + Type T. + The input data as Tensor. +ratio + Type T1. + The ratio of random dropout, with value in [0, 1). If this input was not + set, or if it was set to 0, the output would be a simple copy of the + input. If it's non-zero, output will be a random dropout of the scaled + input, which is typically the case during training. It is an optional + value, if not specified it will default to 0.5. +training_mode + Type T2. + If set to true then it indicates dropout is being used for training. It + is an optional value hence unless specified explicitly, it is false. If + it is false, ratio is ignored and the operation mimics inference mode + where nothing will be dropped from the input data and if mask is + requested as output it will contain all ones. +seed + Attribute. + (Optional) Seed to the random generator, if not specified we will auto + generate one. + +Returns +======= +output : Var + Type T. + The output. +mask : Var + Type T2. + The output mask. + +Notes +===== +Signature: ``ai.onnx@13::Dropout``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(bool)` + """ + return _Dropout( + _Dropout.Attributes( + seed=AttrInt64.maybe(seed, name="seed"), + ), _Dropout.Inputs( + data=unwrap_vars(data), ratio=unwrap_vars(ratio), training_mode=unwrap_vars(training_mode), ), ).get_output_vars( + data=get_value(data), ratio=get_value(ratio), training_mode=get_value(training_mode), )._unpack_to_any() + + +def dynamic_quantize_linear(x: Var, ) -> tuple[Var, Var, Var]: + r""" +A Function to fuse calculation for Scale, Zero Point and FP32->8Bit +conversion of FP32 Input data. Outputs Scale, ZeroPoint and Quantized +Input for a given FP32 Input. Scale is calculated as: + +:: + + y_scale = (maximum(0, max(x)) - minimum(0, min(x))) / (qmax - qmin) + +- where qmax and qmin are max and min values for quantization range + i.e. [0, 255] in case of uint8 +- data range is adjusted to include 0. + +Zero point is calculated as: + +:: + + intermediate_zero_point = qmin - min(x)/y_scale + y_zero_point = cast(round(saturate(itermediate_zero_point))) + +- where qmax and qmin are max and min values for quantization range + .i.e [0, 255] in case of uint8 +- for saturation, it saturates to [0, 255] if it's uint8, or [-127, + 127] if it's int8. Right now only uint8 is supported. +- rounding to nearest ties to even. + +Data quantization formula is: + +:: + + y = saturate (round (x / y_scale) + y_zero_point) + +- for saturation, it saturates to [0, 255] if it's uint8, or [-127, + 127] if it's int8. Right now only uint8 is supported. +- rounding to nearest ties to even. + +Parameters +========== +x + Type T1. + Input tensor + +Returns +======= +y : Var + Type T2. 
+ Quantized output tensor +y_scale : Var + Type tensor(float). + Output scale. It's a scalar, which means a per-tensor/layer + quantization. +y_zero_point : Var + Type T2. + Output zero point. It's a scalar, which means a per-tensor/layer + quantization. + +Notes +===== +Signature: ``ai.onnx@11::DynamicQuantizeLinear``. + +Type constraints: + - T1: `tensor(float)` + - T2: `tensor(uint8)` + """ + return _DynamicQuantizeLinear( + _DynamicQuantizeLinear.Attributes( + ), _DynamicQuantizeLinear.Inputs( + x=unwrap_vars(x), ), ).get_output_vars( + x=get_value(x), )._unpack_to_any() + + +def einsum(Inputs: Sequence[Var], *, equation: str, ) -> Var: + r""" +An einsum of the form ``term1, term2 -> output-term`` produces an output +tensor using the following equation + +:: + + output[output-term] = reduce-sum( input1[term1] * input2[term2] ) + +where the reduce-sum performs a summation over all the indices occurring +in the input terms (term1, term2) that do not occur in the output-term. + +The Einsum operator evaluates algebraic tensor operations on a sequence +of tensors, using the Einstein summation convention. The equation string +contains a comma-separated sequence of lower case letters. Each term +corresponds to an operand tensor, and the characters within the terms +correspond to operands dimensions. + +This sequence may be followed by "->" to separate the left and right +hand side of the equation. If the equation contains "->" followed by the +right-hand side, the explicit (not classical) form of the Einstein +summation is performed, and the right-hand side indices indicate output +tensor dimensions. In other cases, output indices are (implicitly) set +to the alphabetically sorted sequence of indices appearing exactly once +in the equation. + +When a dimension character is repeated in the left-hand side, it +represents summation along the dimension. + +The equation may contain ellipsis ("...") to enable broadcasting. +Ellipsis must indicate a fixed number of dimensions. Specifically, every +occurrence of ellipsis in the equation must represent the same number of +dimensions. The right-hand side may contain exactly one ellipsis. In +implicit mode, the ellipsis dimensions are set to the beginning of the +output. The equation string may contain space (U+0020) character. + +Parameters +========== +Inputs + Type T. + Operands +equation + Attribute. + Einsum expression string. + +Returns +======= +Output : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@12::Einsum``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Einsum( + _Einsum.Attributes( + equation=AttrString(equation, name="equation"), + ), _Einsum.Inputs( + Inputs=unwrap_vars(Inputs), ), ).get_output_vars( + Inputs=get_value(Inputs), ).Output + + +def elu(X: Var, *, alpha: float = 1.0, ) -> Var: + r""" +Elu takes one input data (Tensor) and produces one output data +(Tensor) where the function +``f(x) = alpha * (exp(x) - 1.) for x < 0``, ``f(x) = x for x >= 0``., is +applied to the tensor elementwise. + +Parameters +========== +X + Type T. + 1D input tensor +alpha + Attribute. + Coefficient of ELU. + +Returns +======= +Y : Var + Type T. + 1D output tensor + +Notes +===== +Signature: ``ai.onnx@6::Elu``. 
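+
+A minimal sketch of applying the activation element-wise, assuming a
+``spox.argument`` placeholder input (names are illustrative)::
+
+    import numpy as np
+    from spox import Tensor, argument
+
+    x = argument(Tensor(np.float32, ("N",)))
+    y = elu(x, alpha=1.0)  # alpha * (exp(x) - 1) for x < 0, x otherwise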
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Elu( + _Elu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), _Elu.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def equal(A: Var, B: Var, ) -> Var: + r""" +Returns the tensor resulted from performing the ``equal`` logical +operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the logical operator. +B + Type T. + Second input operand for the logical operator. + +Returns +======= +C : Var + Type T1. + Result tensor. + +Notes +===== +Signature: ``ai.onnx@13::Equal``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(bool)` + """ + return _Equal( + _Equal.Attributes( + ), _Equal.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def erf(input: Var, ) -> Var: + r""" +Computes the error function of the given input tensor element-wise. + +Parameters +========== +input + Type T. + Input tensor + +Returns +======= +output : Var + Type T. + The error function of the input tensor computed element-wise. It has the + same shape and type of the input. + +Notes +===== +Signature: ``ai.onnx@13::Erf``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Erf( + _Erf.Attributes( + ), _Erf.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def exp(input: Var, ) -> Var: + r""" +Calculates the exponential of the given input tensor, element-wise. + +Parameters +========== +input + Type T. + Input tensor + +Returns +======= +output : Var + Type T. + The exponential of the input tensor computed element-wise + +Notes +===== +Signature: ``ai.onnx@13::Exp``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Exp( + _Exp.Attributes( + ), _Exp.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def expand(input: Var, shape: Var, ) -> Var: + r""" +Broadcast the input tensor following the given shape and the broadcast +rule. The broadcast rule is similar to numpy.array(input) \* +numpy.ones(shape): Dimensions are right alignment; Two corresponding +dimensions must have the same value, or one of them is equal to 1. Also, +this operator is similar to numpy.broadcast_to(input, shape), but the +major difference is numpy.broadcast_to() does not allow shape to be +smaller than input.size(). It is possible that the output.shape is not +equal to shape, when some dimensions in shape is equal to 1, or the +shape.ndim < input.shape.ndim. + +Parameters +========== +input + Type T. + Input tensor +shape + Type tensor(int64). + A 1-D tensor indicates the shape you want to expand to, following the + broadcast rule + +Returns +======= +output : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@13::Expand``. 
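+
+A minimal sketch of broadcasting a row to a runtime-provided shape, assuming
+``spox.argument`` placeholders (names are illustrative)::
+
+    import numpy as np
+    from spox import Tensor, argument
+
+    x = argument(Tensor(np.float32, (1, "C")))
+    target_shape = argument(Tensor(np.int64, (2,)))  # 1-D shape tensor
+    y = expand(x, target_shape)  # rows broadcast following the numpy rule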
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Expand( + _Expand.Attributes( + ), _Expand.Inputs( + input=unwrap_vars(input), shape=unwrap_vars(shape), ), ).get_output_vars( + input=get_value(input), shape=get_value(shape), ).output + + +def eye_like(input: Var, *, dtype: Optional[npt.DTypeLike] = None, k: int = 0, ) -> Var: + r""" +Generate a 2D tensor (matrix) with ones on the diagonal and zeros +everywhere else. Only 2D tensors are supported, i.e. input T1 must be of +rank 2. The shape of the output tensor is the same as the input tensor. +The data type can be specified by the 'dtype' argument. If 'dtype' is +not specified, then the type of input tensor is used. By default, the +main diagonal is populated with ones, but attribute 'k' can be used to +populate upper or lower diagonals. The 'dtype' argument must be one of +the data types specified in the 'DataType' enum field in the TensorProto +message and be valid as an output type. + +Parameters +========== +input + Type T1. + 2D input tensor to copy shape, and optionally, type information from. +dtype + Attribute. + (Optional) The data type for the elements of the output tensor. If not + specified,the data type of the input tensor T1 is used. If input tensor + T1 is also notspecified, then type defaults to 'float'. +k + Attribute. + (Optional) Index of the diagonal to be populated with ones. Default is + 0. If T2 is the output, this op sets T2[i, i+k] = 1. k = 0 populates the + main diagonal, k > 0 populates an upper diagonal, and k < 0 populates a + lower diagonal. + +Returns +======= +output : Var + Type T2. + Output tensor, same shape as input tensor T1. + +Notes +===== +Signature: ``ai.onnx@9::EyeLike``. + +Type constraints: + - T1: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _EyeLike( + _EyeLike.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + k=AttrInt64(k, name="k"), + ), _EyeLike.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def flatten(input: Var, *, axis: int = 1, ) -> Var: + r""" +Flattens the input tensor into a 2D matrix. If input tensor has shape +(d_0, d_1, ... d_n) then the output will have shape (d_0 X d_1 ... +d\_(axis-1), d_axis X d\_(axis+1) ... X dn). + +Parameters +========== +input + Type T. + A tensor of rank >= axis. +axis + Attribute. + Indicate up to which input dimensions (exclusive) should be flattened to + the outer dimension of the output. The value for axis must be in the + range [-r, r], where r is the rank of the input tensor. Negative value + means counting dimensions from the back. When axis = 0, the shape of the + output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input + tensor is (d_0, d_1, ... d_n). + +Returns +======= +output : Var + Type T. 
+ A 2D tensor with the contents of the input tensor, with input dimensions + up to axis flattened to the outer dimension of the output and remaining + input dimensions flattened into the inner dimension of the output. + +Notes +===== +Signature: ``ai.onnx@13::Flatten``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Flatten( + _Flatten.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _Flatten.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def floor(X: Var, ) -> Var: + r""" +Floor takes one input data (Tensor) and produces one output data +(Tensor) where the floor is, y = floor(x), is applied to the tensor +elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is +returned. + +Parameters +========== +X + Type T. + Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@13::Floor``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Floor( + _Floor.Attributes( + ), _Floor.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def gru(X: Var, W: Var, R: Var, B: Optional[Var] = None, sequence_lens: Optional[Var] = None, initial_h: Optional[Var] = None, *, activation_alpha: Optional[Iterable[float]] = None, activation_beta: Optional[Iterable[float]] = None, activations: Optional[Iterable[str]] = None, clip: Optional[float] = None, direction: str = "forward", hidden_size: Optional[int] = None, layout: int = 0, linear_before_reset: int = 0, ) -> tuple[Var, Var]: + r""" +Computes an one-layer GRU. This operator is usually supported via some +custom implementation such as CuDNN. + +Notations: + +- ``X`` - input tensor +- ``z`` - update gate +- ``r`` - reset gate +- ``h`` - hidden gate +- ``t`` - time step (t-1 means previous time step) +- ``W[zrh]`` - W parameter weight matrix for update, reset, and hidden + gates +- ``R[zrh]`` - R recurrence weight matrix for update, reset, and hidden + gates +- ``Wb[zrh]`` - W bias vectors for update, reset, and hidden gates +- ``Rb[zrh]`` - R bias vectors for update, reset, and hidden gates +- ``WB[zrh]`` - W parameter weight matrix for backward update, reset, + and hidden gates +- ``RB[zrh]`` - R recurrence weight matrix for backward update, reset, + and hidden gates +- ``WBb[zrh]`` - W bias vectors for backward update, reset, and hidden + gates +- ``RBb[zrh]`` - R bias vectors for backward update, reset, and hidden + gates +- ``H`` - Hidden state +- ``num_directions`` - 2 if direction == bidirectional else 1 + +Activation functions: + +- Relu(x) - max(0, x) +- Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) +- Sigmoid(x) - 1/(1 + e^{-x}) + +NOTE: Below are optional + +- Affine(x) - alpha \* x + beta +- LeakyRelu(x) - x if x >= 0 else alpha \* x +- ThresholdedRelu(x) - x if x >= alpha else 0 +- ScaledTanh(x) - alpha \* Tanh(beta \* x) +- HardSigmoid(x) - min(max(alpha \* x + beta, 0), 1) +- Elu(x) - x if x >= 0 else alpha \* (e^x - 1) +- Softsign(x) - x/(1 + \|x\|) +- Softplus(x) - log(1 + e^x) + +Equations (Default: f=Sigmoid, g=Tanh): + +- zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) +- rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) +- ht = g(Xt*(Wh^T) + (rt (.) 
Ht-1)*(Rh^T) + Rbh + Wbh) # default, when + linear_before_reset = 0 +- ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when + linear_before_reset != 0 +- Ht = (1 - zt) (.) ht + zt (.) Ht-1 This operator has **optional** + inputs/outputs. See `the + doc `__ for more + details about the representation of optional arguments. An empty + string may be used in the place of an actual argument's name to + indicate a missing argument. Trailing optional arguments (those not + followed by an argument that is present) may also be simply omitted. + +Parameters +========== +X + Type T. + The input sequences packed (and potentially padded) into one 3-D tensor + with the shape of ``[seq_length, batch_size, input_size]``. +W + Type T. + The weight tensor for the gates. Concatenation of ``W[zrh]`` and + ``WB[zrh]`` (if bidirectional) along dimension 0. This tensor has shape + ``[num_directions, 3*hidden_size, input_size]``. +R + Type T. + The recurrence weight tensor. Concatenation of ``R[zrh]`` and + ``RB[zrh]`` (if bidirectional) along dimension 0. This tensor has shape + ``[num_directions, 3*hidden_size, hidden_size]``. +B + Type T. + The bias tensor for the gates. Concatenation of ``[Wb[zrh], Rb[zrh]]`` + and ``[WBb[zrh], RBb[zrh]]`` (if bidirectional) along dimension 0. This + tensor has shape ``[num_directions, 6*hidden_size]``. Optional: If not + specified - assumed to be 0 +sequence_lens + Type T1. + Optional tensor specifying lengths of the sequences in a batch. If not + specified - assumed all sequences in the batch to have length + ``seq_length``. It has shape ``[batch_size]``. +initial_h + Type T. + Optional initial value of the hidden. If not specified - assumed to be + 0. It has shape ``[num_directions, batch_size, hidden_size]``. +activation_alpha + Attribute. + Optional scaling values used by some activation functions. The values + are consumed in the order of activation functions, for example (f, g, h) + in LSTM. Default values are the same as of corresponding ONNX + operators.For example with LeakyRelu, the default alpha is 0.01. +activation_beta + Attribute. + Optional scaling values used by some activation functions. The values + are consumed in the order of activation functions, for example (f, g, h) + in LSTM. Default values are the same as of corresponding ONNX operators. +activations + Attribute. + A list of 2 (or 4 if bidirectional) activation functions for update, + reset, and hidden gates. The activation functions must be one of the + activation functions specified above. Optional: See the equations for + default if not specified. +clip + Attribute. + Cell clip threshold. Clipping bounds the elements of a tensor in the + range of [-threshold, +threshold] and is applied to the input of + activations. No clip if not specified. +direction + Attribute. + Specify if the RNN is forward, reverse, or bidirectional. Must be one of + forward (default), reverse, or bidirectional. +hidden_size + Attribute. + Number of neurons in the hidden layer +layout + Attribute. + The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the + following shapes are expected: X.shape = [seq_length, batch_size, + input_size], Y.shape = [seq_length, num_directions, batch_size, + hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, + hidden_size]. 
If 1, the following shapes are expected: X.shape = + [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, + num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, + num_directions, hidden_size]. +linear_before_reset + Attribute. + When computing the output of the hidden gate, apply the linear + transformation before multiplying by the output of the reset gate. + +Returns +======= +Y : Var + Type T. + A tensor that concats all the intermediate output values of the hidden. + It has shape ``[seq_length, num_directions, batch_size, hidden_size]``. +Y_h : Var + Type T. + The last output value of the hidden. It has shape + ``[num_directions, batch_size, hidden_size]``. + +Notes +===== +Signature: ``ai.onnx@14::GRU``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T1: `tensor(int32)` + """ + return _GRU( + _GRU.Attributes( + activation_alpha=AttrFloat32s.maybe(activation_alpha, name="activation_alpha"), + activation_beta=AttrFloat32s.maybe(activation_beta, name="activation_beta"), + activations=AttrStrings.maybe(activations, name="activations"), + clip=AttrFloat32.maybe(clip, name="clip"), + direction=AttrString(direction, name="direction"), + hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), + layout=AttrInt64(layout, name="layout"), + linear_before_reset=AttrInt64(linear_before_reset, name="linear_before_reset"), + ), _GRU.Inputs( + X=unwrap_vars(X), W=unwrap_vars(W), R=unwrap_vars(R), B=unwrap_vars(B), sequence_lens=unwrap_vars(sequence_lens), initial_h=unwrap_vars(initial_h), ), ).get_output_vars( + X=get_value(X), W=get_value(W), R=get_value(R), B=get_value(B), sequence_lens=get_value(sequence_lens), initial_h=get_value(initial_h), )._unpack_to_any() + + +def gather(data: Var, indices: Var, *, axis: int = 0, ) -> Var: + r""" +Given ``data`` tensor of rank r >= 1, and ``indices`` tensor of rank q, +gather entries of the axis dimension of ``data`` (by default outer-most +one as axis=0) indexed by ``indices``, and concatenates them in an +output tensor of rank q + (r - 1). + +If ``axis = 0``, let ``k = indices[i_{0}, ..., i_{q-1}]`` then +``output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]``: + +:: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + indices = [ + [0, 1], + [1, 2], + ] + output = [ + [ + [1.0, 1.2], + [2.3, 3.4], + ], + [ + [2.3, 3.4], + [4.5, 5.7], + ], + ] + +If ``axis = 1``, let ``k = indices[i_{0}, ..., i_{q-1}]`` then +``output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]``: + +:: + + data = [ + [1.0, 1.2, 1.9], + [2.3, 3.4, 3.9], + [4.5, 5.7, 5.9], + ] + indices = [ + [0, 2], + ] + axis = 1, + output = [ + [[1.0, 1.9]], + [[2.3, 3.9]], + [[4.5, 5.9]], + ] + +Parameters +========== +data + Type T. + Tensor of rank r >= 1. +indices + Type Tind. + Tensor of int32/int64 indices, of any rank q. All index values are + expected to be within bounds [-s, s-1] along axis of size s. It is an + error if any of the index values are out of bounds. +axis + Attribute. + Which axis to gather on. Negative value means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(data). + +Returns +======= +output : Var + Type T. + Tensor of rank q + (r - 1). + +Notes +===== +Signature: ``ai.onnx@13::Gather``. 
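+
+A minimal sketch of selecting rows along axis 0, assuming ``spox.argument``
+placeholders (names are illustrative)::
+
+    import numpy as np
+    from spox import Tensor, argument
+
+    data = argument(Tensor(np.float32, ("N", "C")))
+    indices = argument(Tensor(np.int64, ("K",)))
+    rows = gather(data, indices, axis=0)  # result has shape (K, C)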
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - Tind: `tensor(int32)`, `tensor(int64)` + """ + return _Gather( + _Gather.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _Gather.Inputs( + data=unwrap_vars(data), indices=unwrap_vars(indices), ), ).get_output_vars( + data=get_value(data), indices=get_value(indices), ).output + + +def gather_elements(data: Var, indices: Var, *, axis: int = 0, ) -> Var: + r""" +GatherElements takes two inputs ``data`` and ``indices`` of the same +rank r >= 1 and an optional attribute ``axis`` that identifies an axis +of ``data`` (by default, the outer-most axis, that is axis 0). It is an +indexing operation that produces its output by indexing into the input +data tensor at index positions determined by elements of the ``indices`` +tensor. Its output shape is the same as the shape of ``indices`` and +consists of one value (gathered from the ``data``) for each element in +``indices``. + +For instance, in the 3-D case (r = 3), the output produced is determined +by the following equations: + +:: + + out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0, + out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1, + out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2, + +This operator is also the inverse of ScatterElements. It is similar to +Torch's gather operation. + +Example 1: + +:: + + data = [ + [1, 2], + [3, 4], + ] + indices = [ + [0, 0], + [1, 0], + ] + axis = 1 + output = [ + [1, 1], + [4, 3], + ] + +Example 2: + +:: + + data = [ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9], + ] + indices = [ + [1, 2, 0], + [2, 0, 0], + ] + axis = 0 + output = [ + [4, 8, 3], + [7, 2, 3], + ] + +Parameters +========== +data + Type T. + Tensor of rank r >= 1. +indices + Type Tind. + Tensor of int32/int64 indices, with the same rank r as the input. All + index values are expected to be within bounds [-s, s-1] along axis of + size s. It is an error if any of the index values are out of bounds. +axis + Attribute. + Which axis to gather on. Negative value means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(data). + +Returns +======= +output : Var + Type T. + Tensor of the same shape as indices. + +Notes +===== +Signature: ``ai.onnx@13::GatherElements``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - Tind: `tensor(int32)`, `tensor(int64)` + """ + return _GatherElements( + _GatherElements.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _GatherElements.Inputs( + data=unwrap_vars(data), indices=unwrap_vars(indices), ), ).get_output_vars( + data=get_value(data), indices=get_value(indices), ).output + + +def gather_nd(data: Var, indices: Var, *, batch_dims: int = 0, ) -> Var: + r""" +Given ``data`` tensor of rank ``r`` >= 1, ``indices`` tensor of rank +``q`` >= 1, and ``batch_dims`` integer ``b``, this operator gathers +slices of ``data`` into an output tensor of rank +``q + r - indices_shape[-1] - 1 - b``. 
+ +``indices`` is an q-dimensional integer tensor, best thought of as a +``(q-1)``-dimensional tensor of index-tuples into ``data``, where each +element defines a slice of ``data`` + +``batch_dims`` (denoted as ``b``) is an integer indicating the number of +batch dimensions, i.e the leading ``b`` number of dimensions of ``data`` +tensor and ``indices`` are representing the batches, and the gather +starts from the ``b+1`` dimension. + +Some salient points about the inputs' rank and shape: + +1) r >= 1 and q >= 1 are to be honored. There is no dependency condition + to be met between ranks ``r`` and ``q`` + +2) The first ``b`` dimensions of the shape of ``indices`` tensor and + ``data`` tensor must be equal. + +3) b < min(q, r) is to be honored. + +4) The ``indices_shape[-1]`` should have a value between 1 (inclusive) + and rank ``r-b`` (inclusive) + +5) All values in ``indices`` are expected to be within bounds [-s, s-1] + along axis of size ``s`` (i.e.) + ``-data_shape[i] <= indices[...,i] <= data_shape[i] - 1``. It is an + error if any of the index values are out of bounds. + +The output is computed as follows: + +The output tensor is obtained by mapping each index-tuple in the +``indices`` tensor to the corresponding slice of the input ``data``. + +1) If ``indices_shape[-1] > r-b`` => error condition + +2) If ``indices_shape[-1] == r-b``, since the rank of ``indices`` is + ``q``, ``indices`` can be thought of as ``N`` ``(q-b-1)``-dimensional + tensors containing 1-D tensors of dimension ``r-b``, where ``N`` is + an integer equals to the product of 1 and all the elements in the + batch dimensions of the indices_shape. Let us think of each such + ``r-b`` ranked tensor as ``indices_slice``. Each *scalar value* + corresponding to ``data[0:b-1,indices_slice]`` is filled into the + corresponding location of the ``(q-b-1)``-dimensional tensor to form + the ``output`` tensor (Example 1 below) + +3) If ``indices_shape[-1] < r-b``, since the rank of ``indices`` is + ``q``, ``indices`` can be thought of as ``N`` ``(q-b-1)``-dimensional + tensor containing 1-D tensors of dimension ``< r-b``. Let us think of + each such tensors as ``indices_slice``. Each *tensor slice* + corresponding to ``data[0:b-1, indices_slice , :]`` is filled into + the corresponding location of the ``(q-b-1)``-dimensional tensor to + form the ``output`` tensor (Examples 2, 3, 4 and 5 below) + +This operator is the inverse of ``ScatterND``. + +**Example 1** + +:: + + batch_dims = 0 + data = [[0,1],[2,3]] # data_shape = [2, 2] + indices = [[0,0],[1,1]] # indices_shape = [2, 2] + output = [0,3] # output_shape = [2] + +**Example 2** + +:: + + batch_dims = 0 + data = [[0,1],[2,3]] # data_shape = [2, 2] + indices = [[1],[0]] # indices_shape = [2, 1] + output = [[2,3],[0,1]] # output_shape = [2, 2] + +**Example 3** + +:: + + batch_dims = 0 + data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] + indices = [[0,1],[1,0]] # indices_shape = [2, 2] + output = [[2,3],[4,5]] # output_shape = [2, 2] + +**Example 4** + +:: + + batch_dims = 0 + data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] + indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2] + output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2] + +**Example 5** + +:: + + batch_dims = 1 + data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] + indices = [[1],[0]] # indices_shape = [2, 1] + output = [[2,3],[4,5]] # output_shape = [2, 2] + +Parameters +========== +data + Type T. + Tensor of rank r >= 1. +indices + Type tensor(int64). 
+ Tensor of rank q >= 1. All index values are expected to be within bounds + [-s, s-1] along axis of size s. It is an error if any of the index + values are out of bounds. +batch_dims + Attribute. + The number of batch dimensions. The gather of indexing starts from + dimension of data[batch_dims:] + +Returns +======= +output : Var + Type T. + Tensor of rank q + r - indices_shape[-1] - 1. + +Notes +===== +Signature: ``ai.onnx@13::GatherND``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _GatherND( + _GatherND.Attributes( + batch_dims=AttrInt64(batch_dims, name="batch_dims"), + ), _GatherND.Inputs( + data=unwrap_vars(data), indices=unwrap_vars(indices), ), ).get_output_vars( + data=get_value(data), indices=get_value(indices), ).output + + +def gemm(A: Var, B: Var, C: Optional[Var] = None, *, alpha: float = 1.0, beta: float = 1.0, transA: int = 0, transB: int = 0, ) -> Var: + r""" +General Matrix multiplication: +https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 + +- A' = transpose(A) if transA else A +- B' = transpose(B) if transB else B + +Compute Y = alpha \* A' \* B' + beta \* C, where input tensor A has +shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input +tensor C is broadcastable to shape (M, N), and output tensor Y has shape +(M, N). A will be transposed before doing the computation if attribute +transA is non-zero, same for B and transB. This operator supports +**unidirectional broadcasting** (tensor C should be unidirectional +broadcastable to tensor A \* B); for more details please check `the +doc `__. +This operator has **optional** inputs/outputs. See `the +doc `__ for more +details about the representation of optional arguments. An empty string +may be used in the place of an actual argument's name to indicate a +missing argument. Trailing optional arguments (those not followed by an +argument that is present) may also be simply omitted. + +Parameters +========== +A + Type T. + Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, + M) if transA is non-zero. +B + Type T. + Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, + K) if transB is non-zero. +C + Type T. + Optional input tensor C. If not specified, the computation is done as if + C is a scalar 0. The shape of C should be unidirectional broadcastable + to (M, N). +alpha + Attribute. + Scalar multiplier for the product of input tensors A \* B. +beta + Attribute. + Scalar multiplier for input tensor C. +transA + Attribute. + Whether A should be transposed +transB + Attribute. + Whether B should be transposed + +Returns +======= +Y : Var + Type T. + Output tensor of shape (M, N). + +Notes +===== +Signature: ``ai.onnx@13::Gemm``. 
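+
+A minimal sketch of Y = alpha \* A' \* B' + beta \* C with the default
+attributes, assuming ``spox.argument`` placeholders (names are illustrative)::
+
+    import numpy as np
+    from spox import Tensor, argument
+
+    a = argument(Tensor(np.float32, ("M", "K")))
+    b = argument(Tensor(np.float32, ("K", "N")))
+    c = argument(Tensor(np.float32, ("N",)))  # unidirectionally broadcast bias
+    y = gemm(a, b, c, alpha=1.0, beta=1.0)    # no transposition of A or B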
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _Gemm( + _Gemm.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + beta=AttrFloat32(beta, name="beta"), + transA=AttrInt64(transA, name="transA"), + transB=AttrInt64(transB, name="transB"), + ), _Gemm.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), C=unwrap_vars(C), ), ).get_output_vars( + A=get_value(A), B=get_value(B), C=get_value(C), ).Y + + +def global_average_pool(X: Var, ) -> Var: + r""" +GlobalAveragePool consumes an input tensor X and applies average pooling +across the values in the same channel. This is equivalent to AveragePool +with kernel size equal to the spatial dimension of input tensor. + +Parameters +========== +X + Type T. + Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. + +Returns +======= +Y : Var + Type T. + Output data tensor from pooling across the input tensor. The output + tensor has the same rank as the input. The first two dimensions of + output shape are the same as the input (N x C), while the other + dimensions are all 1. + +Notes +===== +Signature: ``ai.onnx@1::GlobalAveragePool``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _GlobalAveragePool( + _GlobalAveragePool.Attributes( + ), _GlobalAveragePool.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def global_lp_pool(X: Var, *, p: int = 2, ) -> Var: + r""" +GlobalLpPool consumes an input tensor X and applies lp pool pooling +across the values in the same channel. This is equivalent to LpPool with +kernel size equal to the spatial dimension of input tensor. + +Parameters +========== +X + Type T. + Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. +p + Attribute. + p value of the Lp norm used to pool over the input data. + +Returns +======= +Y : Var + Type T. + Output data tensor from pooling across the input tensor. The output + tensor has the same rank as the input. The first two dimensions of + output shape are the same as the input (N x C), while the other + dimensions are all 1. + +Notes +===== +Signature: ``ai.onnx@2::GlobalLpPool``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _GlobalLpPool( + _GlobalLpPool.Attributes( + p=AttrInt64(p, name="p"), + ), _GlobalLpPool.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def global_max_pool(X: Var, ) -> Var: + r""" +GlobalMaxPool consumes an input tensor X and applies max pooling across +the values in the same channel. This is equivalent to MaxPool with +kernel size equal to the spatial dimension of input tensor. + +Parameters +========== +X + Type T. + Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. 
For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. + +Returns +======= +Y : Var + Type T. + Output data tensor from pooling across the input tensor. The output + tensor has the same rank as the input. The first two dimensions of + output shape are the same as the input (N x C), while the other + dimensions are all 1. + +Notes +===== +Signature: ``ai.onnx@1::GlobalMaxPool``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _GlobalMaxPool( + _GlobalMaxPool.Attributes( + ), _GlobalMaxPool.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def greater(A: Var, B: Var, ) -> Var: + r""" +Returns the tensor resulted from performing the ``greater`` logical +operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the logical operator. +B + Type T. + Second input operand for the logical operator. + +Returns +======= +C : Var + Type T1. + Result tensor. + +Notes +===== +Signature: ``ai.onnx@13::Greater``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(bool)` + """ + return _Greater( + _Greater.Attributes( + ), _Greater.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def greater_or_equal(A: Var, B: Var, ) -> Var: + r""" +Returns the tensor resulted from performing the ``greater_equal`` +logical operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the logical operator. +B + Type T. + Second input operand for the logical operator. + +Returns +======= +C : Var + Type T1. + Result tensor. + +Notes +===== +Signature: ``ai.onnx@16::GreaterOrEqual``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(bool)` + """ + return _GreaterOrEqual( + _GreaterOrEqual.Attributes( + ), _GreaterOrEqual.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def grid_sample(X: Var, grid: Var, *, align_corners: int = 0, mode: str = "bilinear", padding_mode: str = "zeros", ) -> Var: + r""" +Given an input ``X`` and a flow-field ``grid``, computes the output +``Y`` using ``X`` values and pixel locations from ``grid``. Currently, +only spatial (4-D) inputs are supported. For input ``X`` with shape (N, +C, H, W) and ``grid`` with shape (N, H_out, W_out, 2), the output ``Y`` +will have shape (N, C, H_out, W_out). + +The tensor ``X`` contains values at centers of square pixels in a H by W +2-dimensional image. 
The tensor ``grid`` describes normalized positions +where the output ``Y`` is to be computed using a specified interpolation +method (the mode) and a padding mode (for grid positions falling outside +the 2-dimensional image). + +Elements in ``grid[N, H_out, W_out]`` are size-2 vectors specifying +positions in the 2-dimensional space of ``X``. They are used to +interpolate output values of ``Y[N, C, H_out, W_out]``. + +The GridSample operator is often used in doing grid generator and +sampler in the `Spatial Transformer +Networks `__. See also in +`torch.nn.functional.grid_sample `__. + +Parameters +========== +X + Type T1. + 4-D tensor of shape (N, C, H, W), where N is the batch size, C is the + numbers of channels, H and W are the height and width of the input data. +grid + Type T2. + Input offset, 4-D tensor of shape (N, H_out, W_out, 2), where H_out and + W_out are the height and width of grid and output, Grid specifies the + sampling pixel locations normalized by the input spatial dimensions. + Therefore, it should have most values in the range of [-1, 1]. If grid + has values outside the range of [-1, 1], the corresponding outputs will + be handled as defined by padding_mode. +align_corners + Attribute. + If align_corners=1, the extrema (-1 and 1) are considered as referring + to the center points of the input's corner pixels. If align_corners=0, + they are instead considered as referring to the corner points of the + input's corner pixels, making the sampling more resolution agnostic. +mode + Attribute. + Three interpolation modes: bilinear (default), nearest and bicubic. +padding_mode + Attribute. + Support padding modes for outside grid values: ``zeros``\ (default), + ``border``, ``reflection``. zeros: use 0 for out-of-bound grid + locations, border: use border values for out-of-bound grid locations, + reflection: use values at locations reflected by the border for + out-of-bound grid locations. If index 0 represents the margin pixel, the + reflected value at index -1 will be the same as the value at index 1. + For location far away from the border, it will keep being reflected + until becoming in bound. If pixel location x = -3.5 reflects by border + -1 and becomes x' = 1.5, then reflects by border 1 and becomes x'' = + 0.5. + +Returns +======= +Y : Var + Type T1. + 4-D tensor of shape (N, C, H_out, W_out) of sampled values. For integer + input types, intermediate values are computed as floating point and cast + to integer at the end. + +Notes +===== +Signature: ``ai.onnx@16::GridSample``. + +Type constraints: + - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _GridSample( + _GridSample.Attributes( + align_corners=AttrInt64(align_corners, name="align_corners"), + mode=AttrString(mode, name="mode"), + padding_mode=AttrString(padding_mode, name="padding_mode"), + ), _GridSample.Inputs( + X=unwrap_vars(X), grid=unwrap_vars(grid), ), ).get_output_vars( + X=get_value(X), grid=get_value(grid), ).Y + + +def hamming_window(size: Var, *, output_datatype: int = 1, periodic: int = 1, ) -> Var: + r""" +Generates a Hamming window as described in the paper +https://ieeexplore.ieee.org/document/1455106. + +Parameters +========== +size + Type T1. + A scalar value indicating the length of the window. 
+output_datatype + Attribute. + The data type of the output tensor. Strictly must be one of the values + from DataType enum in TensorProto whose values correspond to T2. The + default value is 1 = FLOAT. +periodic + Attribute. + If 1, returns a window to be used as periodic function. If 0, return a + symmetric window. When 'periodic' is specified, hann computes a window + of length size + 1 and returns the first size points. The default value + is 1. + +Returns +======= +output : Var + Type T2. + A Hamming window with length: size. The output has the shape: [size]. + +Notes +===== +Signature: ``ai.onnx@17::HammingWindow``. + +Type constraints: + - T1: `tensor(int32)`, `tensor(int64)` + - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _HammingWindow( + _HammingWindow.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + periodic=AttrInt64(periodic, name="periodic"), + ), _HammingWindow.Inputs( + size=unwrap_vars(size), ), ).get_output_vars( + size=get_value(size), ).output + + +def hann_window(size: Var, *, output_datatype: int = 1, periodic: int = 1, ) -> Var: + r""" +Generates a Hann window as described in the paper +https://ieeexplore.ieee.org/document/1455106. + +Parameters +========== +size + Type T1. + A scalar value indicating the length of the window. +output_datatype + Attribute. + The data type of the output tensor. Strictly must be one of the values + from DataType enum in TensorProto whose values correspond to T2. The + default value is 1 = FLOAT. +periodic + Attribute. + If 1, returns a window to be used as periodic function. If 0, return a + symmetric window. When 'periodic' is specified, hann computes a window + of length size + 1 and returns the first size points. The default value + is 1. + +Returns +======= +output : Var + Type T2. + A Hann window with length: size. The output has the shape: [size]. + +Notes +===== +Signature: ``ai.onnx@17::HannWindow``. + +Type constraints: + - T1: `tensor(int32)`, `tensor(int64)` + - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _HannWindow( + _HannWindow.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + periodic=AttrInt64(periodic, name="periodic"), + ), _HannWindow.Inputs( + size=unwrap_vars(size), ), ).get_output_vars( + size=get_value(size), ).output + + +def hard_sigmoid(X: Var, *, alpha: float = 0.20000000298023224, beta: float = 0.5, ) -> Var: + r""" +HardSigmoid takes one input data (Tensor) and produces one output +data (Tensor) where the HardSigmoid function, y = max(0, min(1, alpha +\* x + beta)), is applied to the tensor elementwise. + +Parameters +========== +X + Type T. + Input tensor +alpha + Attribute. + Value of alpha. +beta + Attribute. + Value of beta. + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@6::HardSigmoid``. 
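+
+A minimal sketch with the default coefficients, assuming a ``spox.argument``
+placeholder input (names are illustrative)::
+
+    import numpy as np
+    from spox import Tensor, argument
+
+    x = argument(Tensor(np.float32, ("N",)))
+    y = hard_sigmoid(x, alpha=0.2, beta=0.5)  # max(0, min(1, 0.2 * x + 0.5))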
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _HardSigmoid( + _HardSigmoid.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + beta=AttrFloat32(beta, name="beta"), + ), _HardSigmoid.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def hard_swish(X: Var, ) -> Var: + r""" +HardSwish takes one input data (Tensor) and produces one output data +(Tensor) where the HardSwish function, y = x \* max(0, min(1, alpha +\* x + beta)) = x \* HardSigmoid(x), where alpha = 1/6 and +beta = 0.5, is applied to the tensor elementwise. + +Parameters +========== +X + Type T. + Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@14::HardSwish``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _HardSwish( + _HardSwish.Attributes( + ), _HardSwish.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def hardmax(input: Var, *, axis: int = -1, ) -> Var: + r""" +The operator computes the hardmax values for the given input: + +Hardmax(element in input, axis) = 1 if the element is the first maximum +value along the specified axis, 0 otherwise + +The "axis" attribute indicates the dimension along which Hardmax will be +performed. The output tensor has the same shape and contains the Hardmax +values of the corresponding input. + +Parameters +========== +input + Type T. + The input tensor of rank >= axis. +axis + Attribute. + Describes the dimension Hardmax will be performed on. Negative value + means counting dimensions from the back. Accepted range is [-r, r-1] + where r = rank(input). + +Returns +======= +output : Var + Type T. + The output values with the same shape as the input tensor. + +Notes +===== +Signature: ``ai.onnx@13::Hardmax``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Hardmax( + _Hardmax.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _Hardmax.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def identity(input: Var, ) -> Var: + r""" +Identity operator + +Parameters +========== +input + Type V. + Input tensor + +Returns +======= +output : Var + Type V. + Tensor to copy input into. + +Notes +===== +Signature: ``ai.onnx@16::Identity``. 
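+
+A minimal sketch of a pass-through copy, assuming a ``spox.argument``
+placeholder input (names are illustrative)::
+
+    import numpy as np
+    from spox import Tensor, argument
+
+    x = argument(Tensor(np.float32, ("N",)))
+    y = identity(x)  # same type and shape as the input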
+ +Type constraints: + - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Identity( + _Identity.Attributes( + ), _Identity.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def if_(cond: Var, *, else_branch: Callable[[], Iterable[Var]], then_branch: Callable[[], Iterable[Var]], ) -> Sequence[Var]: + r""" +If conditional + +Parameters +========== +cond + Type B. + Condition for the if. The tensor must contain a single element. +else_branch + Attribute. + Graph to run if condition is false. Has N outputs: values you wish to be + live-out to the enclosing scope. The number of outputs must match the + number of outputs in the then_branch. +then_branch + Attribute. + Graph to run if condition is true. Has N outputs: values you wish to be + live-out to the enclosing scope. The number of outputs must match the + number of outputs in the else_branch. + +Returns +======= +outputs : Sequence[Var] + Type V. + Values that are live-out to the enclosing scope. The return values in + the ``then_branch`` and ``else_branch`` must be of the same data type. + The ``then_branch`` and ``else_branch`` may produce tensors with the + same element type and different shapes. If corresponding outputs from + the then-branch and the else-branch have static shapes S1 and S2, then + the shape of the corresponding output variable of the if-node (if + present) must be compatible with both S1 and S2 as it represents the + union of both possible shapes.For example, if in a model file, the first + output of ``then_branch`` is typed float tensor with shape [2] and the + first output of ``else_branch`` is another float tensor with shape [3], + If's first output should have (a) no shape set, or (b) a shape of rank 1 + with neither ``dim_value`` nor ``dim_param`` set, or (c) a shape of rank + 1 with a unique ``dim_param``. In contrast, the first output cannot have + the shape [2] since [2] and [3] are not compatible. + +Notes +===== +Signature: ``ai.onnx@16::If``. 
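``if_`` takes its branches as zero-argument callables that may close over variables from the enclosing scope; a sketch of the intended call pattern, under the same ``spox.opset.ai.onnx.v17`` import assumption::

    import numpy as np
    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op

    x = argument(Tensor(np.float32, ("N",)))
    cond = argument(Tensor(np.bool_, ()))  # must hold a single element
    # Both branches must return the same number of outputs with matching types.
    (result,) = op.if_(
        cond,
        then_branch=lambda: [op.relu(x)],
        else_branch=lambda: [op.neg(x)],
    )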
+ +Type constraints: + - B: `tensor(bool)` + - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + _else_branch_subgraph: Graph = subgraph( + (), + else_branch + ) + _then_branch_subgraph: Graph = subgraph( + (), + then_branch + ) + return _If( + _If.Attributes( + else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), + then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), + ), _If.Inputs( + cond=unwrap_vars(cond), ), out_variadic=len(_else_branch_subgraph.requested_results), ).get_output_vars( + cond=get_value(cond), ).outputs + + +def instance_normalization(input: Var, scale: Var, B: Var, *, epsilon: float = 9.999999747378752e-06, ) -> Var: + r""" +Carries out instance normalization as described in the paper +https://arxiv.org/abs/1607.08022. + +y = scale \* (x - mean) / sqrt(variance + epsilon) + B, where mean and +variance are computed per instance per channel. + +Parameters +========== +input + Type T. + Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. +scale + Type T. + The input 1-dimensional scale tensor of size C. +B + Type T. + The input 1-dimensional bias tensor of size C. +epsilon + Attribute. + The epsilon value to use to avoid division by zero. + +Returns +======= +output : Var + Type T. + The output tensor of the same shape as input. + +Notes +===== +Signature: ``ai.onnx@6::InstanceNormalization``. 
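A sketch of the per-channel normalization above, ``y = scale * (x - mean) / sqrt(variance + epsilon) + B``, under the same import assumption::

    import numpy as np
    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op

    x = argument(Tensor(np.float32, ("N", "C", "H", "W")))
    scale = argument(Tensor(np.float32, ("C",)))
    bias = argument(Tensor(np.float32, ("C",)))
    # Mean and variance are computed per instance, per channel.
    y = op.instance_normalization(x, scale, bias, epsilon=1e-5)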
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _InstanceNormalization( + _InstanceNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + ), _InstanceNormalization.Inputs( + input=unwrap_vars(input), scale=unwrap_vars(scale), B=unwrap_vars(B), ), ).get_output_vars( + input=get_value(input), scale=get_value(scale), B=get_value(B), ).output + + +def isinf(X: Var, *, detect_negative: int = 1, detect_positive: int = 1, ) -> Var: + r""" +Map infinity to true and other values to false. + +Parameters +========== +X + Type T1. + input +detect_negative + Attribute. + (Optional) Whether map negative infinity to true. Default to 1 so that + negative infinity induces true. Set this attribute to 0 if negative + infinity should be mapped to false. +detect_positive + Attribute. + (Optional) Whether map positive infinity to true. Default to 1 so that + positive infinity induces true. Set this attribute to 0 if positive + infinity should be mapped to false. + +Returns +======= +Y : Var + Type T2. + output + +Notes +===== +Signature: ``ai.onnx@10::IsInf``. + +Type constraints: + - T1: `tensor(double)`, `tensor(float)` + - T2: `tensor(bool)` + """ + return _IsInf( + _IsInf.Attributes( + detect_negative=AttrInt64(detect_negative, name="detect_negative"), + detect_positive=AttrInt64(detect_positive, name="detect_positive"), + ), _IsInf.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def isnan(X: Var, ) -> Var: + r""" +Returns which elements of the input are NaN. + +Parameters +========== +X + Type T1. + input - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Asinh( - _Asinh.Attributes(), - _Asinh.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) +Returns +======= +Y : Var + Type T2. + output + +Notes +===== +Signature: ``ai.onnx@13::IsNaN``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(bool)` + """ + return _IsNaN( + _IsNaN.Attributes( + ), _IsNaN.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def lrn(X: Var, *, alpha: float = 9.999999747378752e-05, beta: float = 0.75, bias: float = 1.0, size: int, ) -> Var: + r""" +Local Response Normalization proposed in the `AlexNet +paper `__. +It normalizes over local input regions. The local region is defined +across the channels. For an element ``X[n, c, d1, ..., dk]`` in a tensor +of shape ``(N x C x D1 x D2, ..., Dk)``, its region is +``{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}``. + +``square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2)``, where +``max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))``. + +``Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta`` + +Parameters +========== +X + Type T. + Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. 
+alpha + Attribute. + Scaling parameter. +beta + Attribute. + The exponent. +bias + Attribute. + +size + Attribute. + The number of channels to sum over + +Returns +======= +Y : Var + Type T. + Output tensor, which has the shape and type as input tensor + +Notes +===== +Signature: ``ai.onnx@13::LRN``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _LRN( + _LRN.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + beta=AttrFloat32(beta, name="beta"), + bias=AttrFloat32(bias, name="bias"), + size=AttrInt64(size, name="size"), + ), _LRN.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def lstm(X: Var, W: Var, R: Var, B: Optional[Var] = None, sequence_lens: Optional[Var] = None, initial_h: Optional[Var] = None, initial_c: Optional[Var] = None, P: Optional[Var] = None, *, activation_alpha: Optional[Iterable[float]] = None, activation_beta: Optional[Iterable[float]] = None, activations: Optional[Iterable[str]] = None, clip: Optional[float] = None, direction: str = "forward", hidden_size: Optional[int] = None, input_forget: int = 0, layout: int = 0, ) -> tuple[Var, Var, Var]: + r""" +Computes an one-layer LSTM. This operator is usually supported via some +custom implementation such as CuDNN. + +Notations: + +- ``X`` - input tensor +- ``i`` - input gate +- ``o`` - output gate +- ``f`` - forget gate +- ``c`` - cell gate +- ``t`` - time step (t-1 means previous time step) +- ``W[iofc]`` - W parameter weight matrix for input, output, forget, + and cell gates +- ``R[iofc]`` - R recurrence weight matrix for input, output, forget, + and cell gates +- ``Wb[iofc]`` - W bias vectors for input, output, forget, and cell + gates +- ``Rb[iofc]`` - R bias vectors for input, output, forget, and cell + gates +- ``P[iof]`` - P peephole weight vector for input, output, and forget + gates +- ``WB[iofc]`` - W parameter weight matrix for backward input, output, + forget, and cell gates +- ``RB[iofc]`` - R recurrence weight matrix for backward input, output, + forget, and cell gates +- ``WBb[iofc]`` - W bias vectors for backward input, output, forget, + and cell gates +- ``RBb[iofc]`` - R bias vectors for backward input, output, forget, + and cell gates +- ``PB[iof]`` - P peephole weight vector for backward input, output, + and forget gates +- ``H`` - Hidden state +- ``num_directions`` - 2 if direction == bidirectional else 1 + +Activation functions: + +- Relu(x) - max(0, x) +- Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) +- Sigmoid(x) - 1/(1 + e^{-x}) + +NOTE: Below are optional + +- Affine(x) - alpha*x + beta +- LeakyRelu(x) - x if x >= 0 else alpha \* x +- ThresholdedRelu(x) - x if x >= alpha else 0 +- ScaledTanh(x) - alpha\ *Tanh(beta*\ x) +- HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) +- Elu(x) - x if x >= 0 else alpha*(e^x - 1) +- Softsign(x) - x/(1 + \|x\|) +- Softplus(x) - log(1 + e^x) + +Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): + +- it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) +- ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) +- ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) +- Ct = ft (.) Ct-1 + it (.) ct +- ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) +- Ht = ot (.) h(Ct) This operator has **optional** inputs/outputs. See + `the doc `__ for + more details about the representation of optional arguments. An empty + string may be used in the place of an actual argument's name to + indicate a missing argument. 
Trailing optional arguments (those not + followed by an argument that is present) may also be simply omitted. + +Parameters +========== +X + Type T. + The input sequences packed (and potentially padded) into one 3-D tensor + with the shape of ``[seq_length, batch_size, input_size]``. +W + Type T. + The weight tensor for the gates. Concatenation of ``W[iofc]`` and + ``WB[iofc]`` (if bidirectional) along dimension 0. The tensor has shape + ``[num_directions, 4*hidden_size, input_size]``. +R + Type T. + The recurrence weight tensor. Concatenation of ``R[iofc]`` and + ``RB[iofc]`` (if bidirectional) along dimension 0. This tensor has shape + ``[num_directions, 4*hidden_size, hidden_size]``. +B + Type T. + The bias tensor for input gate. Concatenation of + ``[Wb[iofc], Rb[iofc]]``, and ``[WBb[iofc], RBb[iofc]]`` (if + bidirectional) along dimension 0. This tensor has shape + ``[num_directions, 8*hidden_size]``. Optional: If not specified - + assumed to be 0. +sequence_lens + Type T1. + Optional tensor specifying lengths of the sequences in a batch. If not + specified - assumed all sequences in the batch to have length + ``seq_length``. It has shape ``[batch_size]``. +initial_h + Type T. + Optional initial value of the hidden. If not specified - assumed to be + 0. It has shape ``[num_directions, batch_size, hidden_size]``. +initial_c + Type T. + Optional initial value of the cell. If not specified - assumed to be 0. + It has shape ``[num_directions, batch_size, hidden_size]``. +P + Type T. + The weight tensor for peepholes. Concatenation of ``P[iof]`` and + ``PB[iof]`` (if bidirectional) along dimension 0. It has shape + ``[num_directions, 3*hidde_size]``. Optional: If not specified - assumed + to be 0. +activation_alpha + Attribute. + Optional scaling values used by some activation functions. The values + are consumed in the order of activation functions, for example (f, g, h) + in LSTM. Default values are the same as of corresponding ONNX + operators.For example with LeakyRelu, the default alpha is 0.01. +activation_beta + Attribute. + Optional scaling values used by some activation functions. The values + are consumed in the order of activation functions, for example (f, g, h) + in LSTM. Default values are the same as of corresponding ONNX operators. +activations + Attribute. + A list of 3 (or 6 if bidirectional) activation functions for input, + output, forget, cell, and hidden. The activation functions must be one + of the activation functions specified above. Optional: See the equations + for default if not specified. +clip + Attribute. + Cell clip threshold. Clipping bounds the elements of a tensor in the + range of [-threshold, +threshold] and is applied to the input of + activations. No clip if not specified. +direction + Attribute. + Specify if the RNN is forward, reverse, or bidirectional. Must be one of + forward (default), reverse, or bidirectional. +hidden_size + Attribute. + Number of neurons in the hidden layer +input_forget + Attribute. + Couple the input and forget gates if 1. +layout + Attribute. + The shape format of inputs X, initial_h, initial_c and outputs Y, Y_h, + Y_c. If 0, the following shapes are expected: X.shape = [seq_length, + batch_size, input_size], Y.shape = [seq_length, num_directions, + batch_size, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape + = Y_c.shape = [num_directions, batch_size, hidden_size]. 
If 1, the + following shapes are expected: X.shape = [batch_size, seq_length, + input_size], Y.shape = [batch_size, seq_length, num_directions, + hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape + = [batch_size, num_directions, hidden_size]. + +Returns +======= +Y : Var + Type T. + A tensor that concats all the intermediate output values of the hidden. + It has shape ``[seq_length, num_directions, batch_size, hidden_size]``. +Y_h : Var + Type T. + The last output value of the hidden. It has shape + ``[num_directions, batch_size, hidden_size]``. +Y_c : Var + Type T. + The last output value of the cell. It has shape + ``[num_directions, batch_size, hidden_size]``. + +Notes +===== +Signature: ``ai.onnx@14::LSTM``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T1: `tensor(int32)` + """ + return _LSTM( + _LSTM.Attributes( + activation_alpha=AttrFloat32s.maybe(activation_alpha, name="activation_alpha"), + activation_beta=AttrFloat32s.maybe(activation_beta, name="activation_beta"), + activations=AttrStrings.maybe(activations, name="activations"), + clip=AttrFloat32.maybe(clip, name="clip"), + direction=AttrString(direction, name="direction"), + hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), + input_forget=AttrInt64(input_forget, name="input_forget"), + layout=AttrInt64(layout, name="layout"), + ), _LSTM.Inputs( + X=unwrap_vars(X), W=unwrap_vars(W), R=unwrap_vars(R), B=unwrap_vars(B), sequence_lens=unwrap_vars(sequence_lens), initial_h=unwrap_vars(initial_h), initial_c=unwrap_vars(initial_c), P=unwrap_vars(P), ), ).get_output_vars( + X=get_value(X), W=get_value(W), R=get_value(R), B=get_value(B), sequence_lens=get_value(sequence_lens), initial_h=get_value(initial_h), initial_c=get_value(initial_c), P=get_value(P), )._unpack_to_any() + + +def layer_normalization(X: Var, Scale: Var, B: Optional[Var] = None, *, axis: int = -1, epsilon: float = 9.999999747378752e-06, stash_type: int = 1, ) -> tuple[Var, Var, Var]: + r""" +This is layer normalization defined in ONNX as function. The overall +computation can be split into two stages. The first stage is +standardization, which makes the normalized elements have zero mean and +unit variances. The computation required by standardization can be +described by the following equations. +``Mean = ReduceMean(X) D = Sub(X, Mean) DD = Mul(D, D) Var = ReduceMean(DD) VarEps = Add(Var, epsilon) StdDev = Sqrt(VarEps) InvStdDev = Reciprocal(StdDev) Normalized = Mul(D, InvStdDev)`` +where ``normalized_axes`` is ``[axis, ..., rank of X - 1]``. The +variables ``Var`` and ``StdDev`` stand for variance and standard +deviation, respectively. The second output is ``Mean`` and the last one +is ``InvStdDev``. Depending on ``stash_type`` attribute, the actual +computation must happen in different floating-point precision. For +example, if ``stash_type`` is 1, this operator casts all input variables +to 32-bit float, perform the computation, and finally cast +``Normalized`` back to the original type of ``X``. The second stage then +scales and shifts the outcome of the first stage using +``NormalizedScaled = Mul(Normalized, Scale) Y = Add(NormalizedScaled, B)`` +The second stage doesn't depends on ``stash_type``. All equations are in +`this syntax `__. +The same variable (i.e., input, output, and attribute) uses the same +name in the equations above and this operator's definition. Let ``d[i]`` +indicate the i-th dimension of ``X``. 
If ``X``'s shape is +``[d[0], ..., d[axis-1], d[axis], ..., d[rank-1]]``, the shape of +``Mean`` and ``InvStdDev`` is ``[d[0], ..., d[axis-1], 1, ..., 1]``. +``Y`` and ``X`` have the same shape. This operator supports +unidirectional broadcasting (tensors ``Scale`` and ``B`` should be +unidirectional broadcastable to tensor ``X``); for more details please +check `the +doc `__. + +Parameters +========== +X + Type T. + Tensor to be normalized. +Scale + Type T. + Scale tensor. +B + Type T. + Bias tensor. +axis + Attribute. + The first normalization dimension. If rank(X) is r, axis' allowed range + is [-r, r). Negative value means counting dimensions from the back. +epsilon + Attribute. + The epsilon value to use to avoid division by zero. +stash_type + Attribute. + Type of Mean and InvStdDev. This also specifies stage one's computation + precision. + +Returns +======= +Y : Var + Type T. + Normalized tensor. +Mean : Var + Type U. + Saved mean used during training to speed up gradient computation +InvStdDev : Var + Type U. + Saved inverse standard deviation used during training to speed up + gradient computation. + +Notes +===== +Signature: ``ai.onnx@17::LayerNormalization``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - U: `tensor(bfloat16)`, `tensor(float)` + """ + return _LayerNormalization( + _LayerNormalization.Attributes( + axis=AttrInt64(axis, name="axis"), + epsilon=AttrFloat32(epsilon, name="epsilon"), + stash_type=AttrInt64(stash_type, name="stash_type"), + ), _LayerNormalization.Inputs( + X=unwrap_vars(X), Scale=unwrap_vars(Scale), B=unwrap_vars(B), ), ).get_output_vars( + X=get_value(X), Scale=get_value(Scale), B=get_value(B), )._unpack_to_any() + + +def leaky_relu(X: Var, *, alpha: float = 0.009999999776482582, ) -> Var: + r""" +LeakyRelu takes input data (Tensor) and an argument alpha, and +produces one output data (Tensor) where the function +``f(x) = alpha * x for x < 0``, ``f(x) = x for x >= 0``, is applied to +the data tensor elementwise. + +Parameters +========== +X + Type T. + Input tensor +alpha + Attribute. + Coefficient of leakage. + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@16::LeakyRelu``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _LeakyRelu( + _LeakyRelu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), _LeakyRelu.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def less(A: Var, B: Var, ) -> Var: + r""" +Returns the tensor resulted from performing the ``less`` logical +operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the logical operator. +B + Type T. + Second input operand for the logical operator. + +Returns +======= +C : Var + Type T1. + Result tensor. + +Notes +===== +Signature: ``ai.onnx@13::Less``. 
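``less`` follows NumPy-style multidirectional broadcasting and yields a boolean tensor; a small sketch under the same import assumption::

    import numpy as np
    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op

    a = argument(Tensor(np.float32, ("N", 3)))
    b = argument(Tensor(np.float32, (3,)))
    # b is broadcast across the leading axis of a; the result has dtype bool.
    mask = op.less(a, b)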
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(bool)` + """ + return _Less( + _Less.Attributes( + ), _Less.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def less_or_equal(A: Var, B: Var, ) -> Var: + r""" +Returns the tensor resulted from performing the ``less_equal`` logical +operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the logical operator. +B + Type T. + Second input operand for the logical operator. + +Returns +======= +C : Var + Type T1. + Result tensor. + +Notes +===== +Signature: ``ai.onnx@16::LessOrEqual``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(bool)` + """ + return _LessOrEqual( + _LessOrEqual.Attributes( + ), _LessOrEqual.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def log(input: Var, ) -> Var: + r""" +Calculates the natural log of the given input tensor, element-wise. +Parameters +========== +input + Type T. + Input tensor -def atan( - input: Var, -) -> Var: - r""" - Calculates the arctangent (inverse of tangent) of the given input - tensor, element-wise. +Returns +======= +output : Var + Type T. + The natural log of the input tensor computed element-wise - Parameters - ========== - input - Type T. - Input tensor +Notes +===== +Signature: ``ai.onnx@13::Log``. - Returns - ======= - output : Var - Type T. - The arctangent of the input tensor computed element-wise +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Log( + _Log.Attributes( + ), _Log.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def log_softmax(input: Var, *, axis: int = -1, ) -> Var: + r""" +The operator computes the log of softmax values for the given input: + +LogSoftmax(input, axis) = Log(Softmax(input, axis=axis)) + +The "axis" attribute indicates the dimension along which LogSoftmax will +be performed. The output tensor has the same shape and contains the +LogSoftmax values of the corresponding input. + +Parameters +========== +input + Type T. + The input tensor of rank >= axis. +axis + Attribute. + Describes the dimension LogSoftmax will be performed on. Negative value + means counting dimensions from the back. Accepted range is [-r, r-1] + where r = rank(input). + +Returns +======= +output : Var + Type T. + The output values with the same shape as the input tensor. + +Notes +===== +Signature: ``ai.onnx@13::LogSoftmax``. 
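Since LogSoftmax is defined as ``Log(Softmax(input, axis))``, the constructor can be cross-checked against an explicit composition; a sketch under the same import assumption::

    import numpy as np
    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op

    x = argument(Tensor(np.float32, ("N", "C")))
    y = op.log_softmax(x, axis=-1)
    # Mathematically the same, though usually less numerically stable:
    y_ref = op.log(op.softmax(x, axis=-1))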
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _LogSoftmax( + _LogSoftmax.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _LogSoftmax.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def loop(M: Optional[Var] = None, cond: Optional[Var] = None, v_initial: Sequence[Var] = (), *, body: Callable[..., Iterable[Var]], ) -> Sequence[Var]: + r""" +Generic Looping construct. This loop has multiple termination +conditions: + +1) Trip count. Iteration count specified at runtime. Set by specifying + the input M. Optional. Set to empty string to omit. Note that a + static trip count (specified at graph construction time) can be + specified by passing in a constant node for input M. +2) Loop termination condition. This is an input to the op that + determines whether to run the first iteration and also a loop-carried + dependency for the body graph. The body graph must yield a value for + the condition variable, whether this input is provided or not. + +This table summarizes the operating modes of this operator with +equivalent C-style code: + +Operator inputs defined as (max_trip_count, condition_var). + +- input ("", ""): for (int i=0; ; ++i) { cond = ... // Note this value + is ignored, but is required in the body } + +- input ("", cond) // Note this is analogous to a while loop bool cond + = ...; for (int i=0; cond; ++i) { cond = ...; } + +- input ("", 1) // Note this is analogous to a do-while loop bool cond + = true for (int i=0; cond; ++i) { cond = ...; } + +- input (trip_count, "") // Note this is analogous to a for loop int + trip_count = ... for (int i=0; i < trip_count; ++i) { cond = ...; // + ignored } + +- input (trip_count, cond) int trip_count = ...; bool cond = ...; for + (int i=0; i < trip_count && cond; ++i) { cond = ...; } + +*Sample usage - cond as well as trip count* + +:: + + graph predict-net { + %a = Constant[value = ]() + %b = Constant[value = ]() + %keepgoing = Constant[value = ]() + %max_trip_count = Constant[value = ]() + %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b) + return + } + + graph body-net ( + %i[INT32, scalar] // iteration number + %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used + %b_in[INT32, scalar] // incoming value of loop-carried-dependency b + ) { + %my_local = Add(%a, %b_in) + %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b + %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition + %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated + return %keepgoing_out, %b_out, %user_defined_val + } + +*Sample equivalent C code* + +:: + + { + /* User-defined code (enclosing scope) */ + int a = 3, b = 6; + bool keepgoing = true; // Analogous to input cond + /* End user-defined code */ + + /* Implicitly-defined code */ + const int max_trip_count = 10; // Analogous to input M + int user_defined_vals[]; // Imagine this is resizable + /* End implicitly-defined code */ + /* initialize loop-carried variables and scan-output variables */ + bool keepgoing_out = keepgoing + int b_out = b + + for (int i=0; i < max_trip_count && keepgoing_out; ++i) { + /* Implicitly-defined code: bind actual parameter values + to formal parameter variables of loop-body */ + bool keepgoing_in = keepgoing_out; + bool b_in = b_out; + + /* User-defined code (loop body) */ + int my_local = a + b_in; // Reading value "a" from the 
enclosing scope is fine + b_out = a - b_in; + keepgoing_out = my_local > b_out; + user_defined_val = b_in + b_in; // b_in and b_out are different variables + /* End user-defined code */ + + /* Implicitly defined-code */ + user_defined_vals[i] = user_defined_val // accumulate scan-output values + } + // int t = my_local; // Can't do this. my_local is not accessible here. + + // The values below are bound to the output variables of the loop and therefore accessible + // b_out; user_defined_vals; keepgoing_out; + } + +There are several things of note in this code snippet: + +1) Values from the enclosing scope (i.e. variable "a" here) are in scope + and can be referenced in the inputs of the loop. +2) Any values computed in the loop body that needs to be used in a + subsequent iteration or after the loop are modelled using a pair of + variables in the loop-body, consisting of an input variable (eg., + b_in) and an output variable (eg., b_out). These are referred to as + loop-carried dependences. The loop operation node supplies the input + value of the input variable for the first iteration, and returns the + output value of the output variable produced by the final iteration. +3) Scan_output variables are used to implicitly concatenate values + computed across all the iterations. In the above example, the value + of user_defined_val computed over all iterations are concatenated and + returned as the value of user_defined_vals after the loop. +4) Values created in the body cannot be accessed in the enclosing scope, + except using the mechanism described above. + +Note that the semantics of this op support "diagonal" or "wavefront" +execution. (See Step 3 here for an example: +https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). +Frontends should emit multi-layer RNNs as a series of While operators +(with time being the inner looping dimension), with each successive +layer consuming the scan_outputs from the previous layer, possibly going +through several point-wise operators (e.g. dropout, residual +connections, linear layer). + +The input/output of subgraph (produced by loop node) matching is based +on order instead of name. The implementation will figure out the names +based on this order. + +Parameters +========== +M + Type I. + A maximum trip-count for the loop specified at runtime. Optional. Pass + empty string to skip. +cond + Type B. + A boolean termination condition. Optional. Pass empty string to skip. +v_initial + Type V. + The initial values of any loop-carried dependencies (values that change + across loop iterations) +body + Attribute. + The graph run each iteration. It has 2+N inputs: (iteration_num, + condition, loop carried dependencies...). It has 1+N+K outputs: + (condition, loop carried dependencies..., scan_outputs...). Each + scan_output is created by concatenating the value of the specified + output value at the end of each iteration of the loop. It is an error if + the dimensions or data type of these scan_outputs change across loop + iterations. + +Returns +======= +v_final_and_scan_outputs : Sequence[Var] + Type V. + Final N loop carried dependency values then K scan_outputs. Scan outputs + must be Tensors. + +Notes +===== +Signature: ``ai.onnx@16::Loop``. 
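The body graph receives ``(iteration_num, condition, *loop_carried)`` and must return ``(condition, *loop_carried, *scan_outputs)``. A sketch of the plain for-loop pattern (trip count given, condition passed through unchanged), under the same import assumption::

    import numpy as np
    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op

    m = argument(Tensor(np.int64, ()))        # maximum trip count
    start = argument(Tensor(np.int64, (1,)))  # initial loop-carried value

    def body(i, keep_going, acc):
        # Pass the termination condition through unchanged (for-loop style)
        # and add the current iteration index to the carried accumulator.
        return [keep_going, op.add(acc, i)]

    (total,) = op.loop(m, None, [start], body=body)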
+ +Type constraints: + - I: `tensor(int64)` + - B: `tensor(bool)` + - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + _body_subgraph: Graph = subgraph( + typing_cast(list[Type], [Tensor(np.int64, (1,)), Tensor(np.bool_, (1,))])+ [var.unwrap_type() for var in v_initial], + body + ) + return _Loop( + _Loop.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), _Loop.Inputs( + M=unwrap_vars(M), cond=unwrap_vars(cond), v_initial=unwrap_vars(v_initial), ), out_variadic=len(_body_subgraph.requested_results) - 1, ).get_output_vars( + M=get_value(M), cond=get_value(cond), v_initial=get_value(v_initial), ).v_final_and_scan_outputs + + +def lp_normalization(input: Var, *, axis: int = -1, p: int = 2, ) -> Var: + r""" +Given a matrix, apply Lp-normalization along the provided axis. + +Parameters +========== +input + Type T. + Input matrix +axis + Attribute. + The axis on which to apply normalization, -1 mean last axis. +p + Attribute. + The order of the normalization, only 1 or 2 are supported. + +Returns +======= +output : Var + Type T. + Matrix after normalization + +Notes +===== +Signature: ``ai.onnx@1::LpNormalization``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _LpNormalization( + _LpNormalization.Attributes( + axis=AttrInt64(axis, name="axis"), + p=AttrInt64(p, name="p"), + ), _LpNormalization.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def lp_pool(X: Var, *, auto_pad: str = "NOTSET", kernel_shape: Iterable[int], p: int = 2, pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: + r""" +LpPool consumes an input tensor X and applies Lp pooling across the +tensor according to kernel sizes, stride sizes, and pad lengths. 
Lp +pooling consisting of computing the Lp norm on all values of a subset of +the input tensor according to the kernel size and downsampling the data +into the output tensor Y for further processing. + +Parameters +========== +X + Type T. + Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. +auto_pad + Attribute. + auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where + default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that + ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis + ``i``. The padding is split between the two sides equally or almost + equally (depending on whether it is even or odd). In case the padding is + an odd number, the extra padding is added at the end for SAME_UPPER and + at the beginning for SAME_LOWER. +kernel_shape + Attribute. + The size of the kernel along each axis. +p + Attribute. + p value of the Lp norm used to pool over the input data. +pads + Attribute. + Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. ``pads`` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis ``i`` and xi_end, the number of pixels + added at the end of axis ``i``. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. +strides + Attribute. + Stride along each spatial axis. If not present, the stride defaults to 1 + along each spatial axis. + +Returns +======= +Y : Var + Type T. + Output data tensor from Lp pooling across the input tensor. Dimensions + will vary based on various kernel, stride, and pad sizes. + +Notes +===== +Signature: ``ai.onnx@11::LpPool``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _LpPool( + _LpPool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + p=AttrInt64(p, name="p"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _LpPool.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def matmul(A: Var, B: Var, ) -> Var: + r""" +Matrix product that behaves like numpy.matmul: +https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html + +Parameters +========== +A + Type T. + N-dimensional matrix A +B + Type T. + N-dimensional matrix B + +Returns +======= +Y : Var + Type T. + Matrix multiply results from A \* B + +Notes +===== +Signature: ``ai.onnx@13::MatMul``. 
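``matmul`` follows ``numpy.matmul`` semantics, including stacked (batched) matrix products; a sketch under the same import assumption::

    import numpy as np
    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op

    a = argument(Tensor(np.float32, ("B", "M", "K")))
    b = argument(Tensor(np.float32, ("B", "K", "N")))
    y = op.matmul(a, b)  # batched product of shape ("B", "M", "N")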
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _MatMul( + _MatMul.Attributes( + ), _MatMul.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).Y + + +def matmul_integer(A: Var, B: Var, a_zero_point: Optional[Var] = None, b_zero_point: Optional[Var] = None, ) -> Var: + r""" +Matrix product that behaves like numpy.matmul: +https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. +The production MUST never overflow. The accumulation may overflow if and +only if in 32 bits. + +Parameters +========== +A + Type T1. + N-dimensional matrix A +B + Type T2. + N-dimensional matrix B +a_zero_point + Type T1. + Zero point tensor for input 'A'. It's optional and default value is 0. + It could be a scalar or N-D tensor. Scalar refers to per tensor + quantization whereas N-D refers to per row quantization. If the input is + 2D of shape [M, K] then zero point tensor may be an M element vector + [zp_1, zp_2, ..., zp_M]. If the input is N-D tensor with shape [D1, D2, + M, K] then zero point tensor may have shape [D1, D2, M, 1]. +b_zero_point + Type T2. + Zero point tensor for input 'B'. It's optional and default value is 0. + It could be a scalar or a N-D tensor, Scalar refers to per tensor + quantization whereas N-D refers to per col quantization. If the input is + 2D of shape [K, N] then zero point tensor may be an N element vector + [zp_1, zp_2, ..., zp_N]. If the input is N-D tensor with shape [D1, D2, + K, N] then zero point tensor may have shape [D1, D2, 1, N]. + +Returns +======= +Y : Var + Type T3. + Matrix multiply results from A \* B + +Notes +===== +Signature: ``ai.onnx@10::MatMulInteger``. + +Type constraints: + - T1: `tensor(int8)`, `tensor(uint8)` + - T2: `tensor(int8)`, `tensor(uint8)` + - T3: `tensor(int32)` + """ + return _MatMulInteger( + _MatMulInteger.Attributes( + ), _MatMulInteger.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), a_zero_point=unwrap_vars(a_zero_point), b_zero_point=unwrap_vars(b_zero_point), ), ).get_output_vars( + A=get_value(A), B=get_value(B), a_zero_point=get_value(a_zero_point), b_zero_point=get_value(b_zero_point), ).Y + + +def max(data_0: Sequence[Var], ) -> Var: + r""" +Element-wise max of each of the input tensors (with Numpy-style +broadcasting support). All inputs and outputs must have the same data +type. This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +data_0 + Type T. + List of tensors for max. + +Returns +======= +max : Var + Type T. + Output tensor. + +Notes +===== +Signature: ``ai.onnx@13::Max``. 
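``max`` is variadic: it takes one Python sequence of variables rather than separate positional inputs. A sketch under the same import assumption::

    import numpy as np
    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op

    xs = [argument(Tensor(np.float32, ("N",))) for _ in range(3)]
    # Elementwise maximum over all inputs, with NumPy-style broadcasting.
    m = op.max(xs)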
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Max( + _Max.Attributes( + ), _Max.Inputs( + data_0=unwrap_vars(data_0), ), ).get_output_vars( + data_0=get_value(data_0), ).max + + +def max_pool(X: Var, *, auto_pad: str = "NOTSET", ceil_mode: int = 0, dilations: Optional[Iterable[int]] = None, kernel_shape: Iterable[int], pads: Optional[Iterable[int]] = None, storage_order: int = 0, strides: Optional[Iterable[int]] = None, ) -> tuple[Var, Var]: + r""" +MaxPool consumes an input tensor X and applies max pooling across the +tensor according to kernel sizes, stride sizes, and pad lengths. max +pooling consisting of computing the max on all values of a subset of the +input tensor according to the kernel size and downsampling the data into +the output tensor Y for further processing. The output spatial shape is +calculated differently depending on whether explicit padding is used, +where pads is employed, or auto padding is used, where auto_pad is +utilized. With explicit padding +(https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d): + +:: + + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) + +or + +:: + + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) + +if ceil_mode is enabled. ``pad_shape[i]`` is the sum of pads along axis +``i``. Sliding windows that would start in the right padded region are +ignored. + +``auto_pad`` is a DEPRECATED attribute. If you are using them currently, +the output spatial shape will be following when ceil_mode is enabled: + +:: + + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + +or when ceil_mode is disabled +(https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D): + +:: + + VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]) + 1 + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor((input_spatial_shape[i] - 1) / strides_spatial_shape[i]) + 1 + +And pad shape will be following if ``SAME_UPPER`` or ``SAME_LOWER``: + +:: + + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] + +The output of each pooling window is maximum number of elements exclude +pad. + +Parameters +========== +X + Type T. + Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. +auto_pad + Attribute. + auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. 
Where + default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that + ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis + ``i``. The padding is split between the two sides equally or almost + equally (depending on whether it is even or odd). In case the padding is + an odd number, the extra padding is added at the end for SAME_UPPER and + at the beginning for SAME_LOWER. +ceil_mode + Attribute. + Whether to use ceil or floor (default) to compute the output shape. +dilations + Attribute. + Dilation value along each spatial axis of filter. If not present, the + dilation defaults to 1 along each spatial axis. +kernel_shape + Attribute. + The size of the kernel along each axis. +pads + Attribute. + Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. ``pads`` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis ``i`` and xi_end, the number of pixels + added at the end of axis ``i``. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. +storage_order + Attribute. + The storage order of the tensor. 0 is row major, and 1 is column major. + This attribute is used only to convert an n-tuple index value into a + single integer value for producing the second output. +strides + Attribute. + Stride along each spatial axis. If not present, the stride defaults to 1 + along each spatial axis. + +Returns +======= +Y : Var + Type T. + Output data tensor from average or max pooling across the input tensor. + Dimensions will vary based on various kernel, stride, and pad sizes. + Floor value of the dimension is used +Indices : Var + Type I. + Indices tensor from max pooling across the input tensor. The dimensions + of indices are the same as output tensor. The values in indices of are + the indices of the selected values during pooling. The indices are + computed as flatten 1-D tensor, and the indices do not consider padding. + So the values in indices are in [0, N x C x D1 x ... x Dn). + +Notes +===== +Signature: ``ai.onnx@12::MaxPool``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int8)`, `tensor(uint8)` + - I: `tensor(int64)` + """ + return _MaxPool( + _MaxPool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + storage_order=AttrInt64(storage_order, name="storage_order"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _MaxPool.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), )._unpack_to_any() + + +def max_roi_pool(X: Var, rois: Var, *, pooled_shape: Iterable[int], spatial_scale: float = 1.0, ) -> Var: + r""" +ROI max pool consumes an input tensor X and region of interests (RoIs) +to apply max pooling across each RoI, to produce output 4-D tensor of +shape (num_rois, channels, pooled_shape[0], pooled_shape[1]). + +Parameters +========== +X + Type T. 
+ Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. +rois + Type T. + RoIs (Regions of Interest) to pool over. Should be a 2-D tensor of shape + (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], ...]. +pooled_shape + Attribute. + ROI pool output shape (height, width). +spatial_scale + Attribute. + Multiplicative spatial scale factor to translate ROI coordinates from + their input scale to the scale used when pooling. + +Returns +======= +Y : Var + Type T. + RoI pooled output 4-D tensor of shape (num_rois, channels, + pooled_shape[0], pooled_shape[1]). + +Notes +===== +Signature: ``ai.onnx@1::MaxRoiPool``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _MaxRoiPool( + _MaxRoiPool.Attributes( + pooled_shape=AttrInt64s(pooled_shape, name="pooled_shape"), + spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), + ), _MaxRoiPool.Inputs( + X=unwrap_vars(X), rois=unwrap_vars(rois), ), ).get_output_vars( + X=get_value(X), rois=get_value(rois), ).Y + + +def max_unpool(X: Var, I: Var, output_shape: Optional[Var] = None, *, kernel_shape: Iterable[int], pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: + r""" +MaxUnpool essentially computes the partial inverse of the MaxPool op. +The input information to this op is typically the output information +from a MaxPool op. The first input tensor X is the tensor that needs to +be unpooled, which is typically the pooled tensor (first output) from +MaxPool. The second input tensor, I, contains the indices to the +(locally maximal) elements corresponding to the elements in the first +input tensor X. Input tensor I is typically the second output of the +MaxPool op. The third (optional) input is a tensor that specifies the +output size of the unpooling operation. + +MaxUnpool is intended to do 'partial' inverse of the MaxPool op. +'Partial' because all the non-maximal values from the original input to +MaxPool are set to zero in the output of the MaxUnpool op. Pooling the +result of an unpooling operation should give back the original input to +the unpooling op. + +MaxUnpool can produce the same output size for several input sizes, +which makes unpooling op ambiguous. The third input argument, +output_size, is meant to disambiguate the op and produce output tensor +of known/predictable size. + +In addition to the inputs, MaxUnpool takes three attributes, namely +kernel_shape, strides, and pads, which define the exact unpooling op. +The attributes typically have the same values as the corresponding +pooling op that the unpooling op is trying to invert. + +Parameters +========== +X + Type T1. + Input data tensor that has to be unpooled. This tensor is typically the + first output of the MaxPool op.Dimensions for image case are (N x C x H + x W), where N is the batch size, C is the number of channels, and H and + W are the height and the width of the data. For non-image case, the + dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the + batch size. Optionally, if dimension denotation is in effect, the + operation expects the input data tensor to arrive with the dimension + denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE + ...]. +I + Type T2. 
+ Input data tensor containing the indices corresponding to elements in + the first input tensor X.This tensor is typically the second output of + the MaxPool op.Dimensions must be the same as input tensor X. The + indices are linear, i.e. computed considering the tensor as flattened + 1-D tensor, assuming row-major storage. Also, the linear indices should + not consider padding. So the values in indices are in the range [0, N x + C x D1 x ... x Dn). +output_shape + Type T2. + The shape of the output can be explicitly set which will cause pads + values to be auto generated. If 'output_shape' is specified, 'pads' + values are ignored. +kernel_shape + Attribute. + The size of the kernel along each axis. +pads + Attribute. + Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. ``pads`` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis ``i`` and xi_end, the number of pixels + added at the end of axis ``i``. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. +strides + Attribute. + Stride along each spatial axis. If not present, the stride defaults to 1 + along each spatial axis. + +Returns +======= +output : Var + Type T1. + Output data tensor that contains the result of the unpooling. + +Notes +===== +Signature: ``ai.onnx@11::MaxUnpool``. + +Type constraints: + - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(int64)` + """ + return _MaxUnpool( + _MaxUnpool.Attributes( + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _MaxUnpool.Inputs( + X=unwrap_vars(X), I=unwrap_vars(I), output_shape=unwrap_vars(output_shape), ), ).get_output_vars( + X=get_value(X), I=get_value(I), output_shape=get_value(output_shape), ).output + + +def mean(data_0: Sequence[Var], ) -> Var: + r""" +Element-wise mean of each of the input tensors (with Numpy-style +broadcasting support). All inputs and outputs must have the same data +type. This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +data_0 + Type T. + List of tensors for mean. + +Returns +======= +mean : Var + Type T. + Output tensor. + +Notes +===== +Signature: ``ai.onnx@13::Mean``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Mean( + _Mean.Attributes( + ), _Mean.Inputs( + data_0=unwrap_vars(data_0), ), ).get_output_vars( + data_0=get_value(data_0), ).mean + + +def mean_variance_normalization(X: Var, *, axes: Iterable[int] = (0, 2, 3), ) -> Var: + r""" +A MeanVarianceNormalization Function: Perform mean variance +normalization on the input tensor X using formula: +``(X-EX)/sqrt(E(X-EX)^2)`` + +Parameters +========== +X + Type T. + Input tensor +axes + Attribute. + A list of integers, along which to reduce. The default is to calculate + along axes [0,2,3] for calculating mean and variance along each channel. + Two variables with the same C-coordinate are associated with the same + mean and variance. + +Returns +======= +Y : Var + Type T. 
+ Output tensor + +Notes +===== +Signature: ``ai.onnx@13::MeanVarianceNormalization``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _MeanVarianceNormalization( + _MeanVarianceNormalization.Attributes( + axes=AttrInt64s(axes, name="axes"), + ), _MeanVarianceNormalization.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def mel_weight_matrix(num_mel_bins: Var, dft_length: Var, sample_rate: Var, lower_edge_hertz: Var, upper_edge_hertz: Var, *, output_datatype: int = 1, ) -> Var: + r""" +Generate a MelWeightMatrix that can be used to re-weight a Tensor +containing a linearly sampled frequency spectra (from DFT or STFT) into +num_mel_bins frequency information based on the [lower_edge_hertz, +upper_edge_hertz] range on the mel scale. This function defines the mel +scale in terms of a frequency in hertz according to the following +formula: + +:: + + mel(f) = 2595 * log10(1 + f/700) + +In the returned matrix, all the triangles (filterbanks) have a peak +value of 1.0. + +The returned MelWeightMatrix can be used to right-multiply a spectrogram +S of shape [frames, num_spectrogram_bins] of linear scale spectrum +values (e.g. STFT magnitudes) to generate a "mel spectrogram" M of shape +[frames, num_mel_bins]. + +Parameters +========== +num_mel_bins + Type T1. + The number of bands in the mel spectrum. +dft_length + Type T1. + The size of the original DFT. The size of the original DFT is used to + infer the size of the onesided DFT, which is understood to be + floor(dft_length/2) + 1, i.e. the spectrogram only contains the + nonredundant DFT bins. +sample_rate + Type T1. + Samples per second of the input signal used to create the spectrogram. + Used to figure out the frequencies corresponding to each spectrogram + bin, which dictates how they are mapped into the mel scale. +lower_edge_hertz + Type T2. + Lower bound on the frequencies to be included in the mel spectrum. This + corresponds to the lower edge of the lowest triangular band. +upper_edge_hertz + Type T2. + The desired top edge of the highest frequency band. +output_datatype + Attribute. + The data type of the output tensor. Strictly must be one of the values + from DataType enum in TensorProto whose values correspond to T3. The + default value is 1 = FLOAT. + +Returns +======= +output : Var + Type T3. + The Mel Weight Matrix. The output has the shape: [floor(dft_length/2) + + 1][num_mel_bins]. + +Notes +===== +Signature: ``ai.onnx@17::MelWeightMatrix``. 
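The mel-scale mapping used by ``mel_weight_matrix`` is simple enough to check directly; a minimal NumPy sketch of the documented formula (the triangular filterbank construction itself is left to the operator)::

    import numpy as np

    def hz_to_mel(f):
        # mel(f) = 2595 * log10(1 + f/700), as quoted above
        return 2595.0 * np.log10(1.0 + np.asarray(f, dtype=np.float64) / 700.0)

    # lower and upper band edges in hertz mapped onto the mel scale
    print(hz_to_mel([0.0, 300.0, 8000.0]))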
+ +Type constraints: + - T1: `tensor(int32)`, `tensor(int64)` + - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T3: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _MelWeightMatrix( + _MelWeightMatrix.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + ), _MelWeightMatrix.Inputs( + num_mel_bins=unwrap_vars(num_mel_bins), dft_length=unwrap_vars(dft_length), sample_rate=unwrap_vars(sample_rate), lower_edge_hertz=unwrap_vars(lower_edge_hertz), upper_edge_hertz=unwrap_vars(upper_edge_hertz), ), ).get_output_vars( + num_mel_bins=get_value(num_mel_bins), dft_length=get_value(dft_length), sample_rate=get_value(sample_rate), lower_edge_hertz=get_value(lower_edge_hertz), upper_edge_hertz=get_value(upper_edge_hertz), ).output + + +def min(data_0: Sequence[Var], ) -> Var: + r""" +Element-wise min of each of the input tensors (with Numpy-style +broadcasting support). All inputs and outputs must have the same data +type. This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +data_0 + Type T. + List of tensors for min. + +Returns +======= +min : Var + Type T. + Output tensor. + +Notes +===== +Signature: ``ai.onnx@13::Min``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Min( + _Min.Attributes( + ), _Min.Inputs( + data_0=unwrap_vars(data_0), ), ).get_output_vars( + data_0=get_value(data_0), ).min + + +def mod(A: Var, B: Var, *, fmod: int = 0, ) -> Var: + r""" +Performs element-wise binary modulus (with Numpy-style broadcasting +support). The sign of the remainder is the same as that of the Divisor. + +Mod operator can also behave like C fmod() or numpy.fmod. In this case, +the sign of the remainder however, will be the same as the Dividend (in +contrast to integer mod). To force a behavior like numpy.fmod() an +'fmod' Attribute is provided. This attribute is set to 0 by default +causing the behavior to be like integer mod. Setting this attribute to 1 +causes the remainder to be calculated similar to that of numpy.fmod(). + +If the input type is floating point, then ``fmod`` attribute must be set +to 1. + +In case of dividend being zero, the results will be platform dependent. + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + Dividend tensor +B + Type T. + Divisor tensor +fmod + Attribute. + Whether the operator should behave like fmod (default=0 meaning it will + do integer mods); Set this to 1 to force fmod treatment + +Returns +======= +C : Var + Type T. + Remainder tensor + +Notes +===== +Signature: ``ai.onnx@13::Mod``. 
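The two remainder conventions described for ``mod`` can be reproduced with NumPy: integer mod (``fmod=0``) takes the sign of the divisor, while ``fmod=1`` matches ``numpy.fmod`` and takes the sign of the dividend; a small sketch::

    import numpy as np

    a = np.array([-4, 7, 5], dtype=np.int64)
    b = np.array([3, -3, 8], dtype=np.int64)

    print(np.mod(a, b))   # [ 2 -2  5] -- sign follows the divisor (fmod=0)
    print(np.fmod(a, b))  # [-1  1  5] -- sign follows the dividend (fmod=1)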
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Mod( + _Mod.Attributes( + fmod=AttrInt64(fmod, name="fmod"), + ), _Mod.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def mul(A: Var, B: Var, ) -> Var: + r""" +Performs element-wise binary multiplication (with Numpy-style +broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +(Opset 14 change): Extend supported types to include uint8, int8, +uint16, and int16. + +Parameters +========== +A + Type T. + First operand. +B + Type T. + Second operand. + +Returns +======= +C : Var + Type T. + Result, has same element type as two inputs + +Notes +===== +Signature: ``ai.onnx@14::Mul``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Mul( + _Mul.Attributes( + ), _Mul.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def multinomial(input: Var, *, dtype: npt.DTypeLike = np.int32, sample_size: int = 1, seed: Optional[float] = None, ) -> Var: + r""" +Generate a tensor of samples from a multinomial distribution according +to the probabilities of each of the possible outcomes. + +Parameters +========== +input + Type T1. + Input tensor with shape [batch_size, class_size], where class_size is + the number of all possible outcomes. Each value along the axis zero + represents the unnormalized log-probability of each corresponding + outcome in a batch. +dtype + Attribute. + (Optional) The data type for the elements of the output tensor, if not + specified, we will use int32. +sample_size + Attribute. + Number of times to sample. +seed + Attribute. + (Optional) Seed to the random generator, if not specified we will auto + generate one. + +Returns +======= +output : Var + Type T2. + Output tensor with shape [batch_size, sample_size], where sample_size is + the number of times to sample. Each value along the axis zero represents + the outcome of the corresponding sample in a batch. - Notes - ===== - Signature: ``ai.onnx@7::Atan``. +Notes +===== +Signature: ``ai.onnx@7::Multinomial``. - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` +Type constraints: + - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(int32)`, `tensor(int64)` """ - return ( - _Atan( - _Atan.Attributes(), - _Atan.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _Multinomial( + _Multinomial.Attributes( + dtype=AttrDtype(dtype, name="dtype"), + sample_size=AttrInt64(sample_size, name="sample_size"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), _Multinomial.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def atanh( - input: Var, -) -> Var: +def neg(X: Var, ) -> Var: r""" - Calculates the hyperbolic arctangent of the given input tensor - element-wise. +Neg takes one input data (Tensor) and produces one output data +(Tensor) where each element flipped sign, y = -x, is applied to the +tensor elementwise. 
- Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The hyperbolic arctangent values of the input tensor computed - element-wise - - Notes - ===== - Signature: ``ai.onnx@9::Atanh``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Atanh( - _Atanh.Attributes(), - _Atanh.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) +Parameters +========== +X + Type T. + Input tensor +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@13::Neg``. -def average_pool( - X: Var, - *, - auto_pad: str = "NOTSET", - ceil_mode: int = 0, - count_include_pad: int = 0, - kernel_shape: Iterable[int], - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: - r""" - AveragePool consumes an input tensor X and applies average pooling - across the tensor according to kernel sizes, stride sizes, and pad - lengths. average pooling consisting of computing the average on all - values of a subset of the input tensor according to the kernel size and - downsampling the data into the output tensor Y for further processing. - The output spatial shape will be following: - - :: - - output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) - - or - - :: - - output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1) - - if ceil_mode is enabled - - :: - - * pad_shape[i] is sum of pads along axis i - - ``auto_pad`` is a DEPRECATED attribute. If you are using them currently, - the output spatial shape will be following when ceil_mode is enabled: - - :: - - VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) - - or when ceil_mode is disabled: - - :: - - VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor(input_spatial_shape[i] / strides_spatial_shape[i]) - - And pad shape will be following if ``SAME_UPPER`` or ``SAME_LOWER``: - - :: - - pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] - - The output of each pooling window is divided by the number of elements - (exclude pad when attribute count_include_pad is zero). - - Parameters - ========== - X - Type T. - Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. For non - image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), - where N is the batch size. Optionally, if dimension denotation is in - effect, the operation expects the input data tensor to arrive with the - dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, - DATA_FEATURE ...]. - auto_pad - Attribute. - auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where - default value is NOTSET, which means explicit padding is used. 
- SAME_UPPER or SAME_LOWER mean pad the input so that - ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis - ``i``. The padding is split between the two sides equally or almost - equally (depending on whether it is even or odd). In case the padding is - an odd number, the extra padding is added at the end for SAME_UPPER and - at the beginning for SAME_LOWER. - ceil_mode - Attribute. - Whether to use ceil or floor (default) to compute the output shape. - count_include_pad - Attribute. - Whether include pad pixels when calculating values for the edges. - Default is 0, doesn't count include pad. - kernel_shape - Attribute. - The size of the kernel along each axis. - pads - Attribute. - Padding for the beginning and ending along each spatial axis, it can - take any value greater than or equal to 0. The value represent the - number of pixels added to the beginning and end part of the - corresponding axis. ``pads`` format should be as follow [x1_begin, - x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels - added at the beginning of axis ``i`` and xi_end, the number of pixels - added at the end of axis ``i``. This attribute cannot be used - simultaneously with auto_pad attribute. If not present, the padding - defaults to 0 along start and end of each spatial axis. - strides - Attribute. - Stride along each spatial axis. If not present, the stride defaults to 1 - along each spatial axis. - - Returns - ======= - Y : Var - Type T. - Output data tensor from average or max pooling across the input tensor. - Dimensions will vary based on various kernel, stride, and pad sizes. - Floor value of the dimension is used - - Notes - ===== - Signature: ``ai.onnx@11::AveragePool``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)` """ - return ( - _AveragePool( - _AveragePool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - count_include_pad=AttrInt64( - count_include_pad, name="count_include_pad" - ), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _AveragePool.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) + return _Neg( + _Neg.Attributes( + ), _Neg.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y -def batch_normalization( - X: Var, - scale: Var, - B: Var, - input_mean: Var, - input_var: Var, - *, - epsilon: float = 9.999999747378752e-06, - momentum: float = 0.8999999761581421, - training_mode: int = 0, -) -> tuple[Var, Var, Var]: +def negative_log_likelihood_loss(input: Var, target: Var, weight: Optional[Var] = None, *, ignore_index: Optional[int] = None, reduction: str = "mean", ) -> Var: r""" - Carries out batch normalization as described in the paper - https://arxiv.org/abs/1502.03167. Depending on the mode it is being run, - There are five required inputs 'X', 'scale', 'B', 'input_mean' and - 'input_var'. Note that 'input_mean' and 'input_var' are expected to be - the estimated statistics in inference mode (training_mode=False, - default), and the running statistics in training mode - (training_mode=True). 
There are multiple cases for the number of - outputs, which we list below: - - - Output case #1: Y, running_mean, running_var (training_mode=True) - - Output case #2: Y (training_mode=False) - - When training_mode=False, extra outputs are invalid. The outputs are - updated as follows when training_mode=True: - - :: - - running_mean = input_mean * momentum + current_mean * (1 - momentum) - running_var = input_var * momentum + current_var * (1 - momentum) - - Y = (X - current_mean) / sqrt(current_var + epsilon) * scale + B - - where: - - :: - - current_mean = ReduceMean(X, axis=all_except_channel_index) - current_var = ReduceVar(X, axis=all_except_channel_index) - - Notice that ``ReduceVar`` refers to the population variance, and it - equals to ``sum(sqrd(x_i - x_avg)) / N`` where ``N`` is the population - size (this formula does not use sample size ``N - 1``). - - The computation of ReduceMean and ReduceVar uses float to avoid overflow - for float16 inputs. - - When training_mode=False: - - :: - - Y = (X - input_mean) / sqrt(input_var + epsilon) * scale + B - - For previous (depreciated) non-spatial cases, implementors are suggested - to flatten the input shape to (N x C \* D1 \* D2 \* ... \* Dn) before a - BatchNormalization Op. This operator has **optional** inputs/outputs. - See `the doc `__ for - more details about the representation of optional arguments. An empty - string may be used in the place of an actual argument's name to indicate - a missing argument. Trailing optional arguments (those not followed by - an argument that is present) may also be simply omitted. - - Parameters - ========== - X - Type T. - Input data tensor from the previous operator; dimensions are in the form - of (N x C x D1 x D2 ... Dn), where N is the batch size, C is the number - of channels. Statistics are computed for every channel of C over N and - D1 to Dn dimensions. For image data, input dimensions become (N x C x H - x W). The op also accepts single dimension input of size N in which case - C is assumed to be 1 - scale - Type T1. - Scale tensor of shape (C). - B - Type T1. - Bias tensor of shape (C). - input_mean - Type T2. - running (training) or estimated (testing) mean tensor of shape (C). - input_var - Type T2. - running (training) or estimated (testing) variance tensor of shape (C). - epsilon - Attribute. - The epsilon value to use to avoid division by zero. - momentum - Attribute. - Factor used in computing the running mean and variance.e.g., - running_mean = running_mean \* momentum + mean \* (1 - momentum). - training_mode - Attribute. - If set to true, it indicates BatchNormalization is being used for - training, and outputs 1 and 2 are to be computed. - - Returns - ======= - Y : Var - Type T. - The output tensor of the same shape as X - running_mean : Var - Type T2. - The running mean after the BatchNormalization operator. - running_var : Var - Type T2. - The running variance after the BatchNormalization operator. This op uses - the population size (N) for calculating variance, and not the sample - size N-1. - - Notes - ===== - Signature: ``ai.onnx@15::BatchNormalization``. 
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _BatchNormalization( - _BatchNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - momentum=AttrFloat32(momentum, name="momentum"), - training_mode=AttrInt64(training_mode, name="training_mode"), - ), - _BatchNormalization.Inputs( - X=unwrap_vars(X), - scale=unwrap_vars(scale), - B=unwrap_vars(B), - input_mean=unwrap_vars(input_mean), - input_var=unwrap_vars(input_var), - ), - ) - .get_output_vars( - X=get_value(X), - scale=get_value(scale), - B=get_value(B), - input_mean=get_value(input_mean), - input_var=get_value(input_var), - ) - ._unpack_to_any() - ) +A NegativeLogLikelihoodLoss operator computes (weighted) negative log +likelihood loss. Its "input" tensor has the shape of (N, C, d1, d2, ..., +dk) where k >= 0. The "input" tensor contains log-probabilities for +input[n, :, d_1, d_2,..., d_k] being in a class of [0, C). The +operator's "target" input tensor has the shape of (N, d1, d2, ..., dk). +It encodes class labels (one of C classes) or it may contain a special +value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x +dk samples. The loss value for input[n, :, d_1, d_2,...d_k] being +classified as class c = target[n][d_1][d_2]...[d_k] is computed as: + +:: + + loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k]. + +When an optional "weight" is provided, the sample loss is calculated as: + +:: + + loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c]. + +loss is zero for the case when target-value equals ignore_index. + +:: + + loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index + +If "reduction" attribute is set to "none", the operator's output will be +the above loss with shape (N, d1, d2, ..., dk). If "reduction" attribute +is set to "mean" (the default attribute value), the output loss is +(weight) averaged: + +:: + + mean(loss), if "weight" is not provided, + +or if weight is provided, + +:: + + sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples. + +If "reduction" attribute is set to "sum", the output is a scalar: +``sum(loss)``. + +See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss. + +Example 1: + +:: + + // negative log likelihood loss, "none" reduction + N, C, d1 = 2, 3, 2 + input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] + target = [[2, 1], [0, 2]] + + loss = np.zeros((N, d1)) + for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] + + // print(loss) + // [[-3. -2.] + // [-0. 
-2.]] + +Example 2: + +:: + + // weighted negative log likelihood loss, sum reduction + N, C, d1 = 2, 3, 2 + input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] + target = [[2, 1], [0, 2]] + weight = [0.2, 0.3, 0.1] + loss = np.zeros((N, d1)) + for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] * weight[c] + + loss = np.sum(loss) + // print(loss) + // -1.1 + +Example 3: + +:: + + // weighted negative log likelihood loss, mean reduction + N, C, d1 = 2, 3, 2 + input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], + [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] + target = [[2, 1], [0, 2]] + weight = [0.2, 0.3, 0.1] + loss = np.zeros((N, d1)) + weight_total = 0 + for n in range(N): + for d_1 in range(d1): + c = target[n][d_1] + loss[n][d_1] = -input[n][c][d_1] * weight[c] + weight_total = weight_total + weight[c] + + loss = np.sum(loss) / weight_total + // print(loss) + // -1.57 + +Parameters +========== +input + Type T. + Input tensor of shape (N, C) or (N, C, d1, d2, ..., dk). +target + Type Tind. + Target tensor of shape (N) or (N, d1, d2, ..., dk). Target element value + shall be in range of [0, C). If ignore_index is specified, it may have a + value outside [0, C) and the target values should either be in the range + [0, C) or have the value ignore_index. +weight + Type T. + Optional rescaling weight tensor. If given, it has to be a tensor of + size C. Otherwise, it is treated as if having all ones. +ignore_index + Attribute. + Specifies a target value that is ignored and does not contribute to the + input gradient. It's an optional value. +reduction + Attribute. + Type of reduction to apply to loss: none, sum, mean (default). 'none': + the output is the loss for each sample. 'sum': the output will be + summed. 'mean': the sum of the output will be divided by the sum of + applied weights. + +Returns +======= +loss : Var + Type T. + The negative log likelihood loss + +Notes +===== +Signature: ``ai.onnx@13::NegativeLogLikelihoodLoss``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + - Tind: `tensor(int32)`, `tensor(int64)` + """ + return _NegativeLogLikelihoodLoss( + _NegativeLogLikelihoodLoss.Attributes( + ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), + reduction=AttrString(reduction, name="reduction"), + ), _NegativeLogLikelihoodLoss.Inputs( + input=unwrap_vars(input), target=unwrap_vars(target), weight=unwrap_vars(weight), ), ).get_output_vars( + input=get_value(input), target=get_value(target), weight=get_value(weight), ).loss + + +def non_max_suppression(boxes: Var, scores: Var, max_output_boxes_per_class: Optional[Var] = None, iou_threshold: Optional[Var] = None, score_threshold: Optional[Var] = None, *, center_point_box: int = 0, ) -> Var: + r""" +Filter out boxes that have high intersection-over-union (IOU) overlap +with previously selected boxes. Bounding boxes with score less than +score_threshold are removed. Bounding box format is indicated by +attribute center_point_box. Note that this algorithm is agnostic to +where the origin is in the coordinate system and more generally is +invariant to orthogonal transformations and translations of the +coordinate system; thus translating or reflections of the coordinate +system result in the same boxes being selected by the algorithm. The +selected_indices output is a set of integers indexing into the input +collection of bounding boxes representing the selected boxes. 
The +bounding box coordinates corresponding to the selected indices can then +be obtained using the Gather or GatherND operation. + +Parameters +========== +boxes + Type tensor(float). + An input tensor with shape [num_batches, spatial_dimension, 4]. The + single box data format is indicated by center_point_box. +scores + Type tensor(float). + An input tensor with shape [num_batches, num_classes, spatial_dimension] +max_output_boxes_per_class + Type tensor(int64). + Integer representing the maximum number of boxes to be selected per + batch per class. It is a scalar. Default to 0, which means no output. +iou_threshold + Type tensor(float). + Float representing the threshold for deciding whether boxes overlap too + much with respect to IOU. It is scalar. Value range [0, 1]. Default to + 0. +score_threshold + Type tensor(float). + Float representing the threshold for deciding when to remove boxes based + on score. It is a scalar. +center_point_box + Attribute. + Integer indicate the format of the box data. The default is 0. 0 - the + box data is supplied as [y1, x1, y2, x2] where (y1, x1) and (y2, x2) are + the coordinates of any diagonal pair of box corners and the coordinates + can be provided as normalized (i.e., lying in the interval [0, 1]) or + absolute. Mostly used for TF models. 1 - the box data is supplied as + [x_center, y_center, width, height]. Mostly used for Pytorch models. + +Returns +======= +selected_indices : Var + Type tensor(int64). + selected indices from the boxes tensor. [num_selected_indices, 3], the + selected index format is [batch_index, class_index, box_index]. + +Notes +===== +Signature: ``ai.onnx@11::NonMaxSuppression``. + + """ + return _NonMaxSuppression( + _NonMaxSuppression.Attributes( + center_point_box=AttrInt64(center_point_box, name="center_point_box"), + ), _NonMaxSuppression.Inputs( + boxes=unwrap_vars(boxes), scores=unwrap_vars(scores), max_output_boxes_per_class=unwrap_vars(max_output_boxes_per_class), iou_threshold=unwrap_vars(iou_threshold), score_threshold=unwrap_vars(score_threshold), ), ).get_output_vars( + boxes=get_value(boxes), scores=get_value(scores), max_output_boxes_per_class=get_value(max_output_boxes_per_class), iou_threshold=get_value(iou_threshold), score_threshold=get_value(score_threshold), ).selected_indices + + +def non_zero(X: Var, ) -> Var: + r""" +Returns the indices of the elements that are non-zero (in row-major +order - by dimension). NonZero behaves similar to numpy.nonzero: +https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html, +but for scalar input, NonZero produces output shape (0, N) instead of +(1, N), which is different from Numpy's behavior. + +Parameters +========== +X + Type T. + input +Returns +======= +Y : Var + Type tensor(int64). + output + +Notes +===== +Signature: ``ai.onnx@13::NonZero``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _NonZero( + _NonZero.Attributes( + ), _NonZero.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def not_(X: Var, ) -> Var: + r""" +Returns the negation of the input tensor element-wise. + +Parameters +========== +X + Type T. + Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@1::Not``. 
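For ``non_zero`` above, the documented correspondence with ``numpy.nonzero`` amounts to stacking NumPy's per-axis index arrays into one ``(rank, n)`` tensor (only the scalar-input corner case differs, as noted)::

    import numpy as np

    x = np.array([[1, 0], [0, 3]], dtype=np.int64)
    indices = np.array(np.nonzero(x))  # shape (2, 2): one row of indices per axis
    print(indices)
    # [[0 1]
    #  [0 1]]   -> non-zero elements at positions (0, 0) and (1, 1)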
+ +Type constraints: + - T: `tensor(bool)` + """ + return _Not( + _Not.Attributes( + ), _Not.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def one_hot(indices: Var, depth: Var, values: Var, *, axis: int = -1, ) -> Var: + r""" +Produces a one-hot tensor based on inputs. The locations represented by +the index values in the 'indices' input tensor will have 'on_value' and +the other locations will have 'off_value' in the output tensor, where +'on_value' and 'off_value' are specified as part of required input +argument 'values', which is a two-element tensor of format [off_value, +on_value]. The rank of the output tensor will be one greater than the +rank of the input tensor. The additional dimension is for one-hot +representation. The additional dimension will be inserted at the +position specified by 'axis'. If 'axis' is not specified then the +additional dimension will be inserted as the innermost dimension, i.e. +axis=-1. The size of the additional dimension is specified by required +scalar input 'depth'. The type of the output tensor is the same as the +type of the 'values' input. Any entries in the 'indices' input tensor +with values outside the range [-depth, depth-1] will result in one-hot +representation with all 'off_value' values in the output tensor. + +:: + + when axis = 0: + output[input[i, j, k], i, j, k] = 1 for all i, j, k and 0 otherwise. + + when axis = -1: + output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise. + +Parameters +========== +indices + Type T1. + Input tensor containing indices. Any entries in the 'indices' input + tensor with values outside the range [-depth, depth-1] will result in + one-hot representation with all 'off_value' values in the output + tensor. In case 'indices' is of non-integer type, the values will be + cast to int64 before use. +depth + Type T2. + Scalar or Rank 1 tensor containing exactly one element, specifying the + number of classes in one-hot tensor. This is also the size of the + one-hot dimension (specified by 'axis' attribute) added on in the output + tensor. The values in the 'indices' input tensor are expected to be in + the range [-depth, depth-1]. In case 'depth' is of non-integer type, it + will be cast to int64 before use. +values + Type T3. + Rank 1 tensor containing exactly two elements, in the format [off_value, + on_value], where 'on_value' is the value used for filling locations + specified in 'indices' input tensor, and 'off_value' is the value used + for filling locations other than those specified in 'indices' input + tensor. +axis + Attribute. + (Optional) Axis along which the one-hot representation is added. Default: + axis=-1. axis=-1 means that the additional dimension will be inserted as + the innermost/last dimension in the output tensor. Negative value means + counting dimensions from the back. Accepted range is [-r-1, r] where r = + rank(indices). + +Returns +======= +output : Var + Type T3. + Tensor of rank one greater than input tensor 'indices', i.e. + rank(output) = rank(indices) + 1. The data type for the elements of the + output tensor is the same as the type of the 'values' input. + +Notes +===== +Signature: ``ai.onnx@11::OneHot``.
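A NumPy sketch of the indexing rule quoted above for ``one_hot``, restricted to ``axis=-1`` and non-negative indices to keep it short::

    import numpy as np

    indices = np.array([0, 2, 1], dtype=np.int64)
    depth = 3
    off_value, on_value = 0.0, 1.0

    output = np.full((*indices.shape, depth), off_value, dtype=np.float32)
    for i, c in enumerate(indices):
        if 0 <= c < depth:          # out-of-range indices stay all 'off_value'
            output[i, c] = on_value
    print(output)                   # rank(output) = rank(indices) + 1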
+ +Type constraints: + - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T3: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _OneHot( + _OneHot.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _OneHot.Inputs( + indices=unwrap_vars(indices), depth=unwrap_vars(depth), values=unwrap_vars(values), ), ).get_output_vars( + indices=get_value(indices), depth=get_value(depth), values=get_value(values), ).output + + +def optional(input: Optional[Var] = None, *, type: Optional[Type] = None, ) -> Var: + r""" +Constructs an optional-type value containing either an empty optional of +a certain type specified by the attribute, or a non-empty value +containing the input element. + +Parameters +========== +input + Type V. + The input element. +type + Attribute. + Type of the element in the optional output + +Returns +======= +output : Var + Type O. + The optional output enclosing the input element. + +Notes +===== +Signature: ``ai.onnx@15::Optional``. + +Type constraints: + - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` + """ + return _Optional( + _Optional.Attributes( + type=AttrType.maybe(type, name="type"), + ), _Optional.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def optional_get_element(input: Var, ) -> Var: + r""" +Outputs the element in the optional-type input. It is an error if the +input value does not have an element and the behavior is undefined in +this case. + +Parameters +========== +input + Type O. 
+ The optional input. + +Returns +======= +output : Var + Type V. + Output element in the optional input. + +Notes +===== +Signature: ``ai.onnx@15::OptionalGetElement``. + +Type constraints: + - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` + - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _OptionalGetElement( + _OptionalGetElement.Attributes( + ), _OptionalGetElement.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def optional_has_element(input: Var, ) -> Var: + r""" +Returns true if the optional-type input contains an element. If it is an +empty optional-type, this op returns false. + +Parameters +========== +input + Type O. + The optional input. + +Returns +======= +output : Var + Type B. + A scalar boolean tensor. If true, it indicates that optional-type input + contains an element. Otherwise, it is empty. + +Notes +===== +Signature: ``ai.onnx@15::OptionalHasElement``. 
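The three ``optional*`` constructors compose naturally. A hypothetical end-to-end sketch, assuming the public ``spox`` helpers (``argument``, ``build``, ``Tensor``) and that this generated module is importable as ``spox.opset.ai.onnx.v17``::

    import numpy as np
    from spox import Tensor, argument, build
    import spox.opset.ai.onnx.v17 as op  # assumed path for this generated module

    x = argument(Tensor(np.float32, (3,)))
    opt = op.optional(x)                  # wrap the value in an optional
    has = op.optional_has_element(opt)    # scalar tensor(bool)
    val = op.optional_get_element(opt)    # unwrap back to the element
    model = build(inputs={"x": x}, outputs={"has": has, "val": val})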
+ +Type constraints: + - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` + - B: `tensor(bool)` + """ + return _OptionalHasElement( + _OptionalHasElement.Attributes( + ), _OptionalHasElement.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def or_(A: Var, B: Var, ) -> Var: + r""" +Returns the tensor resulted from performing the ``or`` logical operation +elementwise on the input tensors ``A`` and ``B`` (with Numpy-style +broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. -def bernoulli( - input: Var, - *, - dtype: Optional[npt.DTypeLike] = None, - seed: Optional[float] = None, -) -> Var: - r""" - Draws binary random numbers (0 or 1) from a Bernoulli distribution. The - input tensor should be a tensor containing probabilities p (a value in - the range [0,1]) to be used for drawing the binary random number, where - an output of 1 is produced with probability p and an output of 0 is - produced with probability (1-p). +Parameters +========== +A + Type T. + First input operand for the logical operator. +B + Type T. + Second input operand for the logical operator. - This operator is non-deterministic and may not produce the same values - in different implementations (even if a seed is specified). +Returns +======= +C : Var + Type T1. + Result tensor. - Parameters - ========== - input - Type T1. - All values in input have to be in the range:[0, 1]. - dtype - Attribute. - The data type for the elements of the output tensor. if not specified, - we will use the data type of the input tensor. - seed - Attribute. - (Optional) Seed to the random generator, if not specified we will auto - generate one. - - Returns - ======= - output : Var - Type T2. - The returned output tensor only has values 0 or 1, same shape as input - tensor. - - Notes - ===== - Signature: ``ai.onnx@15::Bernoulli``. - - Type constraints: - - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Notes +===== +Signature: ``ai.onnx@7::Or``. 
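The multidirectional broadcasting claimed for ``or_`` mirrors NumPy's; for instance::

    import numpy as np

    a = np.array([[True], [False]])      # shape (2, 1)
    b = np.array([True, False, False])   # shape (3,)
    print(np.logical_or(a, b))           # broadcasts to shape (2, 3)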
+ +Type constraints: + - T: `tensor(bool)` + - T1: `tensor(bool)` """ - return ( - _Bernoulli( - _Bernoulli.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _Bernoulli.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _Or( + _Or.Attributes( + ), _Or.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C -def bit_shift( - X: Var, - Y: Var, - *, - direction: str, -) -> Var: +def prelu(X: Var, slope: Var, ) -> Var: r""" - Bitwise shift operator performs element-wise operation. For each input - element, if the attribute "direction" is "RIGHT", this operator moves - its binary representation toward the right side so that the input value - is effectively decreased. If the attribute "direction" is "LEFT", bits - of binary representation moves toward the left side, which results the - increase of its actual value. The input X is the tensor to be shifted - and another input Y specifies the amounts of shifting. For example, if - "direction" is "Right", X is [1, 4], and S is [1, 1], the corresponding - output Z would be [0, 2]. If "direction" is "LEFT" with X=[1, 2] and - S=[1, 2], the corresponding output Y would be [2, 8]. - - Because this operator supports Numpy-style broadcasting, X's and Y's - shapes are not necessarily identical. This operator supports - **multidirectional (i.e., Numpy-style) broadcasting**; for more details - please check `the - doc `__. - - Parameters - ========== - X - Type T. - First operand, input to be shifted. - Y - Type T. - Second operand, amounts of shift. - direction - Attribute. - Direction of moving bits. It can be either "RIGHT" (for right shift) or - "LEFT" (for left shift). - - Returns - ======= - Z : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@11::BitShift``. - - Type constraints: - - T: `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _BitShift( - _BitShift.Attributes( - direction=AttrString(direction, name="direction"), - ), - _BitShift.Inputs( - X=unwrap_vars(X), - Y=unwrap_vars(Y), - ), - ) - .get_output_vars( - X=get_value(X), - Y=get_value(Y), - ) - .Z - ) +PRelu takes input data (Tensor) and slope tensor as input, and +produces one output data (Tensor) where the function +``f(x) = slope * x for x < 0``, ``f(x) = x for x >= 0``., is applied to +the data tensor elementwise. This operator supports **unidirectional +broadcasting** (tensor slope should be unidirectional broadcastable to +input tensor X); for more details please check `the +doc `__. +Parameters +========== +X + Type T. + Input tensor +slope + Type T. + Slope tensor. The shape of slope can be smaller than first input X; if + so, its shape must be unidirectional broadcastable to X + +Returns +======= +Y : Var + Type T. + Output tensor (same size as X) -def blackman_window( - size: Var, - *, - output_datatype: int = 1, - periodic: int = 1, -) -> Var: - r""" - Generates a Blackman window as described in the paper - https://ieeexplore.ieee.org/document/1455106. - - Parameters - ========== - size - Type T1. - A scalar value indicating the length of the window. - output_datatype - Attribute. - The data type of the output tensor. Strictly must be one of the values - from DataType enum in TensorProto whose values correspond to T2. The - default value is 1 = FLOAT. - periodic - Attribute. - If 1, returns a window to be used as periodic function. 
If 0, return a - symmetric window. When 'periodic' is specified, hann computes a window - of length size + 1 and returns the first size points. The default value - is 1. - - Returns - ======= - output : Var - Type T2. - A Blackman window with length: size. The output has the shape: [size]. - - Notes - ===== - Signature: ``ai.onnx@17::BlackmanWindow``. - - Type constraints: - - T1: `tensor(int32)`, `tensor(int64)` - - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Notes +===== +Signature: ``ai.onnx@16::PRelu``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _PRelu( + _PRelu.Attributes( + ), _PRelu.Inputs( + X=unwrap_vars(X), slope=unwrap_vars(slope), ), ).get_output_vars( + X=get_value(X), slope=get_value(slope), ).Y + + +def pad(data: Var, pads: Var, constant_value: Optional[Var] = None, *, mode: str = "constant", ) -> Var: + r""" +Given a tensor containing the data to be padded (``data``), a tensor +containing the number of start and end pad values for axis (``pads``), +(optionally) a ``mode``, and (optionally) ``constant_value``, a padded +tensor (``output``) is generated. + +The three supported ``modes`` are (similar to corresponding modes +supported by ``numpy.pad``): + +1) ``constant``\ (default) - pads with a given constant value as + specified by ``constant_value`` (which defaults to 0, empty string, + or False) + +2) ``reflect`` - pads with the reflection of the vector mirrored on the + first and last values of the vector along each axis + +3) ``edge`` - pads with the edge values of array + +Example 1 (``constant`` mode): Insert 0 pads to the beginning of the +second dimension. + +data = [ [1.0, 1.2], [2.3, 3.4], [4.5, 5.7], ] + +pads = [0, 2, 0, 0] + +mode = 'constant' + +constant_value = 0.0 + +output = [ [0.0, 0.0, 1.0, 1.2], [0.0, 0.0, 2.3, 3.4], [0.0, 0.0, 4.5, +5.7], ] + +Example 2 (``reflect`` mode): data = [ [1.0, 1.2], [2.3, 3.4], [4.5, +5.7], ] + +pads = [0, 2, 0, 0] + +mode = 'reflect' + +output = [ [1.0, 1.2, 1.0, 1.2], [2.3, 3.4, 2.3, 3.4], [4.5, 5.7, 4.5, +5.7], ] + +Example 3 (``edge`` mode): data = [ [1.0, 1.2], [2.3, 3.4], [4.5, 5.7], +] + +pads = [0, 2, 0, 0] + +mode = 'edge' + +output = [ [1.0, 1.0, 1.0, 1.2], [2.3, 2.3, 2.3, 3.4], [4.5, 4.5, 4.5, +5.7], ] + +Parameters +========== +data + Type T. + Input tensor. +pads + Type tensor(int64). + Tensor of integers indicating the number of padding elements to add or + remove (if negative) at the beginning and end of each axis. For 2D input + tensor, it is the number of pixels. ``pads`` should be a 1D tensor of + shape [2 \* input_rank]. ``pads`` format should be: [x1_begin, + x2_begin,...,x1_end, x2_end,...], where xi_begin is the number of pad + values added at the beginning of axis ``i`` and xi_end, the number of + pad values added at the end of axis ``i``. +constant_value + Type T. + (Optional) A scalar value to be used if the mode chosen is ``constant`` + (by default it is 0, empty string or False). +mode + Attribute. + Supported modes: ``constant``\ (default), ``reflect``, ``edge`` + +Returns +======= +output : Var + Type T. + Tensor after padding. + +Notes +===== +Signature: ``ai.onnx@13::Pad``. 
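The ``pads`` layout used by ``pad`` above ([x1_begin, x2_begin, ..., x1_end, x2_end]) maps onto ``numpy.pad``'s per-axis (begin, end) pairs; a sketch reproducing Example 1 from the docstring::

    import numpy as np

    data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]], dtype=np.float32)
    pads = [0, 2, 0, 0]                            # ONNX layout: all begins, then all ends
    rank = data.ndim
    np_pads = list(zip(pads[:rank], pads[rank:]))  # [(0, 0), (2, 0)]
    print(np.pad(data, np_pads, mode="constant", constant_values=0.0))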
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), _Pad.Inputs( + data=unwrap_vars(data), pads=unwrap_vars(pads), constant_value=unwrap_vars(constant_value), ), ).get_output_vars( + data=get_value(data), pads=get_value(pads), constant_value=get_value(constant_value), ).output + + +def pow(X: Var, Y: Var, ) -> Var: + r""" +Pow takes input data (Tensor) and exponent Tensor, and produces one +output data (Tensor) where the function ``f(x) = x^exponent``, is +applied to the data tensor elementwise. This operator supports +**multidirectional (i.e., Numpy-style) broadcasting**; for more details +please check `the +doc `__. + +Parameters +========== +X + Type T. + First operand, base of the exponent. +Y + Type T1. + Second operand, power of the exponent. + +Returns +======= +Z : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@15::Pow``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)` + - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Pow( + _Pow.Attributes( + ), _Pow.Inputs( + X=unwrap_vars(X), Y=unwrap_vars(Y), ), ).get_output_vars( + X=get_value(X), Y=get_value(Y), ).Z + + +def qlinear_conv(x: Var, x_scale: Var, x_zero_point: Var, w: Var, w_scale: Var, w_zero_point: Var, y_scale: Var, y_zero_point: Var, B: Optional[Var] = None, *, auto_pad: str = "NOTSET", dilations: Optional[Iterable[int]] = None, group: int = 1, kernel_shape: Optional[Iterable[int]] = None, pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: + r""" +The convolution operator consumes a quantized input tensor, its scale +and zero point, a quantized filter, its scale and zero point, and +output's scale and zero point, and computes the quantized output. Each +scale and zero-point pair must have same shape. It means they must be +either scalars (per tensor) or 1-D tensors (per output channel). Each +input or output and its related zero point must have same type. When +bias is present it must be quantized using scale = input scale \* weight +scale and zero point as 0. + +Parameters +========== +x + Type T1. + Input data tensor from previous layer; has size (N x C x H x W), where N + is the batch size, C is the number of channels, and H and W are the + height and width. Note that this is for the 2D image. Otherwise the size + is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in + effect, the operation expects input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. +x_scale + Type tensor(float). + Scale tensor for input 'x'. It's a scalar, which means a + per-tensor/layer quantization. +x_zero_point + Type T1. + Zero point tensor for input 'x'. It's a scalar, which means a + per-tensor/layer quantization. +w + Type T2. 
+ The weight tensor that will be used in the convolutions; has size (M x + C/group x kH x kW), where C is the number of channels, and kH and kW are + the height and width of the kernel, and M is the number of feature maps. + For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x + k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. + Optionally, if dimension denotation is in effect, the operation expects + the weight tensor to arrive with the dimension denotation of + [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL + ...]. X.shape[1] == (W.shape[1] \* group) == C (assuming zero based + indices for the shape array). Or in other words FILTER_IN_CHANNEL should + be equal to DATA_CHANNEL. +w_scale + Type tensor(float). + Scale tensor for input 'w'. It could be a scalar or a 1-D tensor, which + means a per-tensor/layer or per output channel quantization. If it's a + 1-D tensor, its number of elements should be equal to the number of + output channels (M). +w_zero_point + Type T2. + Zero point tensor for input 'w'. It could be a scalar or a 1-D tensor, + which means a per-tensor/layer or per output channel quantization. If + it's a 1-D tensor, its number of elements should be equal to the number + of output channels (M). +y_scale + Type tensor(float). + Scale tensor for output 'y'. It's a scalar, which means a + per-tensor/layer quantization. +y_zero_point + Type T3. + Zero point tensor for output 'y'. It's a scalar, which means a + per-tensor/layer quantization. +B + Type T4. + Optional 1D bias to be added to the convolution, has size of M. Bias + must be quantized using scale = x_scale \* w_scale and zero_point = 0 +auto_pad + Attribute. + auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where + default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that + ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis + ``i``. The padding is split between the two sides equally or almost + equally (depending on whether it is even or odd). In case the padding is + an odd number, the extra padding is added at the end for SAME_UPPER and + at the beginning for SAME_LOWER. +dilations + Attribute. + dilation value along each spatial axis of the filter. If not present, + the dilation defaults to 1 along each spatial axis. +group + Attribute. + number of groups input channels and output channels are divided into. + default is 1. +kernel_shape + Attribute. + The shape of the convolution kernel. If not present, should be inferred + from input 'w'. +pads + Attribute. + Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0.The value represent the number + of pixels added to the beginning and end part of the corresponding + axis.\ ``pads`` format should be as follow [x1_begin, x2_begin...x1_end, + x2_end,...], where xi_begin the number ofpixels added at the beginning + of axis ``i`` and xi_end, the number of pixels added at the end of axis + ``i``.This attribute cannot be used simultaneously with auto_pad + attribute. If not present, the padding defaultsto 0 along start and end + of each spatial axis. +strides + Attribute. + Stride along each spatial axis. If not present, the stride defaults to 1 + along each spatial axis. + +Returns +======= +y : Var + Type T3. + Output data tensor that contains the result of the convolution. 
The + output dimensions are functions of the kernel size, stride size, and pad + lengths. + +Notes +===== +Signature: ``ai.onnx@10::QLinearConv``. + +Type constraints: + - T1: `tensor(int8)`, `tensor(uint8)` + - T2: `tensor(int8)`, `tensor(uint8)` + - T3: `tensor(int8)`, `tensor(uint8)` + - T4: `tensor(int32)` + """ + return _QLinearConv( + _QLinearConv.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _QLinearConv.Inputs( + x=unwrap_vars(x), x_scale=unwrap_vars(x_scale), x_zero_point=unwrap_vars(x_zero_point), w=unwrap_vars(w), w_scale=unwrap_vars(w_scale), w_zero_point=unwrap_vars(w_zero_point), y_scale=unwrap_vars(y_scale), y_zero_point=unwrap_vars(y_zero_point), B=unwrap_vars(B), ), ).get_output_vars( + x=get_value(x), x_scale=get_value(x_scale), x_zero_point=get_value(x_zero_point), w=get_value(w), w_scale=get_value(w_scale), w_zero_point=get_value(w_zero_point), y_scale=get_value(y_scale), y_zero_point=get_value(y_zero_point), B=get_value(B), ).y + + +def qlinear_matmul(a: Var, a_scale: Var, a_zero_point: Var, b: Var, b_scale: Var, b_zero_point: Var, y_scale: Var, y_zero_point: Var, ) -> Var: + r""" +Matrix product that behaves like numpy.matmul: +https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. +It consumes two quantized input tensors, their scales and zero points, +scale and zero point of output, and computes the quantized output. The +quantization formula is y = saturate((x / y_scale) + y_zero_point). For +(x / y_scale), it is rounding to nearest ties to even. Refer to +https://en.wikipedia.org/wiki/Rounding for details. Scale and zero point +must have same shape. They must be either scalar (per tensor) or N-D +tensor (per row for 'a' and per column for 'b'). Scalar refers to per +tensor quantization whereas N-D refers to per row or per column +quantization. If the input is 2D of shape [M, K] then zero point and +scale tensor may be an M element vector [v_1, v_2, ..., v_M] for per row +quantization and K element vector of shape [v_1, v_2, ..., v_K] for per +column quantization. If the input is N-D tensor with shape [D1, D2, M, +K] then zero point and scale tensor may have shape [D1, D2, M, 1] for +per row quantization and shape [D1, D2, 1, K] for per column +quantization. Production must never overflow, and accumulation may +overflow if and only if in 32 bits. + +Parameters +========== +a + Type T1. + N-dimensional quantized matrix a +a_scale + Type tensor(float). + scale of quantized input a +a_zero_point + Type T1. + zero point of quantized input a +b + Type T2. + N-dimensional quantized matrix b +b_scale + Type tensor(float). + scale of quantized input b +b_zero_point + Type T2. + zero point of quantized input b +y_scale + Type tensor(float). + scale of quantized output y +y_zero_point + Type T3. + zero point of quantized output y + +Returns +======= +y : Var + Type T3. + Quantized matrix multiply results from a \* b + +Notes +===== +Signature: ``ai.onnx@10::QLinearMatMul``. 
+ +Type constraints: + - T1: `tensor(int8)`, `tensor(uint8)` + - T2: `tensor(int8)`, `tensor(uint8)` + - T3: `tensor(int8)`, `tensor(uint8)` + """ + return _QLinearMatMul( + _QLinearMatMul.Attributes( + ), _QLinearMatMul.Inputs( + a=unwrap_vars(a), a_scale=unwrap_vars(a_scale), a_zero_point=unwrap_vars(a_zero_point), b=unwrap_vars(b), b_scale=unwrap_vars(b_scale), b_zero_point=unwrap_vars(b_zero_point), y_scale=unwrap_vars(y_scale), y_zero_point=unwrap_vars(y_zero_point), ), ).get_output_vars( + a=get_value(a), a_scale=get_value(a_scale), a_zero_point=get_value(a_zero_point), b=get_value(b), b_scale=get_value(b_scale), b_zero_point=get_value(b_zero_point), y_scale=get_value(y_scale), y_zero_point=get_value(y_zero_point), ).y + + +def quantize_linear(x: Var, y_scale: Var, y_zero_point: Optional[Var] = None, *, axis: int = 1, ) -> Var: + r""" +The linear quantization operator. It consumes a high precision tensor, a +scale, and a zero point to compute the low precision / quantized tensor. +The scale factor and zero point must have same shape, and can be either +a scalar for per-tensor / per layer quantization, or a 1-D tensor for +per-axis quantization. The quantization formula is y = saturate ((x / +y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if +it's uint8, or [-128, 127] if it's int8. For (x / y_scale), it's +rounding to the nearest even. Refer to +https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and +'y' must have same type. + +Parameters +========== +x + Type T1. + N-D full precision Input tensor to be quantized. +y_scale + Type tensor(float). + Scale for doing quantization to get 'y'. It can be a scalar, which means + per-tensor/layer quantization, or a 1-D Tensor for per-axis + quantization. +y_zero_point + Type T2. + Zero point for doing quantization to get 'y'. Shape must match y_scale. + Default is uint8 with zero point of 0 if it's not specified. +axis + Attribute. + (Optional) The axis of the quantization dimension of the input tensor. + Ignored for per-tensor quantization. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). + +Returns +======= +y : Var + Type T2. + N-D quantized output tensor. It has same shape as input 'x'. + +Notes +===== +Signature: ``ai.onnx@13::QuantizeLinear``. + +Type constraints: + - T1: `tensor(float)`, `tensor(int32)` + - T2: `tensor(int8)`, `tensor(uint8)` + """ + return _QuantizeLinear( + _QuantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _QuantizeLinear.Inputs( + x=unwrap_vars(x), y_scale=unwrap_vars(y_scale), y_zero_point=unwrap_vars(y_zero_point), ), ).get_output_vars( + x=get_value(x), y_scale=get_value(y_scale), y_zero_point=get_value(y_zero_point), ).y + + +def rnn(X: Var, W: Var, R: Var, B: Optional[Var] = None, sequence_lens: Optional[Var] = None, initial_h: Optional[Var] = None, *, activation_alpha: Optional[Iterable[float]] = None, activation_beta: Optional[Iterable[float]] = None, activations: Iterable[str] = ('Tanh', 'Tanh'), clip: Optional[float] = None, direction: str = "forward", hidden_size: Optional[int] = None, layout: int = 0, ) -> tuple[Var, Var]: + r""" +Computes an one-layer simple RNN. This operator is usually supported via +some custom implementation such as CuDNN. 
+ +Notations: + +- ``X`` - input tensor +- ``i`` - input gate +- ``t`` - time step (t-1 means previous time step) +- ``Wi`` - W parameter weight matrix for input gate +- ``Ri`` - R recurrence weight matrix for input gate +- ``Wbi`` - W parameter bias vector for input gate +- ``Rbi`` - R parameter bias vector for input gate +- ``WBi`` - W parameter weight matrix for backward input gate +- ``RBi`` - R recurrence weight matrix for backward input gate +- ``WBbi`` - WR bias vectors for backward input gate +- ``RBbi`` - RR bias vectors for backward input gate +- ``H`` - Hidden state +- ``num_directions`` - 2 if direction == bidirectional else 1 + +Activation functions: + +- Relu(x) - max(0, x) +- Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) +- Sigmoid(x) - 1/(1 + e^{-x}) + +NOTE: Below are optional + +- Affine(x) - alpha*x + beta +- LeakyRelu(x) - x if x >= 0 else alpha \* x +- ThresholdedRelu(x) - x if x >= alpha else 0 +- ScaledTanh(x) - alpha\ *Tanh(beta*\ x) +- HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) +- Elu(x) - x if x >= 0 else alpha*(e^x - 1) +- Softsign(x) - x/(1 + \|x\|) +- Softplus(x) - log(1 + e^x) + +Equations (Default: f=Tanh): + +- Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) This operator has + **optional** inputs/outputs. See `the + doc `__ for more + details about the representation of optional arguments. An empty + string may be used in the place of an actual argument's name to + indicate a missing argument. Trailing optional arguments (those not + followed by an argument that is present) may also be simply omitted. + +Parameters +========== +X + Type T. + The input sequences packed (and potentially padded) into one 3-D tensor + with the shape of ``[seq_length, batch_size, input_size]``. +W + Type T. + The weight tensor for input gate. Concatenation of ``Wi`` and ``WBi`` + (if bidirectional). The tensor has shape + ``[num_directions, hidden_size, input_size]``. +R + Type T. + The recurrence weight tensor. Concatenation of ``Ri`` and ``RBi`` (if + bidirectional). The tensor has shape + ``[num_directions, hidden_size, hidden_size]``. +B + Type T. + The bias tensor for input gate. Concatenation of ``[Wbi, Rbi]`` and + ``[WBbi, RBbi]`` (if bidirectional). The tensor has shape + ``[num_directions, 2*hidden_size]``. Optional: If not specified - + assumed to be 0. +sequence_lens + Type T1. + Optional tensor specifying lengths of the sequences in a batch. If not + specified - assumed all sequences in the batch to have length + ``seq_length``. It has shape ``[batch_size]``. +initial_h + Type T. + Optional initial value of the hidden. If not specified - assumed to be + 0. It has shape ``[num_directions, batch_size, hidden_size]``. +activation_alpha + Attribute. + Optional scaling values used by some activation functions. The values + are consumed in the order of activation functions, for example (f, g, h) + in LSTM. Default values are the same as of corresponding ONNX + operators.For example with LeakyRelu, the default alpha is 0.01. +activation_beta + Attribute. + Optional scaling values used by some activation functions. The values + are consumed in the order of activation functions, for example (f, g, h) + in LSTM. Default values are the same as of corresponding ONNX operators. +activations + Attribute. + One (or two if bidirectional) activation function for input gate. The + activation function must be one of the activation functions specified + above. Optional: Default ``Tanh`` if not specified. +clip + Attribute. + Cell clip threshold. 
Clipping bounds the elements of a tensor in the + range of [-threshold, +threshold] and is applied to the input of + activations. No clip if not specified. +direction + Attribute. + Specify if the RNN is forward, reverse, or bidirectional. Must be one of + forward (default), reverse, or bidirectional. +hidden_size + Attribute. + Number of neurons in the hidden layer +layout + Attribute. + The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the + following shapes are expected: X.shape = [seq_length, batch_size, + input_size], Y.shape = [seq_length, num_directions, batch_size, + hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, + hidden_size]. If 1, the following shapes are expected: X.shape = + [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, + num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, + num_directions, hidden_size]. + +Returns +======= +Y : Var + Type T. + A tensor that concats all the intermediate output values of the hidden. + It has shape ``[seq_length, num_directions, batch_size, hidden_size]``. +Y_h : Var + Type T. + The last output value of the hidden. It has shape + ``[num_directions, batch_size, hidden_size]``. + +Notes +===== +Signature: ``ai.onnx@14::RNN``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T1: `tensor(int32)` + """ + return _RNN( + _RNN.Attributes( + activation_alpha=AttrFloat32s.maybe(activation_alpha, name="activation_alpha"), + activation_beta=AttrFloat32s.maybe(activation_beta, name="activation_beta"), + activations=AttrStrings(activations, name="activations"), + clip=AttrFloat32.maybe(clip, name="clip"), + direction=AttrString(direction, name="direction"), + hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), + layout=AttrInt64(layout, name="layout"), + ), _RNN.Inputs( + X=unwrap_vars(X), W=unwrap_vars(W), R=unwrap_vars(R), B=unwrap_vars(B), sequence_lens=unwrap_vars(sequence_lens), initial_h=unwrap_vars(initial_h), ), ).get_output_vars( + X=get_value(X), W=get_value(W), R=get_value(R), B=get_value(B), sequence_lens=get_value(sequence_lens), initial_h=get_value(initial_h), )._unpack_to_any() + + +def random_normal(*, dtype: npt.DTypeLike = np.float32, mean: float = 0.0, scale: float = 1.0, seed: Optional[float] = None, shape: Iterable[int], ) -> Var: + r""" +Generate a tensor with random values drawn from a normal distribution. +The shape of the tensor is specified by the ``shape`` argument and the +parameter of the normal distribution specified by ``mean`` and +``scale``. + +The data type is specified by the 'dtype' argument. The 'dtype' argument +must be one of the data types specified in the 'DataType' enum field in +the TensorProto message. + +Parameters +========== +dtype + Attribute. + The data type for the elements of the output tensor. Default is + TensorProto::FLOAT. +mean + Attribute. + The mean of the normal distribution. +scale + Attribute. + The standard deviation of the normal distribution. +seed + Attribute. + (Optional) Seed to the random generator, if not specified we will auto + generate one. +shape + Attribute. + The shape of the output tensor. + +Returns +======= +output : Var + Type T. + Output tensor of random values drawn from normal distribution + +Notes +===== +Signature: ``ai.onnx@1::RandomNormal``. 
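+
+As a usage sketch (assuming this opset module is imported as ``op``): every
+parameter here is an attribute, so shape, dtype and seed must be fixed Python
+values at graph-construction time rather than runtime inputs:
+
+::
+
+    import numpy as np
+
+    out = op.random_normal(shape=[2, 3], dtype=np.float32, mean=0.0, scale=1.0)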
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _RandomNormal( + _RandomNormal.Attributes( + dtype=AttrDtype(dtype, name="dtype"), + mean=AttrFloat32(mean, name="mean"), + scale=AttrFloat32(scale, name="scale"), + seed=AttrFloat32.maybe(seed, name="seed"), + shape=AttrInt64s(shape, name="shape"), + ), _RandomNormal.Inputs( + ), ).get_output_vars( + ).output + + +def random_normal_like(input: Var, *, dtype: Optional[npt.DTypeLike] = None, mean: float = 0.0, scale: float = 1.0, seed: Optional[float] = None, ) -> Var: + r""" +Generate a tensor with random values drawn from a normal distribution. +The shape of the output tensor is copied from the shape of the input +tensor, and the parameters of the normal distribution are specified by +``mean`` and ``scale``. + +The data type is specified by the 'dtype' argument, or copied from the +input tensor if not provided. The 'dtype' argument must be one of the +data types specified in the 'DataType' enum field in the TensorProto +message, and be valid as an output type. + +Parameters +========== +input + Type T1. + Input tensor to copy shape and optionally type information from. +dtype + Attribute. + (Optional) The data type for the elements of the output tensor, if not + specified, we will use the data type of the input tensor. +mean + Attribute. + The mean of the normal distribution. +scale + Attribute. + The standard deviation of the normal distribution. +seed + Attribute. + (Optional) Seed to the random generator, if not specified we will auto + generate one. + +Returns +======= +output : Var + Type T2. + Output tensor of random values drawn from normal distribution + +Notes +===== +Signature: ``ai.onnx@1::RandomNormalLike``. + +Type constraints: + - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _RandomNormalLike( + _RandomNormalLike.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + mean=AttrFloat32(mean, name="mean"), + scale=AttrFloat32(scale, name="scale"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), _RandomNormalLike.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def random_uniform(*, dtype: npt.DTypeLike = np.float32, high: float = 1.0, low: float = 0.0, seed: Optional[float] = None, shape: Iterable[int], ) -> Var: + r""" +Generate a tensor with random values drawn from a uniform distribution. +The shape of the tensor is specified by the ``shape`` argument and the +range by ``low`` and ``high``. + +The data type is specified by the 'dtype' argument. The 'dtype' argument +must be one of the data types specified in the 'DataType' enum field in +the TensorProto message. + +Parameters +========== +dtype + Attribute. + The data type for the elements of the output tensor. If not specified, + default is TensorProto::FLOAT. +high + Attribute. + Upper boundary of the output values. +low + Attribute. + Lower boundary of the output values. +seed + Attribute. + (Optional) Seed to the random generator, if not specified we will auto + generate one. +shape + Attribute. + The shape of the output tensor. + +Returns +======= +output : Var + Type T. 
+ Output tensor of random values drawn from uniform distribution + +Notes +===== +Signature: ``ai.onnx@1::RandomUniform``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _RandomUniform( + _RandomUniform.Attributes( + dtype=AttrDtype(dtype, name="dtype"), + high=AttrFloat32(high, name="high"), + low=AttrFloat32(low, name="low"), + seed=AttrFloat32.maybe(seed, name="seed"), + shape=AttrInt64s(shape, name="shape"), + ), _RandomUniform.Inputs( + ), ).get_output_vars( + ).output + + +def random_uniform_like(input: Var, *, dtype: Optional[npt.DTypeLike] = None, high: float = 1.0, low: float = 0.0, seed: Optional[float] = None, ) -> Var: + r""" +Generate a tensor with random values drawn from a uniform distribution. +The shape of the output tensor is copied from the shape of the input +tensor, and the parameters of the uniform distribution are specified by +``low`` and ``high``. + +The data type is specified by the 'dtype' argument, or copied from the +input tensor if not provided. The 'dtype' argument must be one of the +data types specified in the 'DataType' enum field in the TensorProto +message and be valid as an output type. + +Parameters +========== +input + Type T1. + Input tensor to copy shape and optionally type information from. +dtype + Attribute. + (Optional) The data type for the elements of the output tensor, if not + specified, we will use the data type of the input tensor. +high + Attribute. + Upper boundary of the output values. +low + Attribute. + Lower boundary of the output values. +seed + Attribute. + (Optional) Seed to the random generator, if not specified we will auto + generate one. + +Returns +======= +output : Var + Type T2. + Output tensor of random values drawn from uniform distribution + +Notes +===== +Signature: ``ai.onnx@1::RandomUniformLike``. + +Type constraints: + - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _RandomUniformLike( + _RandomUniformLike.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + high=AttrFloat32(high, name="high"), + low=AttrFloat32(low, name="low"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), _RandomUniformLike.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def range(start: Var, limit: Var, delta: Var, ) -> Var: + r""" +Generate a tensor containing a sequence of numbers that begin at +``start`` and extends by increments of ``delta`` up to ``limit`` +(exclusive). + +The number of elements in the output of range is computed as below: + +:: + + number_of_elements = max( ceil( (limit - start) / delta ) , 0 ) + +The pseudocode determining the contents of the output is shown below: + +:: + + for(int i=0; i Var: + r""" +Reciprocal takes one input data (Tensor) and produces one output data +(Tensor) where the reciprocal is, y = 1/x, is applied to the tensor +elementwise. + +Parameters +========== +X + Type T. + Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@13::Reciprocal``. 
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Reciprocal( + _Reciprocal.Attributes( + ), _Reciprocal.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def reduce_l1(data: Var, *, axes: Optional[Iterable[int]] = None, keepdims: int = 1, ) -> Var: + r""" +Computes the L1 norm of the input tensor's elements along the provided +axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields 0. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Attribute. + A list of integers, along which to reduce. The default is to reduce over + all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@13::ReduceL1``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _ReduceL1( + _ReduceL1.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), _ReduceL1.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def reduce_l2(data: Var, *, axes: Optional[Iterable[int]] = None, keepdims: int = 1, ) -> Var: + r""" +Computes the L2 norm of the input tensor's elements along the provided +axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields 0. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Attribute. + A list of integers, along which to reduce. The default is to reduce over + all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@13::ReduceL2``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _ReduceL2( + _ReduceL2.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), _ReduceL2.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def reduce_log_sum(data: Var, *, axes: Optional[Iterable[int]] = None, keepdims: int = 1, ) -> Var: + r""" +Computes the log sum of the input tensor's elements along the provided +axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. 
Input tensors of rank zero are +valid. Reduction over an empty set of values yields minus infinity (if +supported by the datatype) or undefined otherwise. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Attribute. + A list of integers, along which to reduce. The default is to reduce over + all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@13::ReduceLogSum``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _ReduceLogSum( + _ReduceLogSum.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), _ReduceLogSum.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def reduce_log_sum_exp(data: Var, *, axes: Optional[Iterable[int]] = None, keepdims: int = 1, ) -> Var: + r""" +Computes the log sum exponent of the input tensor's elements along the +provided axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields minus infinity (if +supported by the datatype) or undefined otherwise. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Attribute. + A list of integers, along which to reduce. The default is to reduce over + all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@13::ReduceLogSumExp``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _ReduceLogSumExp( + _ReduceLogSumExp.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), _ReduceLogSumExp.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def reduce_max(data: Var, *, axes: Optional[Iterable[int]] = None, keepdims: int = 1, ) -> Var: + r""" +Computes the max of the input tensor's elements along the provided axes. +The resulting tensor has the same rank as the input if ``keepdims`` +equals 1. If ``keepdims`` equals 0, then the resulting tensor has the +reduced dimension pruned. Input tensors of rank zero are valid. +Reduction over an empty set of values yields minus infinity (if +supported by the datatype) or the minimum value of the data type +otherwise. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Attribute. 
+ A list of integers, along which to reduce. The default is to reduce over + all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@13::ReduceMax``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _ReduceMax( + _ReduceMax.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), _ReduceMax.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def reduce_mean(data: Var, *, axes: Optional[Iterable[int]] = None, keepdims: int = 1, ) -> Var: + r""" +Computes the mean of the input tensor's elements along the provided +axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields undefined. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Attribute. + A list of integers, along which to reduce. The default is to reduce over + all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@13::ReduceMean``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _ReduceMean( + _ReduceMean.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), _ReduceMean.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def reduce_min(data: Var, *, axes: Optional[Iterable[int]] = None, keepdims: int = 1, ) -> Var: + r""" +Computes the min of the input tensor's elements along the provided axes. +The resulting tensor has the same rank as the input if ``keepdims`` +equals 1. If ``keepdims`` equals 0, then the resulting tensor has the +reduced dimension pruned. Input tensors of rank zero are valid. +Reduction over an empty set of values yields plus infinity (if supported +by the datatype) or the maximum value of the data type otherwise. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Attribute. + A list of integers, along which to reduce. The default is to reduce over + all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@13::ReduceMin``. 
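+
+The ``axes``/``keepdims`` semantics are shared by the reduce_* constructors in
+this module; a small NumPy sketch of the shapes involved:
+
+::
+
+    import numpy as np
+
+    data = np.array([[3.0, 1.0], [5.0, 2.0]])
+    np.min(data, axis=1, keepdims=True)   # keepdims=1 -> shape (2, 1)
+    np.min(data, axis=1)                  # keepdims=0 -> shape (2,)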
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _ReduceMin( + _ReduceMin.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), _ReduceMin.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def reduce_prod(data: Var, *, axes: Optional[Iterable[int]] = None, keepdims: int = 1, ) -> Var: + r""" +Computes the product of the input tensor's elements along the provided +axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields 1. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Attribute. + A list of integers, along which to reduce. The default is to reduce over + all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@13::ReduceProd``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _ReduceProd( + _ReduceProd.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), _ReduceProd.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def reduce_sum(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: + r""" +Computes the sum of the input tensor's elements along the provided axes. +The resulting tensor has the same rank as the input if ``keepdims`` +equals 1. If ``keepdims`` equals 0, then the resulting tensor has the +reduced dimension pruned. Input tensors of rank zero are valid. +Reduction over an empty set of values yields 0. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@13::ReduceSum``. 
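+
+Note that, unlike the reductions above, ``axes`` is an input ``Var`` of type
+tensor(int64) rather than an attribute. A usage sketch, assuming this opset
+module is imported as ``op`` and that ``constant`` is given NumPy arrays:
+
+::
+
+    import numpy as np
+
+    data = op.constant(value=np.ones((2, 3), dtype=np.float32))
+    axes = op.constant(value=np.array([1], dtype=np.int64))
+    total = op.reduce_sum(data, axes, keepdims=0)  # result shape: (2,)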
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _ReduceSum( + _ReduceSum.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceSum.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def reduce_sum_square(data: Var, *, axes: Optional[Iterable[int]] = None, keepdims: int = 1, ) -> Var: + r""" +Computes the sum square of the input tensor's elements along the +provided axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields 0. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Attribute. + A list of integers, along which to reduce. The default is to reduce over + all the dimensions of the input tensor. Accepted range is [-r, r-1] + where r = rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@13::ReduceSumSquare``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` + """ + return _ReduceSumSquare( + _ReduceSumSquare.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), _ReduceSumSquare.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).reduced + + +def relu(X: Var, ) -> Var: + r""" +Relu takes one input data (Tensor) and produces one output data +(Tensor) where the rectified linear function, y = max(0, x), is +applied to the tensor elementwise. + +Parameters +========== +X + Type T. + Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@14::Relu``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)` + """ + return _Relu( + _Relu.Attributes( + ), _Relu.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def reshape(data: Var, shape: Var, *, allowzero: int = 0, ) -> Var: + r""" +Reshape the input tensor similar to numpy.reshape. First input is the +data tensor, second input is a shape tensor which specifies the output +shape. It outputs the reshaped tensor. At most one dimension of the new +shape can be -1. In this case, the value is inferred from the size of +the tensor and the remaining dimensions. A dimension could also be 0, in +which case the actual dimension value is unchanged (i.e. taken from the +input tensor). If 'allowzero' is set, and the new shape includes 0, the +dimension will be set explicitly to zero (i.e. not taken from input +tensor). Shape (second input) could be an empty shape, which means +converting to a scalar. The input tensor's shape and the output tensor's +shape are required to have the same number of elements. 
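+
+A small NumPy check of the ``0``/``-1`` conventions described above (``0``
+copies the corresponding input dimension, ``-1`` is inferred from the
+remaining element count):
+
+::
+
+    import numpy as np
+
+    data = np.zeros((2, 3, 4))
+    # target shape [0, -1]: dimension 0 is copied (2), -1 is inferred as 12
+    assert data.reshape(2, -1).shape == (2, 12)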
+ +If the attribute 'allowzero' is set, it is invalid for the specified +shape to contain both a zero value and -1, as the value of the dimension +corresponding to -1 cannot be determined uniquely. + +Parameters +========== +data + Type T. + An input tensor. +shape + Type tensor(int64). + Specified shape for output. +allowzero + Attribute. + (Optional) By default, when any value in the 'shape' input is equal to + zero the corresponding dimension value is copied from the input tensor + dynamically. allowzero=1 indicates that if any value in the 'shape' + input is set to zero, the zero value is honored, similar to NumPy. + +Returns +======= +reshaped : Var + Type T. + Reshaped data. + +Notes +===== +Signature: ``ai.onnx@14::Reshape``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Reshape( + _Reshape.Attributes( + allowzero=AttrInt64(allowzero, name="allowzero"), + ), _Reshape.Inputs( + data=unwrap_vars(data), shape=unwrap_vars(shape), ), ).get_output_vars( + data=get_value(data), shape=get_value(shape), ).reshaped + + +def resize(X: Var, roi: Optional[Var] = None, scales: Optional[Var] = None, sizes: Optional[Var] = None, *, coordinate_transformation_mode: str = "half_pixel", cubic_coeff_a: float = -0.75, exclude_outside: int = 0, extrapolation_value: float = 0.0, mode: str = "nearest", nearest_mode: str = "round_prefer_floor", ) -> Var: + r""" +Resize the input tensor. In general, it calculates every value in the +output tensor as a weighted average of neighborhood (a.k.a. sampling +locations) in the input tensor. Each dimension value of the output +tensor is: output_dimension = floor(input_dimension \* (roi_end - +roi_start) \* scale) if input "sizes" is not specified. + +Parameters +========== +X + Type T1. + N-D tensor +roi + Type T2. + 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is + the rank of X. The RoIs' coordinates are normalized in the coordinate + system of the input image. It only takes effect when + coordinate_transformation_mode is "tf_crop_and_resize" +scales + Type tensor(float). + The scale array along each dimension. It takes value greater than 0. If + it's less than 1, it's sampling down, otherwise, it's upsampling. The + number of elements of 'scales' should be the same as the rank of input + 'X'. One of 'scales' and 'sizes' MUST be specified and it is an error if + both are specified. If 'sizes' is needed, the user can use an empty + string as the name of 'scales' in this operator's input list. +sizes + Type tensor(int64). + The size of the output tensor. The number of elements of 'sizes' should + be the same as the rank of input 'X'. Only one of 'scales' and 'sizes' + can be specified. +coordinate_transformation_mode + Attribute. + This attribute describes how to transform the coordinate in the resized + tensor to the coordinate in the original tensor. + + The coordinate of each dimension is transformed individually. Let's + describe a case using axis x as an example. 
Denote x_resized as the + coordinate of axis x in the resized tensor, x_original as the coordinate + of axis x in the original tensor, length_original as the length of the + original tensor in axis x, length_resized as the length of the resized + tensor in axis x, roi_x = (start_x, end_x) of the axis x in input "roi", + scale = length_resized / length_original, + + if coordinate_transformation_mode is "half_pixel", x_original = + (x_resized + 0.5) / scale - 0.5, + + if coordinate_transformation_mode is "pytorch_half_pixel", x_original = + length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0, + + if coordinate_transformation_mode is "align_corners", x_original = + x_resized \* (length_original - 1) / (length_resized - 1), + + if coordinate_transformation_mode is "asymmetric", x_original = + x_resized / scale, + + if coordinate_transformation_mode is "tf_crop_and_resize", x_original = + length_resized > 1 ? start_x \* (length_original - 1) + x_resized \* + (end_x - start_x) \* (length_original - 1) / (length_resized - 1) : 0.5 + \* (start_x + end_x) \* (length_original - 1). +cubic_coeff_a + Attribute. + The coefficient 'a' used in cubic interpolation. Two common choice are + -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out + Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the + details. This attribute is valid only if "mode" is "cubic". +exclude_outside + Attribute. + If set to 1, the weight of sampling locations outside the tensor will be + set to 0 and the weight will be renormalized so that their sum is 1.0. + The default value is 0. +extrapolation_value + Attribute. + When coordinate_transformation_mode is "tf_crop_and_resize" and + x_original is outside the range [0, length_original - 1], this value is + used as the corresponding output value. Default is 0.0f. +mode + Attribute. + Three interpolation modes: nearest (default), linear and cubic. The + "linear" mode includes linear interpolation for 1D tensor and N-linear + interpolation for N-D tensor (for example, bilinear interpolation for 2D + tensor). The "cubic" mode includes cubic interpolation for 1D tensor and + N-cubic interpolation for N-D tensor (for example, bicubic interpolation + for 2D tensor). +nearest_mode + Attribute. + Four modes: round_prefer_floor (default, as known as round half down), + round_prefer_ceil (as known as round half up), floor, ceil. Only used by + nearest interpolation. It indicates how to get "nearest" pixel in input + tensor from x_original, so this attribute is valid only if "mode" is + "nearest". + +Returns +======= +Y : Var + Type T1. + N-D tensor after resizing + +Notes +===== +Signature: ``ai.onnx@13::Resize``. 
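+
+A NumPy sketch of the default "half_pixel" mapping above, for a single axis
+upscaled by ``scale = 2.0``:
+
+::
+
+    import numpy as np
+
+    scale = 2.0
+    x_resized = np.arange(8)                      # output coordinates
+    x_original = (x_resized + 0.5) / scale - 0.5  # where to sample the input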
+ +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Resize( + _Resize.Attributes( + coordinate_transformation_mode=AttrString(coordinate_transformation_mode, name="coordinate_transformation_mode"), + cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), + exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), + extrapolation_value=AttrFloat32(extrapolation_value, name="extrapolation_value"), + mode=AttrString(mode, name="mode"), + nearest_mode=AttrString(nearest_mode, name="nearest_mode"), + ), _Resize.Inputs( + X=unwrap_vars(X), roi=unwrap_vars(roi), scales=unwrap_vars(scales), sizes=unwrap_vars(sizes), ), ).get_output_vars( + X=get_value(X), roi=get_value(roi), scales=get_value(scales), sizes=get_value(sizes), ).Y + + +def reverse_sequence(input: Var, sequence_lens: Var, *, batch_axis: int = 1, time_axis: int = 0, ) -> Var: + r""" +Reverse batch of sequences having different lengths specified by +``sequence_lens``. + +For each slice i iterating on batch axis, the operator reverses the +first sequence_lens[i] elements on time axis, and copies elements whose +index's beyond sequence_lens[i] to the output. So the output slice i +contains reversed sequences on the first sequence_lens[i] elements, then +have original values copied for the other elements. + +Example 1: input = [[0.0, 4.0, 8.0, 12.0], [1.0, 5.0, 9.0, 13.0], [2.0, +6.0, 10.0, 14.0], [3.0, 7.0, 11.0, 15.0]] sequence_lens = [4, 3, 2, 1] +time_axis = 0 batch_axis = 1 + +output = [[3.0, 6.0, 9.0, 12.0], [2.0, 5.0, 8.0, 13.0], [1.0, 4.0, 10.0, +14.0], [0.0, 7.0, 11.0, 15.0]] + +Example 2: input = [[0.0, 1.0, 2.0, 3.0 ], [4.0, 5.0, 6.0, 7.0 ], [8.0, +9.0, 10.0, 11.0], [12.0, 13.0, 14.0, 15.0]] sequence_lens = [1, 2, 3, 4] +time_axis = 1 batch_axis = 0 + +output = [[0.0, 1.0, 2.0, 3.0 ], [5.0, 4.0, 6.0, 7.0 ], [10.0, 9.0, 8.0, +11.0], [15.0, 14.0, 13.0, 12.0]] + +Parameters +========== +input + Type T. + Tensor of rank r >= 2. +sequence_lens + Type tensor(int64). + Tensor specifying lengths of the sequences in a batch. It has shape + ``[batch_size]``. +batch_axis + Attribute. + (Optional) Specify which axis is batch axis. Must be one of 1 (default), + or 0. +time_axis + Attribute. + (Optional) Specify which axis is time axis. Must be one of 0 (default), + or 1. + +Returns +======= +Y : Var + Type T. + Tensor with same shape of input. + +Notes +===== +Signature: ``ai.onnx@10::ReverseSequence``. 
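+
+A NumPy sketch of the semantics above (``time_axis=0``, ``batch_axis=1``): for
+each batch column ``i``, only the first ``sequence_lens[i]`` time steps are
+reversed and the remaining entries are copied through:
+
+::
+
+    import numpy as np
+
+    x = np.arange(16, dtype=np.float32).reshape(4, 4)  # (time, batch)
+    lens = [4, 3, 2, 1]
+    y = x.copy()
+    for i, n in enumerate(lens):
+        y[:n, i] = x[:n, i][::-1]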
+ +Type constraints: + - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _ReverseSequence( + _ReverseSequence.Attributes( + batch_axis=AttrInt64(batch_axis, name="batch_axis"), + time_axis=AttrInt64(time_axis, name="time_axis"), + ), _ReverseSequence.Inputs( + input=unwrap_vars(input), sequence_lens=unwrap_vars(sequence_lens), ), ).get_output_vars( + input=get_value(input), sequence_lens=get_value(sequence_lens), ).Y + + +def roi_align(X: Var, rois: Var, batch_indices: Var, *, coordinate_transformation_mode: str = "half_pixel", mode: str = "avg", output_height: int = 1, output_width: int = 1, sampling_ratio: int = 0, spatial_scale: float = 1.0, ) -> Var: + r""" +Region of Interest (RoI) align operation described in the `Mask R-CNN +paper `__. RoiAlign consumes an input +tensor X and region of interests (rois) to apply pooling across each +RoI; it produces a 4-D tensor of shape (num_rois, C, output_height, +output_width). + +RoiAlign is proposed to avoid the misalignment by removing quantizations +while converting from original image into feature map and from feature +map into RoI feature; in each ROI bin, the value of the sampled +locations are computed directly through bilinear interpolation. + +Parameters +========== +X + Type T1. + Input data tensor from the previous operator; 4-D feature map of shape + (N, C, H, W), where N is the batch size, C is the number of channels, + and H and W are the height and the width of the data. +rois + Type T1. + RoIs (Regions of Interest) to pool over; rois is 2-D input of shape + (num_rois, 4) given as [[x1, y1, x2, y2], ...]. The RoIs' coordinates + are in the coordinate system of the input image. Each coordinate set has + a 1:1 correspondence with the 'batch_indices' input. +batch_indices + Type T2. + 1-D tensor of shape (num_rois,) with each element denoting the index of + the corresponding image in the batch. +coordinate_transformation_mode + Attribute. + Allowed values are 'half_pixel' and 'output_half_pixel'. Use the value + 'half_pixel' to pixel shift the input coordinates by -0.5 (the + recommended behavior). Use the value 'output_half_pixel' to omit the + pixel shift for the input (use this for a backward-compatible behavior). +mode + Attribute. + The pooling method. Two modes are supported: 'avg' and 'max'. Default is + 'avg'. +output_height + Attribute. + default 1; Pooled output Y's height. +output_width + Attribute. + default 1; Pooled output Y's width. +sampling_ratio + Attribute. + Number of sampling points in the interpolation grid used to compute the + output value of each pooled output bin. If > 0, then exactly + sampling_ratio x sampling_ratio grid points are used. If == 0, then an + adaptive number of grid points are used (computed as ceil(roi_width / + output_width), and likewise for height). Default is 0. +spatial_scale + Attribute. + Multiplicative spatial scale factor to translate ROI coordinates from + their input spatial scale to the scale used when pooling, i.e., spatial + scale of the input feature map X relative to the input image. E.g.; + default is 1.0f. + +Returns +======= +Y : Var + Type T1. + RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, + output_width). The r-th batch element Y[r-1] is a pooled feature map + corresponding to the r-th RoI X[r-1]. 
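+
+As a shape-level usage sketch (assuming this opset module is imported as
+``op`` and that ``X``, ``rois`` and ``batch_indices`` are ``Var`` values built
+elsewhere with the shapes listed above):
+
+::
+
+    # X: (N, C, H, W), rois: (num_rois, 4) as [x1, y1, x2, y2],
+    # batch_indices: (num_rois,) int64 -> output: (num_rois, C, 7, 7)
+    pooled = op.roi_align(
+        X, rois, batch_indices,
+        output_height=7, output_width=7, spatial_scale=1.0 / 16,
+    )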
+ +Notes +===== +Signature: ``ai.onnx@16::RoiAlign``. + +Type constraints: + - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(int64)` + """ + return _RoiAlign( + _RoiAlign.Attributes( + coordinate_transformation_mode=AttrString(coordinate_transformation_mode, name="coordinate_transformation_mode"), + mode=AttrString(mode, name="mode"), + output_height=AttrInt64(output_height, name="output_height"), + output_width=AttrInt64(output_width, name="output_width"), + sampling_ratio=AttrInt64(sampling_ratio, name="sampling_ratio"), + spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), + ), _RoiAlign.Inputs( + X=unwrap_vars(X), rois=unwrap_vars(rois), batch_indices=unwrap_vars(batch_indices), ), ).get_output_vars( + X=get_value(X), rois=get_value(rois), batch_indices=get_value(batch_indices), ).Y + + +def round(X: Var, ) -> Var: + r""" +Round takes one input Tensor and rounds the values, element-wise, +meaning it finds the nearest integer for each value. In case of halves, +the rule is to round them to the nearest even integer. If input x is +integral, +0, -0, NaN, or infinite, x itself is returned. The output +tensor has the same shape and type as the input. + +Examples: + +:: + + round([0.9]) = [1.0] + round([2.5]) = [2.0] + round([2.3]) = [2.0] + round([1.5]) = [2.0] + round([-4.5]) = [-4.0] + +Parameters +========== +X + Type T. + Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@11::Round``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Round( + _Round.Attributes( + ), _Round.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def stft(signal: Var, frame_step: Var, window: Optional[Var] = None, frame_length: Optional[Var] = None, *, onesided: int = 1, ) -> Var: + r""" +Computes the Short-time Fourier Transform of the signal. + +Parameters +========== +signal + Type T1. + Input tensor representing a real or complex valued signal. For real + input, the following shape is expected: [batch_size][signal_length][1]. + For complex input, the following shape is expected: + [batch_size][signal_length][2], where [batch_size][signal_length][0] + represents the real component and [batch_size][signal_length][1] + represents the imaginary component of the signal. +frame_step + Type T2. + The number of samples to step between successive DFTs. +window + Type T1. + A tensor representing the window that will be slid over the signal.The + window must have rank 1 with shape: [window_shape]. It's an optional + value. +frame_length + Type T2. + A scalar representing the size of the DFT. It's an optional value. +onesided + Attribute. + If onesided is 1, only values for w in [0, 1, 2, ..., floor(n_fft/2) + + 1] are returned because the real-to-complex Fourier transform satisfies + the conjugate symmetry, i.e., X[m, w] = X[m,w]=X[m,n_fft-w]\*. Note if + the input or window tensors are complex, then onesided output is not + possible. Enabling onesided with real inputs performs a Real-valued fast + Fourier transform (RFFT).When invoked with real or complex valued input, + the default value is 1. Values can be 0 or 1. + +Returns +======= +output : Var + Type T1. 
+ The Short-time Fourier Transform of the signals.If onesided is 1, the + output has the shape: [batch_size][frames][dft_unique_bins][2], where + dft_unique_bins is frame_length // 2 + 1 (the unique components of the + DFT) If onesided is 0, the output has the shape: + [batch_size][frames][frame_length][2], where frame_length is the length + of the DFT. + +Notes +===== +Signature: ``ai.onnx@17::STFT``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(int32)`, `tensor(int64)` + """ + return _STFT( + _STFT.Attributes( + onesided=AttrInt64(onesided, name="onesided"), + ), _STFT.Inputs( + signal=unwrap_vars(signal), frame_step=unwrap_vars(frame_step), window=unwrap_vars(window), frame_length=unwrap_vars(frame_length), ), ).get_output_vars( + signal=get_value(signal), frame_step=get_value(frame_step), window=get_value(window), frame_length=get_value(frame_length), ).output + + +def scan(initial_state_and_scan_inputs: Sequence[Var], *, body: Callable[..., Iterable[Var]], num_scan_inputs: int, scan_input_axes: Optional[Iterable[int]] = None, scan_input_directions: Optional[Iterable[int]] = None, scan_output_axes: Optional[Iterable[int]] = None, scan_output_directions: Optional[Iterable[int]] = None, ) -> Sequence[Var]: + r""" +Scan can be used to iterate over one or more scan_input tensors, +constructing zero or more scan_output tensors. It combines ideas from +general recurrences, functional programming constructs such as scan, +fold, map, and zip, and is intended to enable generalizations of +RNN-like constructs for sequence-to-sequence processing. Other tensors +(referred to as state_variables here) can be used to carry a state when +iterating from one element to another (similar to hidden-state in RNNs, +also referred to as loop-carried dependences in the context of loops). +Many common usages involve a single scan_input tensor (where +functionality similar to scan, fold and map can be obtained). When more +than one scan_input is used, a behavior similar to zip is obtained. + +The attribute body must be a graph, specifying the computation to be +performed in every iteration. It takes as input the current values of +the state_variables and the current iterated element of the scan_inputs. +It must return the (updated) values of the state_variables and zero or +more scan_output_element tensors. The values of the scan_output_element +tensors are concatenated over all the iterations to produce the +scan_output values of the scan construct (similar to the concatenated +intermediate hidden-state values of RNN-like constructs). All the output +tensors (state_variables as well as scan_output_element tensors) are +required to have the same shape in each iteration of the loop (a +restriction imposed to enable efficient memory allocation). + +Note that the iterated element passed to the body subgraph does not have +a sequence axis. It will have a rank one less than the rank of the +corresponding scan_input. + +The scan operation returns the final values of the state_variables as +well as the scan_outputs. + +The optional attribute scan_input_directions specifies the direction +(forward or backward) for each scan input. If this attribute is omitted, +all sequences are scanned in the forward direction. A bidirectional scan +may be performed by specifying the same tensor input twice in the +scan_inputs, once with a forward direction, and once with a backward +direction. 
+ +The scan_output of the operation is produced by concatenating the +scan_output_element values produced by the body in each iteration. The +optional attribute scan_output_directions specifies the direction in +which scan_output is constructed (by appending or prepending the +scan_output_element to scan_output in each iteration) for each +scan_output. If this attribute is omitted, the scan_output_element is +appended to the scan_output in each iteration. + +The optional attribute scan_input_axes specifies the axis to be scanned +for each scan_input. If omitted, every scan_input will be scanned in +axis 0. For example, if axis 0 is the batch axis and axis 1 is the time +axis (to be scanned), specify an axis value of 1. Note that scanning a +non-zero axis may be less efficient than scanning axis zero. + +The optional attribute scan_output_axes specifies the axis along which +the scan_outputs are accumulated for each scan_output. For example, if +axis 1 is the time axis (to be scanned) for both inputs and outputs, +specify a scan_input axis and scan_output axis value of 1. + +Note that because of the ONNX restriction that only the last parameter +of an operator can be variadic, the initial-states and scan-inputs are +listed together as one input parameter. Similarly, the final-states and +scan-outputs are listed together as one output parameter. The attribute +num_scan_inputs indicates the number M of scan-inputs. + +The behavior of + +:: + + Scan < + num_scan_inputs = m, + body = loop-body, + scan_input_axes = [axis_1, ..., axis_m] + > (init_1, ..., init_n, scan_1, ..., scan_m) + +is equivalent to the following pseudo-code: + +:: + + // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i + // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. + sequence_length = scan_1.shape[axis_1]; + + // initialize state-variables + st_1 = init_1; ... st_n = init_n; + // initialize scan-output variables: [] denotes an empty tensor + scan_out_1 = []; ...; scan_out_k = []; + // identify number of iterations: + + // execute loop + for (int t = 0; t < sequence_length; ++t) { + // generate the scan-input elements: the notation T[t] indicates the sub-tensor + // of rank one less than T obtained by indexing T at position t along axis k. + si_1 = scan_1[t]; + ... ; + si_m = scan_m[t]; + // execute loop-body + st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m) + // accumulate the scan-output elements + scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k); + } + + return st_1, ..., st_n, scan_out_1, ..., scan_out_k; + +*Sample usage: Encoding RNN using a Scan* + +The following example shows how a simple RNN over an input tensor %X, +with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi +and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. +Note that the loop-body is a nested graph, and it directly computes %Wi, +%Ri, %Wbi, and %Rbi (typically constants or initializers in the body +graph). If these values are computed in the outer graph, they need to be +passed in as extra state_variables. + +:: + + graph rnn-encoding { + %H_0 = ... + %X = ... + %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X) + return %Y, %Y_h + } + + graph rnn-cell-1 ( + %H_tminus1[FLOAT, tensor] + %X_t[FLOAT, tensor] + ) { + %Wi = ... + %Ri = ... + %Wbi = ... + %Rbi = ... 
+ %t1 = X_t * (Wi^T) + %t2 = H_tminus1*(Ri^T) + %t3 = Add(%t1, %t2) + %t4 = Add(%t3, %Wbi) + %t5 = Add(%t4, %Rbi) + %Ht = Tanh(%t5) + %Accumulate = Identity(%Ht) + return %Ht, %Accumulate + } + +Parameters +========== +initial_state_and_scan_inputs + Type V. + Initial values of the loop's N state variables followed by M scan_inputs +body + Attribute. + The graph run each iteration. It has N+M inputs: (loop state + variables..., scan_input_elts...). It has N+K outputs: (loop state + variables..., scan_output_elts...). Each scan_output is created by + concatenating the value of the specified scan_output_elt value at the + end of each iteration of the loop. It is an error if the dimensions of + these values change across loop iterations. +num_scan_inputs + Attribute. + An attribute specifying the number of scan_inputs M. +scan_input_axes + Attribute. + An optional list of M flags. The i-th element of the list specifies the + axis to be scanned (the sequence axis) for the i-th scan_input. If + omitted, 0 will be used as the scan axis for every scan_input. Negative + value for an axis means counting dimensions from the back. Accepted + range is [-r, r-1] where r = rank(input). +scan_input_directions + Attribute. + An optional list of M flags. The i-th element of the list specifies the + direction to be scanned for the i-th scan_input tensor: 0 indicates + forward direction and 1 indicates reverse direction. If omitted, all + scan_input tensors will be scanned in the forward direction. +scan_output_axes + Attribute. + An optional list of K flags. The i-th element of the list specifies the + axis for the i-th scan_output. The scan outputs are accumulated along + the specified axis. If omitted, 0 will be used as the scan axis for + every scan_output. Negative value for an axis means counting dimensions + from the back. Accepted range is [-r, r-1]. +scan_output_directions + Attribute. + An optional list of K flags, one for each scan_output. The i-th element + of the list specifies whether the i-th scan_output should be constructed + by appending or prepending a new value in each iteration: 0 indicates + appending and 1 indicates prepending. If omitted, all scan_output + tensors will be produced by appending a value in each iteration. + +Returns +======= +final_state_and_scan_outputs : Sequence[Var] + Type V. + Final values of the loop's N state variables followed by K scan_outputs + +Notes +===== +Signature: ``ai.onnx@16::Scan``. 
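+
+A sketch of the ``body`` callback contract described above, with one state
+variable, one scan input and one scan output (a running sum over axis 0);
+assumes this opset module is imported as ``op``:
+
+::
+
+    import numpy as np
+
+    x = op.constant(value=np.arange(12, dtype=np.float32).reshape(4, 3))
+    init = op.constant(value=np.zeros((3,), dtype=np.float32))
+
+    def running_sum(state, x_t):
+        s = op.add(state, x_t)
+        return [s, s]  # (updated state, scan output element)
+
+    final, partial_sums = op.scan([init, x], body=running_sum, num_scan_inputs=1)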
+ +Type constraints: + - V: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _BlackmanWindow( - _BlackmanWindow.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - periodic=AttrInt64(periodic, name="periodic"), - ), - _BlackmanWindow.Inputs( - size=unwrap_vars(size), - ), - ) - .get_output_vars( - size=get_value(size), - ) - .output - ) + _body_subgraph: Graph = subgraph( + [Tensor(var.unwrap_tensor().dtype, (lambda x: x[1:] if x is not None else None)(var.unwrap_tensor().shape)) for var in initial_state_and_scan_inputs[:num_scan_inputs]] + [Tensor(var.unwrap_tensor().dtype) for var in initial_state_and_scan_inputs[num_scan_inputs:]], + body + ) + return _Scan( + _Scan.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), + scan_input_axes=AttrInt64s.maybe(scan_input_axes, name="scan_input_axes"), + scan_input_directions=AttrInt64s.maybe(scan_input_directions, name="scan_input_directions"), + scan_output_axes=AttrInt64s.maybe(scan_output_axes, name="scan_output_axes"), + scan_output_directions=AttrInt64s.maybe(scan_output_directions, name="scan_output_directions"), + ), _Scan.Inputs( + initial_state_and_scan_inputs=unwrap_vars(initial_state_and_scan_inputs), ), out_variadic=len(_body_subgraph.requested_results), ).get_output_vars( + initial_state_and_scan_inputs=get_value(initial_state_and_scan_inputs), ).final_state_and_scan_outputs + + +def scatter_elements(data: Var, indices: Var, updates: Var, *, axis: int = 0, reduction: str = "none", ) -> Var: + r""" +ScatterElements takes three inputs ``data``, ``updates``, and +``indices`` of the same rank r >= 1 and an optional attribute axis that +identifies an axis of ``data`` (by default, the outer-most axis, that is +axis 0). The output of the operation is produced by creating a copy of +the input ``data``, and then updating its value to values specified by +``updates`` at specific index positions specified by ``indices``. Its +output shape is the same as the shape of ``data``. For each entry in +``updates``, the target index in ``data`` is obtained by combining the +corresponding entry in ``indices`` with the index of the entry itself: +the index-value for dimension = axis is obtained from the value of the +corresponding entry in ``indices`` and the index-value for dimension != +axis is obtained from the index of the entry itself. ``reduction`` +allows specification of an optional reduction operation, which is +applied to all values in ``updates`` tensor into ``output`` at the +specified ``indices``. In cases where ``reduction`` is set to "none", +indices should not have duplicate entries: that is, if idx1 != idx2, +then indices[idx1] != indices[idx2]. 
For instance, in a 2-D tensor case, +the update corresponding to the [i][j] entry is performed as below: + +:: + + output[indices[i][j]][j] = updates[i][j] if axis = 0, + output[i][indices[i][j]] = updates[i][j] if axis = 1, + +When ``reduction`` is set to "add", the update corresponding to the +[i][j] entry is performed as below: + +:: + + output[indices[i][j]][j] += updates[i][j] if axis = 0, + output[i][indices[i][j]] += updates[i][j] if axis = 1, + +When ``reduction`` is set to "mul", the update corresponding to the +[i][j] entry is performed as below: + +:: + + output[indices[i][j]][j] *= updates[i][j] if axis = 0, + output[i][indices[i][j]] *= updates[i][j] if axis = 1, + +This operator is the inverse of GatherElements. It is similar to Torch's +Scatter operation. Example 1: + +:: + + data = [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ] + indices = [ + [1, 0, 2], + [0, 2, 1], + ] + updates = [ + [1.0, 1.1, 1.2], + [2.0, 2.1, 2.2], + ] + output = [ + [2.0, 1.1, 0.0] + [1.0, 0.0, 2.2] + [0.0, 2.1, 1.2] + ] + +Example 2: + +:: + + data = [[1.0, 2.0, 3.0, 4.0, 5.0]] + indices = [[1, 3]] + updates = [[1.1, 2.1]] + axis = 1 + output = [[1.0, 1.1, 3.0, 2.1, 5.0]] + +Parameters +========== +data + Type T. + Tensor of rank r >= 1. +indices + Type Tind. + Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index + values are expected to be within bounds [-s, s-1] along axis of size s. + It is an error if any of the index values are out of bounds. +updates + Type T. + Tensor of rank r >=1 (same rank and shape as indices) +axis + Attribute. + Which axis to scatter on. Negative value means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(data). +reduction + Attribute. + Type of reduction to apply: none (default), add, mul. 'none': no + reduction applied. 'add': reduction using the addition operation. 'mul': + reduction using the multiplication operation. + +Returns +======= +output : Var + Type T. + Tensor of rank r >= 1 (same rank as input). + +Notes +===== +Signature: ``ai.onnx@16::ScatterElements``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - Tind: `tensor(int32)`, `tensor(int64)` + """ + return _ScatterElements( + _ScatterElements.Attributes( + axis=AttrInt64(axis, name="axis"), + reduction=AttrString(reduction, name="reduction"), + ), _ScatterElements.Inputs( + data=unwrap_vars(data), indices=unwrap_vars(indices), updates=unwrap_vars(updates), ), ).get_output_vars( + data=get_value(data), indices=get_value(indices), updates=get_value(updates), ).output + + +def scatter_nd(data: Var, indices: Var, updates: Var, *, reduction: str = "none", ) -> Var: + r""" +ScatterND takes three inputs ``data`` tensor of rank r >= 1, ``indices`` +tensor of rank q >= 1, and ``updates`` tensor of rank q + r - +indices.shape[-1] - 1. The output of the operation is produced by +creating a copy of the input ``data``, and then updating its value to +values specified by ``updates`` at specific index positions specified by +``indices``. Its output shape is the same as the shape of ``data``. + +``indices`` is an integer tensor. Let k denote indices.shape[-1], the +last dimension in the shape of ``indices``. 
``indices`` is treated as a +(q-1)-dimensional tensor of k-tuples, where each k-tuple is a +partial-index into ``data``. Hence, k can be a value at most the rank of +``data``. When k equals rank(data), each update entry specifies an +update to a single element of the tensor. When k is less than rank(data) +each update entry specifies an update to a slice of the tensor. Index +values are allowed to be negative, as per the usual convention for +counting backwards from the end, but are expected in the valid range. + +``updates`` is treated as a (q-1)-dimensional tensor of +replacement-slice-values. Thus, the first (q-1) dimensions of +updates.shape must match the first (q-1) dimensions of indices.shape. +The remaining dimensions of ``updates`` correspond to the dimensions of +the replacement-slice-values. Each replacement-slice-value is a (r-k) +dimensional tensor, corresponding to the trailing (r-k) dimensions of +``data``. Thus, the shape of ``updates`` must equal indices.shape[0:q-1] +++ data.shape[k:r-1], where ++ denotes the concatenation of shapes. + +The ``output`` is calculated via the following equation: output = +np.copy(data) update_indices = indices.shape[:-1] for idx in +np.ndindex(update_indices): output[indices[idx]] = updates[idx] The +order of iteration in the above loop is not specified. In particular, +indices should not have duplicate entries: that is, if idx1 != idx2, +then indices[idx1] != indices[idx2]. This ensures that the output value +does not depend on the iteration order. + +``reduction`` allows specification of an optional reduction operation, +which is applied to all values in ``updates`` tensor into ``output`` at +the specified ``indices``. In cases where ``reduction`` is set to +"none", indices should not have duplicate entries: that is, if idx1 != +idx2, then indices[idx1] != indices[idx2]. This ensures that the output +value does not depend on the iteration order. When ``reduction`` is set +to "add", ``output`` is calculated as follows: output = np.copy(data) +update_indices = indices.shape[:-1] for idx in +np.ndindex(update_indices): output[indices[idx]] += updates[idx] When +``reduction`` is set to "mul", ``output`` is calculated as follows: +output = np.copy(data) update_indices = indices.shape[:-1] for idx in +np.ndindex(update_indices): output[indices[idx]] \*= updates[idx] This +operator is the inverse of GatherND. Example 1: + +:: + + data = [1, 2, 3, 4, 5, 6, 7, 8] + indices = [[4], [3], [1], [7]] + updates = [9, 10, 11, 12] + output = [1, 11, 3, 10, 9, 6, 7, 12] + +Example 2: + +:: + + data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + indices = [[0], [2]] + updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]] + output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + +Parameters +========== +data + Type T. + Tensor of rank r >= 1. +indices + Type tensor(int64). + Tensor of rank q >= 1. +updates + Type T. + Tensor of rank q + r - indices_shape[-1] - 1. +reduction + Attribute. + Type of reduction to apply: none (default), add, mul. 'none': no + reduction applied. 
'add': reduction using the addition operation. 'mul': + reduction using the multiplication operation. + +Returns +======= +output : Var + Type T. + Tensor of rank r >= 1. + +Notes +===== +Signature: ``ai.onnx@16::ScatterND``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _ScatterND( + _ScatterND.Attributes( + reduction=AttrString(reduction, name="reduction"), + ), _ScatterND.Inputs( + data=unwrap_vars(data), indices=unwrap_vars(indices), updates=unwrap_vars(updates), ), ).get_output_vars( + data=get_value(data), indices=get_value(indices), updates=get_value(updates), ).output + + +def selu(X: Var, *, alpha: float = 1.6732631921768188, gamma: float = 1.0507010221481323, ) -> Var: + r""" +Selu takes one input data (Tensor) and produces one output data +(Tensor) where the scaled exponential linear unit function, +``y = gamma * (alpha * e^x - alpha) for x <= 0``, +``y = gamma * x for x > 0``, is applied to the tensor elementwise. + +Parameters +========== +X + Type T. + Input tensor +alpha + Attribute. + Coefficient of SELU default to 1.67326319217681884765625 (i.e., float32 + approximation of 1.6732632423543772848170429916717). +gamma + Attribute. + Coefficient of SELU default to 1.05070102214813232421875 (i.e., float32 + approximation of 1.0507009873554804934193349852946). + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@6::Selu``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Selu( + _Selu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + gamma=AttrFloat32(gamma, name="gamma"), + ), _Selu.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def sequence_at(input_sequence: Var, position: Var, ) -> Var: + r""" +Outputs a tensor copy from the tensor at 'position' in 'input_sequence'. +Accepted range for 'position' is in ``[-n, n - 1]``, where ``n`` is the +number of tensors in 'input_sequence'. Negative value means counting +positions from the back. + +Parameters +========== +input_sequence + Type S. + Input sequence. +position + Type I. + Position of the tensor in the sequence. Negative value means counting + positions from the back. Accepted range in ``[-n, n - 1]``, where ``n`` + is the number of tensors in 'input_sequence'. It is an error if any of + the index values are out of bounds. It must be a scalar(tensor of empty + shape). + +Returns +======= +tensor : Var + Type T. + Output tensor at the specified position in the input sequence. + +Notes +===== +Signature: ``ai.onnx@11::SequenceAt``. 
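Returning briefly to ``ScatterND`` above: the update rule it documents is
short enough to check directly in NumPy. A sketch reproducing Example 1
with ``reduction='none'``::

    import numpy as np

    data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    indices = np.array([[4], [3], [1], [7]])
    updates = np.array([9, 10, 11, 12])

    output = np.copy(data)
    for idx in np.ndindex(indices.shape[:-1]):
        output[tuple(indices[idx])] = updates[idx]   # each k-tuple addresses a slice of data

    print(output)   # [ 1 11  3 10  9  6  7 12]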
+ +Type constraints: + - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` + - I: `tensor(int32)`, `tensor(int64)` + - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _SequenceAt( + _SequenceAt.Attributes( + ), _SequenceAt.Inputs( + input_sequence=unwrap_vars(input_sequence), position=unwrap_vars(position), ), ).get_output_vars( + input_sequence=get_value(input_sequence), position=get_value(position), ).tensor + + +def sequence_construct(inputs: Sequence[Var], ) -> Var: + r""" +Construct a tensor sequence containing 'inputs' tensors. All tensors in +'inputs' must have the same data type. + +Parameters +========== +inputs + Type T. + Tensors. + +Returns +======= +output_sequence : Var + Type S. + Sequence enclosing the input tensors. + +Notes +===== +Signature: ``ai.onnx@11::SequenceConstruct``. + +Type constraints: + - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` + """ + return _SequenceConstruct( + _SequenceConstruct.Attributes( + ), _SequenceConstruct.Inputs( + inputs=unwrap_vars(inputs), ), ).get_output_vars( + inputs=get_value(inputs), ).output_sequence + + +def sequence_empty(*, dtype: Optional[npt.DTypeLike] = None, ) -> Var: + r""" +Construct an empty tensor sequence, with given data type. + +Parameters +========== +dtype + Attribute. + (Optional) The data type of the tensors in the output sequence. The + default type is 'float'. + +Returns +======= +output : Var + Type S. + Empty sequence. + +Notes +===== +Signature: ``ai.onnx@11::SequenceEmpty``. + +Type constraints: + - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` + """ + return _SequenceEmpty( + _SequenceEmpty.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + ), _SequenceEmpty.Inputs( + ), ).get_output_vars( + ).output + + +def sequence_erase(input_sequence: Var, position: Optional[Var] = None, ) -> Var: + r""" +Outputs a tensor sequence that removes the tensor at 'position' from +'input_sequence'. Accepted range for 'position' is in ``[-n, n - 1]``, +where ``n`` is the number of tensors in 'input_sequence'. Negative value +means counting positions from the back. 
'position' is optional, by +default it erases the last tensor from 'input_sequence'. + +Parameters +========== +input_sequence + Type S. + Input sequence. +position + Type I. + Position of the tensor in the sequence. Negative value means counting + positions from the back. Accepted range in ``[-n, n - 1]``, where ``n`` + is the number of tensors in 'input_sequence'. It is an error if any of + the index values are out of bounds. It must be a scalar(tensor of empty + shape). + +Returns +======= +output_sequence : Var + Type S. + Output sequence that has the tensor at the specified position removed. + +Notes +===== +Signature: ``ai.onnx@11::SequenceErase``. + +Type constraints: + - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` + - I: `tensor(int32)`, `tensor(int64)` + """ + return _SequenceErase( + _SequenceErase.Attributes( + ), _SequenceErase.Inputs( + input_sequence=unwrap_vars(input_sequence), position=unwrap_vars(position), ), ).get_output_vars( + input_sequence=get_value(input_sequence), position=get_value(position), ).output_sequence + + +def sequence_insert(input_sequence: Var, tensor: Var, position: Optional[Var] = None, ) -> Var: + r""" +Outputs a tensor sequence that inserts 'tensor' into 'input_sequence' at +'position'. 'tensor' must have the same data type as 'input_sequence'. +Accepted range for 'position' is in ``[-n, n]``, where ``n`` is the +number of tensors in 'input_sequence'. Negative value means counting +positions from the back. 'position' is optional, by default it inserts +'tensor' to the back of 'input_sequence'. + +Parameters +========== +input_sequence + Type S. + Input sequence. +tensor + Type T. + Input tensor to be inserted into the input sequence. +position + Type I. + Position in the sequence where the new tensor is inserted. It is + optional and default is to insert to the back of the sequence. Negative + value means counting positions from the back. Accepted range in + ``[-n, n]``, where ``n`` is the number of tensors in 'input_sequence'. + It is an error if any of the index values are out of bounds. It must be + a scalar(tensor of empty shape). + +Returns +======= +output_sequence : Var + Type S. + Output sequence that contains the inserted tensor at given position. + +Notes +===== +Signature: ``ai.onnx@11::SequenceInsert``. 
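The position semantics line up with Python's ``list.insert``: omitting
``position`` appends at the back, while a negative ``position`` counts
from the back, so ``-1`` places the new tensor just before the current
last element rather than appending it. A small plain-Python analogy
(numbers standing in for tensors)::

    seq = [10, 20, 30]        # a sequence of n = 3 tensors

    appended = seq + [99]     # position omitted: [10, 20, 30, 99]

    inserted = seq.copy()
    inserted.insert(-1, 99)   # position = -1:   [10, 20, 99, 30]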
+ +Type constraints: + - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` + - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - I: `tensor(int32)`, `tensor(int64)` + """ + return _SequenceInsert( + _SequenceInsert.Attributes( + ), _SequenceInsert.Inputs( + input_sequence=unwrap_vars(input_sequence), tensor=unwrap_vars(tensor), position=unwrap_vars(position), ), ).get_output_vars( + input_sequence=get_value(input_sequence), tensor=get_value(tensor), position=get_value(position), ).output_sequence + + +def sequence_length(input_sequence: Var, ) -> Var: + r""" +Produces a scalar(tensor of empty shape) containing the number of +tensors in 'input_sequence'. + +Parameters +========== +input_sequence + Type S. + Input sequence. + +Returns +======= +length : Var + Type I. + Length of input sequence. It must be a scalar(tensor of empty shape). + +Notes +===== +Signature: ``ai.onnx@11::SequenceLength``. + +Type constraints: + - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` + - I: `tensor(int64)` + """ + return _SequenceLength( + _SequenceLength.Attributes( + ), _SequenceLength.Inputs( + input_sequence=unwrap_vars(input_sequence), ), ).get_output_vars( + input_sequence=get_value(input_sequence), ).length + + +def sequence_map(input_sequence: Var, additional_inputs: Sequence[Var] = (), *, body: Callable[..., Iterable[Var]], ) -> Sequence[Var]: + r""" +Applies a sub-graph to each sample in the input sequence(s). + +Inputs can be either tensors or sequences, with the exception of the +first input which must be a sequence. The length of the first input +sequence will determine the number of samples in the outputs. Any other +sequence inputs should have the same number of samples. The number of +inputs and outputs, should match the one of the subgraph. + +For each i-th element in the output, a sample will be extracted from the +input sequence(s) at the i-th position and the sub-graph will be applied +to it. The outputs will contain the outputs of the sub-graph for each +sample, in the same order as in the input. + +This operator assumes that processing each sample is independent and +could executed in parallel or in any order. Users cannot expect any +specific ordering in which each subgraph is computed. + +Parameters +========== +input_sequence + Type S. + Input sequence. +additional_inputs + Type V. + Additional inputs to the graph +body + Attribute. + The graph to be run for each sample in the sequence(s). It should have + as many inputs and outputs as inputs and outputs to the SequenceMap + function. + +Returns +======= +out_sequence : Sequence[Var] + Type S. + Output sequence(s) + +Notes +===== +Signature: ``ai.onnx@17::SequenceMap``. 
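The ``body`` callable is turned into the sub-graph attribute by this
wrapper, so the per-element computation can be written as an ordinary
Python function over ``Var`` objects. A minimal sketch, assuming the
generated opset module is importable as ``spox.opset.ai.onnx.v17`` and
using the public ``argument``/``Tensor`` helpers from ``spox``::

    import numpy as np
    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op   # assumed import path

    # Build a sequence of 1-D float tensors and square each element via
    # a sub-graph supplied through the `body` callable.
    x = argument(Tensor(np.float32, ("N",)))
    y = argument(Tensor(np.float32, ("N",)))
    seq = op.sequence_construct([x, y])

    (squared,) = op.sequence_map(seq, body=lambda elem: [op.mul(elem, elem)])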
+ +Type constraints: + - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` + - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + _body_subgraph: Graph = subgraph( + [typing_cast(SpoxSequence, input_sequence.unwrap_type()).elem_type] + [typing_cast(SpoxSequence, var.unwrap_type()).elem_type for var in additional_inputs], + body + ) + return _SequenceMap( + _SequenceMap.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), _SequenceMap.Inputs( + input_sequence=unwrap_vars(input_sequence), additional_inputs=unwrap_vars(additional_inputs), ), out_variadic=len(_body_subgraph.requested_results), ).get_output_vars( + input_sequence=get_value(input_sequence), additional_inputs=get_value(additional_inputs), ).out_sequence + + +def shape(data: Var, *, end: Optional[int] = None, start: int = 0, ) -> Var: + r""" +Takes a tensor as input and outputs an 1D int64 tensor containing the +shape of the input tensor. Optional attributes start and end can be used +to compute a slice of the input tensor's shape. If start axis is +omitted, the slice starts from axis 0. The end axis, if specified, is +exclusive (and the returned value will not include the size of that +axis). If the end axis is omitted, the axes upto the last one will be +included. Negative axes indicate counting back from the last axis. Note +that axes will be clamped to the range [0, r-1], where r is the rank of +the input tensor if they are out-of-range (after adding r in the case of +negative axis). Thus, specifying any end value > r is equivalent to +specifying an end value of r, and specifying any start value < -r is +equivalent to specifying a start value of 0. + +Examples: + +:: + + Input tensor with shape: [2, 3, 4] + No attributes specified. + Output: [2, 3, 4] + +:: + + Input tensor with shape: [2, 3, 4] + start: -1 + Output: [4] + +:: + + Input tensor with shape: [2, 3, 4] + end: -1 + Output: [2, 3] + +:: + + Input tensor with shape: [2, 3, 4] + start: 1 + end: 2 + Output: [3] + +Parameters +========== +data + Type T. + An input tensor. +end + Attribute. + (Optional) Ending axis for slicing the shape. Negative value means + counting dimensions from the back. If omitted, sizes of all axes upto + (including) the last one will be included. +start + Attribute. + (Optional) Starting axis for slicing the shape. Default value is + 0.Negative value means counting dimensions from the back. + +Returns +======= +shape : Var + Type T1. + Shape of the input tensor + +Notes +===== +Signature: ``ai.onnx@15::Shape``. 
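The ``start``/``end`` attributes behave like taking a Python slice of the
shape vector itself; the documented examples can be reproduced in NumPy::

    import numpy as np

    x = np.zeros((2, 3, 4))
    full = np.array(x.shape, dtype=np.int64)

    print(full)        # [2 3 4]   no attributes
    print(full[-1:])   # [4]       start = -1
    print(full[:-1])   # [2 3]     end = -1
    print(full[1:2])   # [3]       start = 1, end = 2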
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(int64)` + """ + return _Shape( + _Shape.Attributes( + end=AttrInt64.maybe(end, name="end"), + start=AttrInt64(start, name="start"), + ), _Shape.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).shape + + +def shrink(input: Var, *, bias: float = 0.0, lambd: float = 0.5, ) -> Var: + r""" +Shrink takes one input data (Tensor) and produces one Tensor output, +having same datatype and shape with input. It has two attributes, lambd +and bias. The formula of this operator is: If x < -lambd, y = x + bias; +If x > lambd, y = x - bias; Otherwise, y = 0. + +Parameters +========== +input + Type T. + The input data as Tensor. +bias + Attribute. + The bias value added to output. Default is 0. +lambd + Attribute. + The lambd value for the Shrink formulation. Default is 0.5. + +Returns +======= +output : Var + Type T. + The output. + +Notes +===== +Signature: ``ai.onnx@9::Shrink``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Shrink( + _Shrink.Attributes( + bias=AttrFloat32(bias, name="bias"), + lambd=AttrFloat32(lambd, name="lambd"), + ), _Shrink.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def sigmoid(X: Var, ) -> Var: + r""" +Sigmoid takes one input data (Tensor) and produces one output data +(Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is +applied to the tensor elementwise. + +Parameters +========== +X + Type T. + Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@13::Sigmoid``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Sigmoid( + _Sigmoid.Attributes( + ), _Sigmoid.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def sign(input: Var, ) -> Var: + r""" +Calculate the sign of the given input tensor element-wise. If input > 0, +output 1. if input < 0, output -1. if input == 0, output 0. + +Parameters +========== +input + Type T. + Input tensor + +Returns +======= +output : Var + Type T. + The sign of the input tensor computed element-wise. It has the same + shape and type of the input. + +Notes +===== +Signature: ``ai.onnx@13::Sign``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Sign( + _Sign.Attributes( + ), _Sign.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def sin(input: Var, ) -> Var: + r""" +Calculates the sine of the given input tensor, element-wise. + +Parameters +========== +input + Type T. + Input tensor +Returns +======= +output : Var + Type T. 
+ The sine of the input tensor computed element-wise -def cast( - input: Var, - *, - to: npt.DTypeLike, -) -> Var: - r""" - The operator casts the elements of a given input tensor to a data type - specified by the 'to' argument and returns an output tensor of the same - size in the converted type. The 'to' argument must be one of the data - types specified in the 'DataType' enum field in the TensorProto message. - - Casting from string tensor in plain (e.g., "3.14" and "1000") and - scientific numeric representations (e.g., "1e-5" and "1E8") to float - types is supported. For example, converting string "100.5" to an integer - may yield result 100. There are some string literals reserved for - special floating-point values; "+INF" (and "INF"), "-INF", and "NaN" are - positive infinity, negative infinity, and not-a-number, respectively. - Any string which can exactly match "+INF" in a case-insensitive way - would be mapped to positive infinite. Similarly, this case-insensitive - rule is applied to "INF" and "NaN". When casting from numeric tensors to - string tensors, plain floating-point representation (such as - "314.15926") would be used. Converting non-numerical-literal string such - as "Hello World!" is an undefined behavior. Cases of converting string - representing floating-point arithmetic value, such as "2.718", to INT is - an undefined behavior. - - Conversion from a numerical type to any numerical type is always - allowed. User must be aware of precision loss and value change caused by - range difference between two types. For example, a 64-bit float - 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, - converting an integer 36 to Boolean may produce 1 because we truncate - bits which can't be stored in the targeted type. - - In more detail, the conversion among numerical types should follow these - rules: - - - Casting from floating point to: - - - floating point: +/- infinity if OOR (out of range). - - fixed point: undefined if OOR. - - bool: +/- 0.0 to False; all else to True. - - - Casting from fixed point to: - - - floating point: +/- infinity if OOR. (+ infinity in the case of - uint) - - fixed point: when OOR, discard higher bits and reinterpret (with - respect to two's complement representation for signed types). For - example, 200 (int16) -> -56 (int8). - - bool: zero to False; nonzero to True. - - - Casting from bool to: - - - floating point: ``{1.0, 0.0}``. - - fixed point: ``{1, 0}``. - - bool: no change. - - Parameters - ========== - input - Type T1. - Input tensor to be cast. - to - Attribute. - The data type to which the elements of the input tensor are cast. - Strictly must be one of the types from DataType enum in TensorProto - - Returns - ======= - output : Var - Type T2. - Output tensor with the same shape as input with type specified by the - 'to' argument - - Notes - ===== - Signature: ``ai.onnx@13::Cast``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Notes +===== +Signature: ``ai.onnx@7::Sin``. 
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _Cast( - _Cast.Attributes( - to=AttrDtype(to, name="to"), - ), - _Cast.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _Sin( + _Sin.Attributes( + ), _Sin.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def cast_like( - input: Var, - target_type: Var, -) -> Var: +def sinh(input: Var, ) -> Var: r""" - The operator casts the elements of a given input tensor (the first - input) to the same data type as the elements of the second input tensor. - See documentation of the Cast operator for further details. +Calculates the hyperbolic sine of the given input tensor element-wise. - Parameters - ========== - input - Type T1. - Input tensor to be cast. - target_type - Type T2. - The (first) input tensor will be cast to produce a tensor of the same - type as this (second input) tensor. - - Returns - ======= - output : Var - Type T2. - Output tensor produced by casting the first input tensor to have the - same type as the second input tensor. - - Notes - ===== - Signature: ``ai.onnx@15::CastLike``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _CastLike( - _CastLike.Attributes(), - _CastLike.Inputs( - input=unwrap_vars(input), - target_type=unwrap_vars(target_type), - ), - ) - .get_output_vars( - input=get_value(input), - target_type=get_value(target_type), - ) - .output - ) +Parameters +========== +input + Type T. + Input tensor +Returns +======= +output : Var + Type T. + The hyperbolic sine values of the input tensor computed element-wise -def ceil( - X: Var, -) -> Var: - r""" - Ceil takes one input data (Tensor) and produces one output data - (Tensor) where the ceil is, y = ceil(x), is applied to the tensor - elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is - returned. - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@13::Ceil``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` +Notes +===== +Signature: ``ai.onnx@9::Sinh``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _Ceil( - _Ceil.Attributes(), - _Ceil.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) + return _Sinh( + _Sinh.Attributes( + ), _Sinh.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def celu( - X: Var, - *, - alpha: float = 1.0, -) -> Var: +def size(data: Var, ) -> Var: r""" - Continuously Differentiable Exponential Linear Units: Perform the linear - unit element-wise on the input tensor X using formula: - - :: - - max(0,x) + min(0,alpha*(exp(x/alpha)-1)) - - Parameters - ========== - X - Type T. - Input tensor - alpha - Attribute. - The Alpha value in Celu formula which control the shape of the unit. 
The - default value is 1.0. - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@12::Celu``. - - Type constraints: - - T: `tensor(float)` - """ - return ( - _Celu( - _Celu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _Celu.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) +Takes a tensor as input and outputs a int64 scalar that equals to the +total number of elements of the input tensor. +Parameters +========== +data + Type T. + An input tensor. + +Returns +======= +size : Var + Type T1. + Total number of elements of the input tensor + +Notes +===== +Signature: ``ai.onnx@13::Size``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(int64)` + """ + return _Size( + _Size.Attributes( + ), _Size.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).size + + +def slice(data: Var, starts: Var, ends: Var, axes: Optional[Var] = None, steps: Optional[Var] = None, ) -> Var: + r""" +Produces a slice of the input tensor along multiple axes. Similar to +numpy: +https://numpy.org/doc/stable/user/basics.indexing.html?highlight=slice#slicing-and-striding + +Slice uses the ``starts``, ``ends``, ``axes`` and ``steps`` inputs to +select a sub-tensor of its input ``data`` tensor. + +An effective ``starts[i]``, ``ends[i]``, and ``steps[i]`` must be +computed for each ``i`` in ``[0, ... r-1]`` where ``r = rank(input)`` as +follows: + +If ``axes`` are omitted, they are set to ``[0, ..., r-1]``. If ``steps`` +are omitted, they are set to ``[1, ..., 1]`` of length ``len(starts)`` + +The effective values are initialized as ``start[i] = 0``, +``ends[i] = dims[i]`` where ``dims`` are the dimensions of ``input`` and +``steps[i] = 1``. + +All negative elements of ``axes`` are made non-negative by adding ``r`` +to them, where ``r =rank(input)``. + +All negative values in ``starts[i]`` and ``ends[i]`` have +``dims[axes[i]]`` added to them, where ``dims`` are the dimensions of +``input``. Then ``start[axes[i]]`` is the adjusted ``starts[i]`` is +clamped into the range ``[0, dims[axes[i]]]`` for positive stepping and +``[0, dims[axes[i]]-1]`` for negative stepping. + +The clamping for the adjusted ``ends[i]`` depends on the sign of +``steps[i]`` and must accommodate copying 0 through ``dims[axes[i]]`` +elements, so for positive stepping ``ends[axes[i]]`` is clamped to +``[0, dims[axes[i]]]``, while for negative stepping it is clamped to +``[-1, dims[axes[i]]-1]``. + +Finally, ``steps[axes[i]] = steps[i]``. + +For slicing to the end of a dimension with unknown size, it is +recommended to pass in ``INT_MAX`` when slicing forward and 'INT_MIN' +when slicing backward. + +Example 1: + +:: + + data = [ + [1, 2, 3, 4], + [5, 6, 7, 8], + ] + axes = [0, 1] + starts = [1, 0] + ends = [2, 3] + steps = [1, 2] + result = [ + [5, 7], + ] + +Example 2: + +:: + + data = [ + [1, 2, 3, 4], + [5, 6, 7, 8], + ] + starts = [0, 1] + ends = [-1, 1000] + result = [ + [2, 3, 4], + ] + +Parameters +========== +data + Type T. + Tensor of data to extract slices from. +starts + Type Tind. + 1-D tensor of starting indices of corresponding axis in ``axes`` +ends + Type Tind. 
+ 1-D tensor of ending indices (exclusive) of corresponding axis in + ``axes`` +axes + Type Tind. + 1-D tensor of axes that ``starts`` and ``ends`` apply to. Negative value + means counting dimensions from the back. Accepted range is [-r, r-1] + where r = rank(data). Behavior is undefined if an axis is repeated. +steps + Type Tind. + 1-D tensor of slice step of corresponding axis in ``axes``. Negative + value means slicing backward. 'steps' cannot be 0. Defaults to 1s. + +Returns +======= +output : Var + Type T. + Sliced data tensor. + +Notes +===== +Signature: ``ai.onnx@13::Slice``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - Tind: `tensor(int32)`, `tensor(int64)` + """ + return _Slice( + _Slice.Attributes( + ), _Slice.Inputs( + data=unwrap_vars(data), starts=unwrap_vars(starts), ends=unwrap_vars(ends), axes=unwrap_vars(axes), steps=unwrap_vars(steps), ), ).get_output_vars( + data=get_value(data), starts=get_value(starts), ends=get_value(ends), axes=get_value(axes), steps=get_value(steps), ).output + + +def softmax(input: Var, *, axis: int = -1, ) -> Var: + r""" +The operator computes the normalized exponential values for the given +input: + +Softmax(input, axis) = Exp(input) / ReduceSum(Exp(input), axis=axis, +keepdims=1) + +The "axis" attribute indicates the dimension along which Softmax will be +performed. The output tensor has the same shape and contains the Softmax +values of the corresponding input. + +Parameters +========== +input + Type T. + The input tensor of rank >= axis. +axis + Attribute. + Describes the dimension Softmax will be performed on. Negative value + means counting dimensions from the back. Accepted range is [-r, r-1] + where r = rank(input). + +Returns +======= +output : Var + Type T. + The output values with the same shape as the input tensor. + +Notes +===== +Signature: ``ai.onnx@13::Softmax``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Softmax( + _Softmax.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _Softmax.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def softmax_cross_entropy_loss(scores: Var, labels: Var, weights: Optional[Var] = None, *, ignore_index: Optional[int] = None, reduction: str = "mean", ) -> tuple[Var, Var]: + r""" +Loss function that measures the softmax cross entropy between 'scores' +and 'labels'. This operator first computes a loss tensor whose shape is +identical to the labels input. If the input is 2-D with shape (N, C), +the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N). If +the input is N-D tensor with shape (N, C, D1, D2, ..., Dk), the loss +tensor L may have (N, D1, D2, ..., Dk) as its shape and +L[i,][j_1][j_2]...[j_k] denotes a scalar element in L. After L is +available, this operator can optionally do a reduction operator. + +- shape(scores): (N, C) where C is the number of classes, or (N, C, D1, + D2,..., Dk), with K >= 1 in case of K-dimensional loss. +- shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, + D1, D2,..., Dk), with K >= 1 in case of K-dimensional loss. 
+ +The loss for one sample, l_i, can calculated as follows: + +:: + + l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes. + +or + +:: + + l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided. + +loss is zero for the case when label-value equals ignore_index. + +:: + + l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index + +where: + +:: + + p = Softmax(scores) + y = Log(p) + c = labels[i][d1][d2]...[dk] + +Finally, L is optionally reduced: + +- If reduction = 'none', the output is L with shape (N, D1, D2, ..., + Dk). +- If reduction = 'sum', the output is scalar: Sum(L). +- If reduction = 'mean', the output is scalar: ReduceMean(L), or if + weight is provided: ``ReduceSum(L) / ReduceSum(W)``, where tensor W + is of shape ``(N, D1, D2, ..., Dk)`` and + ``W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]``. + +Parameters +========== +scores + Type T. + The predicted outputs with shape [batch_size, class_size], or + [batch_size, class_size, D1, D2 , ..., Dk], where K is the number of + dimensions. +labels + Type Tind. + The ground truth output tensor, with shape [batch_size], or [batch_size, + D1, D2, ..., Dk], where K is the number of dimensions. Labels element + value shall be in range of [0, C). If ignore_index is specified, it may + have a value outside [0, C) and the label values should either be in the + range [0, C) or have the value ignore_index. +weights + Type T. + A manual rescaling weight given to each class. If given, it has to be a + 1D Tensor assigning weight to each of the classes. Otherwise, it is + treated as if having all ones. +ignore_index + Attribute. + Specifies a target value that is ignored and does not contribute to the + input gradient. It's an optional value. +reduction + Attribute. + Type of reduction to apply to loss: none, sum, mean(default). 'none': no + reduction will be applied, 'sum': the output will be summed. 'mean': the + sum of the output will be divided by the number of elements in the + output. + +Returns +======= +output : Var + Type T. + Weighted loss float Tensor. If reduction is 'none', this has the shape + of [batch_size], or [batch_size, D1, D2, ..., Dk] in case of + K-dimensional loss. Otherwise, it is a scalar. +log_prob : Var + Type T. + Log probability tensor. If the output of softmax is prob, its value is + log(prob). + +Notes +===== +Signature: ``ai.onnx@13::SoftmaxCrossEntropyLoss``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - Tind: `tensor(int32)`, `tensor(int64)` + """ + return _SoftmaxCrossEntropyLoss( + _SoftmaxCrossEntropyLoss.Attributes( + ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), + reduction=AttrString(reduction, name="reduction"), + ), _SoftmaxCrossEntropyLoss.Inputs( + scores=unwrap_vars(scores), labels=unwrap_vars(labels), weights=unwrap_vars(weights), ), ).get_output_vars( + scores=get_value(scores), labels=get_value(labels), weights=get_value(weights), )._unpack_to_any() + + +def softplus(X: Var, ) -> Var: + r""" +Softplus takes one input data (Tensor) and produces one output data +(Tensor) where the softplus function, y = ln(exp(x) + 1), is applied +to the tensor elementwise. + +Parameters +========== +X + Type T. + 1D input tensor + +Returns +======= +Y : Var + Type T. + 1D input tensor + +Notes +===== +Signature: ``ai.onnx@1::Softplus``. 
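Returning to ``SoftmaxCrossEntropyLoss`` above: the documented formulas
and reductions can be checked with a small NumPy sketch (hypothetical
scores and labels, no class weights)::

    import numpy as np

    scores = np.array([[1.0, 2.0, 0.5],
                       [0.1, 0.2, 3.0]], dtype=np.float32)   # shape (N, C)
    labels = np.array([1, 2])                                # shape (N,)

    p = np.exp(scores - scores.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)                  # p = Softmax(scores)
    log_prob = np.log(p)                               # the log_prob output
    loss = -log_prob[np.arange(len(labels)), labels]   # reduction = 'none'

    print(loss)          # per-sample losses, shape (N,)
    print(loss.mean())   # reduction = 'mean'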
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Softplus( + _Softplus.Attributes( + ), _Softplus.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def softsign(input: Var, ) -> Var: + r""" +Calculates the softsign (x/(1+|x\|)) of the given input tensor +element-wise. + +Parameters +========== +input + Type T. + Input tensor + +Returns +======= +output : Var + Type T. + The softsign (x/(1+|x\|)) values of the input tensor computed + element-wise + +Notes +===== +Signature: ``ai.onnx@1::Softsign``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Softsign( + _Softsign.Attributes( + ), _Softsign.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def space_to_depth(input: Var, *, blocksize: int, ) -> Var: + r""" +SpaceToDepth rearranges blocks of spatial data into depth. More +specifically, this op outputs a copy of the input tensor where values +from the height and width dimensions are moved to the depth dimension. + +Parameters +========== +input + Type T. + Input tensor of [N,C,H,W], where N is the batch axis, C is the channel + or depth, H is the height and W is the width. +blocksize + Attribute. + Blocks of [blocksize, blocksize] are moved. + +Returns +======= +output : Var + Type T. + Output tensor of [N, C \* blocksize \* blocksize, H/blocksize, + W/blocksize]. + +Notes +===== +Signature: ``ai.onnx@13::SpaceToDepth``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _SpaceToDepth( + _SpaceToDepth.Attributes( + blocksize=AttrInt64(blocksize, name="blocksize"), + ), _SpaceToDepth.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def split(input: Var, split: Optional[Var] = None, *, outputs_count: int, axis: int = 0, ) -> Sequence[Var]: + r""" +Split a tensor into a list of tensors, along the specified 'axis'. +Lengths of the parts can be specified using input 'split'. Otherwise, +the tensor is split to equal sized parts. + +Parameters +========== +input + Type T. + The tensor to split +split + Type tensor(int64). + Optional length of each output. Values should be >= 0.Sum of the values + must be equal to the dim value at 'axis' specified. +axis + Attribute. + Which axis to split on. A negative value means counting dimensions from + the back. Accepted range is [-rank, rank-1] where r = rank(input). +outputs_count + Specifies the number of variadic outputs of this operator. + Non-standard parameter created by the opset generator, as inference (a solution) it was not implemented or is impossible. + +Returns +======= +outputs : Sequence[Var] + Type T. + One or more outputs forming list of tensors after splitting + +Notes +===== +Signature: ``ai.onnx@13::Split``. 
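Since the number of results cannot be inferred from the inputs alone, the
extra ``outputs_count`` parameter fixes the output arity up front. A
minimal sketch, again assuming the opset module is importable as
``spox.opset.ai.onnx.v17``::

    import numpy as np
    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op   # assumed import path

    # Split a (6, 4) tensor into three equal parts along axis 0.
    x = argument(Tensor(np.float32, (6, 4)))
    a, b, c = op.split(x, outputs_count=3, axis=0)   # each part: shape (2, 4)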
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Split( + _Split.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _Split.Inputs( + input=unwrap_vars(input), split=unwrap_vars(split), ), out_variadic=outputs_count, ).get_output_vars( + input=get_value(input), split=get_value(split), ).outputs + + +def split_to_sequence(input: Var, split: Optional[Var] = None, *, axis: int = 0, keepdims: int = 1, ) -> Var: + r""" +Split a tensor into a sequence of tensors, along the specified 'axis'. +Lengths of the parts can be specified using the optional argument +'split'. If the argument +``split' is not specified, a default scalar value of 1 is used as the value of``\ split'. +'split' must contain only positive numbers. 'split' is either a scalar +(tensor of empty shape), or a 1-D tensor. If 'split' is a scalar, then +'input' will be split into chunks all of size 'split' if possible. The +last chunk alone may be smaller than 'split' if the 'input' size along +the given axis 'axis' is not divisible by 'split'. If 'split' is a +1-dimensional tensor, the input tensor is split into 'size(split)' +chunks, with lengths of the parts on 'axis' specified in 'split'. In +this scenario, the sum of entries in 'split' must be equal to the +dimension size of input tensor on 'axis'. + +Parameters +========== +input + Type T. + The tensor to split +split + Type I. + Length of each output. It can be either a scalar(tensor of empty shape), + or a 1-D tensor. All values must be >= 0. +axis + Attribute. + Which axis to split on. A negative value means counting dimensions from + the back. Accepted range is [-rank, rank-1]. +keepdims + Attribute. + Keep the split dimension or not. Default 1, which means we keep split + dimension. If input 'split' is specified, this attribute is ignored. + +Returns +======= +output_sequence : Var + Type S. + One or more outputs forming a sequence of tensors after splitting + +Notes +===== +Signature: ``ai.onnx@11::SplitToSequence``. + +Type constraints: + - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - I: `tensor(int32)`, `tensor(int64)` + - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` + """ + return _SplitToSequence( + _SplitToSequence.Attributes( + axis=AttrInt64(axis, name="axis"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), _SplitToSequence.Inputs( + input=unwrap_vars(input), split=unwrap_vars(split), ), ).get_output_vars( + input=get_value(input), split=get_value(split), ).output_sequence + + +def sqrt(X: Var, ) -> Var: + r""" +Square root takes one input data (Tensor) and produces one output +data (Tensor) where the square root is, y = x^0.5, is applied to the +tensor elementwise. If x is negative, then it will return NaN. + +Parameters +========== +X + Type T. 
+ Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@13::Sqrt``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Sqrt( + _Sqrt.Attributes( + ), _Sqrt.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def squeeze(data: Var, axes: Optional[Var] = None, ) -> Var: + r""" +Remove single-dimensional entries from the shape of a tensor. Takes an +input ``axes`` with a list of axes to squeeze. If ``axes`` is not +provided, all the single dimensions will be removed from the shape. If +an axis is selected with shape entry not equal to one, an error is +raised. + +Parameters +========== +data + Type T. + Tensors with at least max(dims) dimensions. +axes + Type tensor(int64). + List of integers indicating the dimensions to squeeze. Negative value + means counting dimensions from the back. Accepted range is [-r, r-1] + where r = rank(data). + +Returns +======= +squeezed : Var + Type T. + Reshaped tensor with same data as input. + +Notes +===== +Signature: ``ai.onnx@13::Squeeze``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Squeeze( + _Squeeze.Attributes( + ), _Squeeze.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).squeezed + + +def string_normalizer(X: Var, *, case_change_action: str = "NONE", is_case_sensitive: int = 0, locale: Optional[str] = None, stopwords: Optional[Iterable[str]] = None, ) -> Var: + r""" +StringNormalization performs string operations for basic cleaning. This +operator has only one input (denoted by X) and only one output (denoted +by Y). This operator first examines the elements in the X, and removes +elements specified in "stopwords" attribute. After removing stop words, +the intermediate result can be further lowercased, uppercased, or just +returned depending the "case_change_action" attribute. This operator +only accepts [C]- and [1, C]-tensor. If all elements in X are dropped, +the output will be the empty value of string tensor with shape [1] if +input shape is [C] and shape [1, 1] if input shape is [1, C]. + +Parameters +========== +X + Type tensor(string). + UTF-8 strings to normalize +case_change_action + Attribute. + string enum that cases output to be lowercased/uppercases/unchanged. + Valid values are "LOWER", "UPPER", "NONE". Default is "NONE" +is_case_sensitive + Attribute. + Boolean. Whether the identification of stop words in X is + case-sensitive. Default is false +locale + Attribute. + Environment dependent string that denotes the locale according to which + output strings needs to be upper/lowercased.Default en_US or platform + specific equivalent as decided by the implementation. +stopwords + Attribute. + List of stop words. If not set, no word would be removed from X. + +Returns +======= +Y : Var + Type tensor(string). + UTF-8 Normalized strings + +Notes +===== +Signature: ``ai.onnx@10::StringNormalizer``. 
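More generally, every constructor in this module follows the same
``Var``-in/``Var``-out pattern, with the ``unwrap_vars``/``get_output_vars``
plumbing kept inside the generated code. A minimal end-to-end sketch,
assuming the module is importable as ``spox.opset.ai.onnx.v17`` and using
the public ``argument``/``Tensor``/``build`` helpers from ``spox``::

    import numpy as np
    import onnx
    from spox import Tensor, argument, build
    import spox.opset.ai.onnx.v17 as op   # assumed import path

    x = argument(Tensor(np.float64, ("N",)))
    y = argument(Tensor(np.float64, ("N",)))
    z = op.sqrt(op.sub(x, y))             # operators defined in this module

    model: onnx.ModelProto = build(inputs={"x": x, "y": y}, outputs={"z": z})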
+ + """ + return _StringNormalizer( + _StringNormalizer.Attributes( + case_change_action=AttrString(case_change_action, name="case_change_action"), + is_case_sensitive=AttrInt64(is_case_sensitive, name="is_case_sensitive"), + locale=AttrString.maybe(locale, name="locale"), + stopwords=AttrStrings.maybe(stopwords, name="stopwords"), + ), _StringNormalizer.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def sub(A: Var, B: Var, ) -> Var: + r""" +Performs element-wise binary subtraction (with Numpy-style broadcasting +support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +(Opset 14 change): Extend supported types to include uint8, int8, +uint16, and int16. + +Parameters +========== +A + Type T. + First operand. +B + Type T. + Second operand. + +Returns +======= +C : Var + Type T. + Result, has same element type as two inputs + +Notes +===== +Signature: ``ai.onnx@14::Sub``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Sub( + _Sub.Attributes( + ), _Sub.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def sum(data_0: Sequence[Var], ) -> Var: + r""" +Element-wise sum of each of the input tensors (with Numpy-style +broadcasting support). All inputs and outputs must have the same data +type. This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +data_0 + Type T. + List of tensors for sum. + +Returns +======= +sum : Var + Type T. + Output tensor. + +Notes +===== +Signature: ``ai.onnx@13::Sum``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Sum( + _Sum.Attributes( + ), _Sum.Inputs( + data_0=unwrap_vars(data_0), ), ).get_output_vars( + data_0=get_value(data_0), ).sum + + +def tan(input: Var, ) -> Var: + r""" +Calculates the tangent of the given input tensor, element-wise. + +Parameters +========== +input + Type T. + Input tensor + +Returns +======= +output : Var + Type T. + The tangent of the input tensor computed element-wise + +Notes +===== +Signature: ``ai.onnx@7::Tan``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Tan( + _Tan.Attributes( + ), _Tan.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def tanh(input: Var, ) -> Var: + r""" +Calculates the hyperbolic tangent of the given input tensor +element-wise. + +Parameters +========== +input + Type T. + Input tensor + +Returns +======= +output : Var + Type T. + The hyperbolic tangent values of the input tensor computed element-wise + +Notes +===== +Signature: ``ai.onnx@13::Tanh``. 
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Tanh( + _Tanh.Attributes( + ), _Tanh.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def tf_idf_vectorizer(X: Var, *, max_gram_length: int, max_skip_count: int, min_gram_length: int, mode: str, ngram_counts: Iterable[int], ngram_indexes: Iterable[int], pool_int64s: Optional[Iterable[int]] = None, pool_strings: Optional[Iterable[str]] = None, weights: Optional[Iterable[float]] = None, ) -> Var: + r""" +This transform extracts n-grams from the input sequence and save them as +a vector. Input can be either a 1-D or 2-D tensor. For 1-D input, output +is the n-gram representation of that input. For 2-D input, the output is +also a 2-D tensor whose i-th row is the n-gram representation of the +i-th input row. More specifically, if input shape is [C], the +corresponding output shape would be [max(ngram_indexes) + 1]. If input +shape is [N, C], this operator produces a [N, max(ngram_indexes) + +1]-tensor. + +In contrast to standard n-gram extraction, here, the indexes of +extracting an n-gram from the original sequence are not necessarily +consecutive numbers. The discontinuity between indexes are controlled by +the number of skips. If the number of skips is 2, we should skip two +tokens when scanning through the original sequence. Let's consider an +example. Assume that input sequence is [94, 17, 36, 12, 28] and the +number of skips is 2. The associated 2-grams are [94, 12] and [17, 28] +respectively indexed by [0, 3] and [1, 4]. If the number of skips +becomes 0, the 2-grams generated are [94, 17], [17, 36], [36, 12], [12, +28] indexed by [0, 1], [1, 2], [2, 3], [3, 4], respectively. + +The output vector (denoted by Y) stores the count of each n-gram; +Y[ngram_indexes[i]] indicates the times that the i-th n-gram is found. +The attribute ngram_indexes is used to determine the mapping between +index i and the corresponding n-gram's output coordinate. If pool_int64s +is [94, 17, 17, 36], ngram_indexes is [1, 0], ngram_counts=[0, 0], then +the Y[0] (first element in Y) and Y[1] (second element in Y) are the +counts of [17, 36] and [94, 17], respectively. An n-gram which cannot be +found in pool_strings/pool_int64s should be ignored and has no effect on +the output. Note that we may consider all skips up to S when generating +the n-grams. + +The examples used above are true if mode is "TF". If mode is "IDF", all +the counts larger than 1 would be truncated to 1 and the i-th element in +weights would be used to scale (by multiplication) the count of the i-th +n-gram in pool. If mode is "TFIDF", this operator first computes the +counts of all n-grams and then scale them by the associated values in +the weights attribute. + +Only one of pool_strings and pool_int64s can be set. If pool_int64s is +set, the input should be an integer tensor. If pool_strings is set, the +input must be a string tensor. + +Parameters +========== +X + Type T. + Input for n-gram extraction +max_gram_length + Attribute. + Maximum n-gram length. If this value is 3, 3-grams will be used to + generate the output. +max_skip_count + Attribute. + Maximum number of items (integers/strings) to be skipped when + constructing an n-gram from X. If max_skip_count=1, min_gram_length=2, + max_gram_length=3, this operator may generate 2-grams with skip_count=0 + and skip_count=1, and 3-grams with skip_count=0 and skip_count=1 +min_gram_length + Attribute. + Minimum n-gram length. 
If this value is 2 and max_gram_length is 3, + output may contain counts of 2-grams and 3-grams. +mode + Attribute. + The weighting criteria. It can be one of "TF" (term frequency), "IDF" + (inverse document frequency), and "TFIDF" (the combination of TF and + IDF) +ngram_counts + Attribute. + The starting indexes of 1-grams, 2-grams, and so on in pool. It is + useful when determining the boundary between two consecutive collections + of n-grams. For example, if ngram_counts is [0, 17, 36], the first index + (zero-based) of 1-gram/2-gram/3-gram in pool are 0/17/36. This format is + essentially identical to CSR (or CSC) sparse matrix format, and we + choose to use this due to its popularity. +ngram_indexes + Attribute. + list of int64s (type: AttributeProto::INTS). This list is parallel to + the specified 'pool\_\*' attribute. The i-th element in ngram_indexes + indicate the coordinate of the i-th n-gram in the output tensor. +pool_int64s + Attribute. + List of int64 n-grams learned from the training set. Either this or + pool_strings attributes must be present but not both. It's an 1-D tensor + starting with the collections of all 1-grams and ending with the + collections of n-grams. The i-th element in pool stores the n-gram that + should be mapped to coordinate ngram_indexes[i] in the output vector. +pool_strings + Attribute. + List of strings n-grams learned from the training set. Either this or + pool_int64s attributes must be present but not both. It's an 1-D tensor + starting with the collections of all 1-grams and ending with the + collections of n-grams. The i-th element in pool stores the n-gram that + should be mapped to coordinate ngram_indexes[i] in the output vector. +weights + Attribute. + list of floats. This attribute stores the weight of each n-gram in pool. + The i-th element in weights is the weight of the i-th n-gram in pool. + Its length equals to the size of ngram_indexes. By default, weights is + an all-one tensor.This attribute is used when mode is "IDF" or "TFIDF" + to scale the associated word counts. + +Returns +======= +Y : Var + Type T1. + Ngram results + +Notes +===== +Signature: ``ai.onnx@9::TfIdfVectorizer``. + +Type constraints: + - T: `tensor(int32)`, `tensor(int64)`, `tensor(string)` + - T1: `tensor(float)` + """ + return _TfIdfVectorizer( + _TfIdfVectorizer.Attributes( + max_gram_length=AttrInt64(max_gram_length, name="max_gram_length"), + max_skip_count=AttrInt64(max_skip_count, name="max_skip_count"), + min_gram_length=AttrInt64(min_gram_length, name="min_gram_length"), + mode=AttrString(mode, name="mode"), + ngram_counts=AttrInt64s(ngram_counts, name="ngram_counts"), + ngram_indexes=AttrInt64s(ngram_indexes, name="ngram_indexes"), + pool_int64s=AttrInt64s.maybe(pool_int64s, name="pool_int64s"), + pool_strings=AttrStrings.maybe(pool_strings, name="pool_strings"), + weights=AttrFloat32s.maybe(weights, name="weights"), + ), _TfIdfVectorizer.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def thresholded_relu(X: Var, *, alpha: float = 1.0, ) -> Var: + r""" +ThresholdedRelu takes one input data (Tensor) and produces one output +data (Tensor) where the rectified linear function, y = x for x > +alpha, y = 0 otherwise, is applied to the tensor elementwise. + +Parameters +========== +X + Type T. + Input tensor +alpha + Attribute. + Threshold value + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@10::ThresholdedRelu``. 
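A small numpy sketch of the ThresholdedRelu rule quoted above (y = x for x > alpha, y = 0 otherwise); the helper name is illustrative only::

    import numpy as np

    def thresholded_relu_ref(x, alpha=1.0):
        # y = x where x > alpha, y = 0 otherwise
        return np.where(x > alpha, x, 0.0)

    print(thresholded_relu_ref(np.array([-1.0, 0.5, 1.0, 2.5])))  # [0.  0.  0.  2.5]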
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _ThresholdedRelu( + _ThresholdedRelu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), _ThresholdedRelu.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def tile(input: Var, repeats: Var, ) -> Var: + r""" +Constructs a tensor by tiling a given tensor. This is the same as +function ``tile`` in Numpy, but no broadcast. For example A = [[1, 2], +[3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]] + +Parameters +========== +input + Type T. + Input tensor of any shape. +repeats + Type T1. + 1D int64 tensor of the same length as input's dimension number, includes + numbers of repeated copies along input's dimensions. + +Returns +======= +output : Var + Type T. + Output tensor of the same dimensions and type as tensor input. + output_dim[i] = input_dim[i] \* repeats[i] + +Notes +===== +Signature: ``ai.onnx@13::Tile``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(int64)` + """ + return _Tile( + _Tile.Attributes( + ), _Tile.Inputs( + input=unwrap_vars(input), repeats=unwrap_vars(repeats), ), ).get_output_vars( + input=get_value(input), repeats=get_value(repeats), ).output + + +def top_k(X: Var, K: Var, *, axis: int = -1, largest: int = 1, sorted: int = 1, ) -> tuple[Var, Var]: + r""" +Retrieve the top-K largest or smallest elements along a specified axis. +Given an input tensor of shape [a_0, a_1, ..., a\_{n-1}] and integer +argument k, return two outputs: + +- Value tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1}, + ... a\_{n-1}] which contains the values of the top k elements along + the specified axis + +- Index tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1}, + ... a\_{n-1}] which contains the indices of the top k elements + (original indices from the input tensor). + +- If "largest" is 1 (the default value) then the k largest elements are + returned. + +- If "sorted" is 1 (the default value) then the resulting k elements + will be sorted. + +- If "sorted" is 0, order of returned 'Values' and 'Indices' are + undefined. + +Given two equivalent values, this operator uses the indices along the +axis as a tiebreaker. That is, the element with the lower index will +appear first. + +Parameters +========== +X + Type T. + Tensor of shape [a_0, a_1, ..., a\_{n-1}] +K + Type tensor(int64). + A 1-D tensor containing a single positive value corresponding to the + number of top elements to retrieve +axis + Attribute. + Dimension on which to do the sort. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). +largest + Attribute. + Whether to return the top-K largest or smallest elements. +sorted + Attribute. + Whether to return the elements in sorted order. + +Returns +======= +Values : Var + Type T. + Tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1}, ... + a\_{n-1}] containing top K values from the input tensor +Indices : Var + Type I. + Tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1}, ... + a\_{n-1}] containing the corresponding input tensor indices for the top + K values. + +Notes +===== +Signature: ``ai.onnx@11::TopK``. 
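A numpy reference for the TopK outputs described above (the sorted=1 case, with ties resolved in favour of the lower index via a stable sort); ``top_k_ref`` is a hypothetical helper, not part of this module::

    import numpy as np

    def top_k_ref(x, k, axis=-1, largest=True):
        order = np.argsort(-x if largest else x, axis=axis, kind="stable")
        idx = np.take(order, np.arange(k), axis=axis)
        return np.take_along_axis(x, idx, axis=axis), idx

    values, indices = top_k_ref(np.array([[3.0, 1.0, 4.0, 1.0]]), k=2)
    print(values)   # [[4. 3.]]
    print(indices)  # [[2 0]]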
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - I: `tensor(int64)` + """ + return _TopK( + _TopK.Attributes( + axis=AttrInt64(axis, name="axis"), + largest=AttrInt64(largest, name="largest"), + sorted=AttrInt64(sorted, name="sorted"), + ), _TopK.Inputs( + X=unwrap_vars(X), K=unwrap_vars(K), ), ).get_output_vars( + X=get_value(X), K=get_value(K), )._unpack_to_any() + + +def transpose(data: Var, *, perm: Optional[Iterable[int]] = None, ) -> Var: + r""" +Transpose the input tensor similar to numpy.transpose. For example, when +perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output +shape will be (2, 1, 3). + +Parameters +========== +data + Type T. + An input tensor. +perm + Attribute. + A list of integers. By default, reverse the dimensions, otherwise + permute the axes according to the values given. + +Returns +======= +transposed : Var + Type T. + Transposed output. + +Notes +===== +Signature: ``ai.onnx@13::Transpose``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Transpose( + _Transpose.Attributes( + perm=AttrInt64s.maybe(perm, name="perm"), + ), _Transpose.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).transposed + + +def trilu(input: Var, k: Optional[Var] = None, *, upper: int = 1, ) -> Var: + r""" +Given a 2-D matrix or batches of 2-D matrices, returns the upper or +lower triangular part of the tensor(s). The attribute "upper" determines +whether the upper or lower part is retained. If set to true, the upper +triangular matrix is retained. Lower triangular matrix is retained +otherwise. Default value for the "upper" attribute is true. Trilu takes +one input tensor of shape [\*, N, M], where \* is zero or more batch +dimensions. The upper triangular part consists of the elements on and +above the given diagonal (k). The lower triangular part consists of +elements on and below the diagonal. All other elements in the matrix are +set to zero. If k = 0, the triangular part on and above/below the main +diagonal is retained. If upper is set to true, a positive k retains the +upper triangular matrix excluding the main diagonal and (k-1) diagonals +above it. A negative k value retains the main diagonal and \|k\| +diagonals below it. If upper is set to false, a positive k retains the +lower triangular matrix including the main diagonal and k diagonals +above it. A negative k value excludes the main diagonal and (\|k\|-1) +diagonals below it. + +Parameters +========== +input + Type T. + Input tensor of rank 2 or higher. +k + Type tensor(int64). + A 0-D tensor containing a single value corresponding to the number + diagonals above or below the main diagonal to exclude or include. + Default value is 0 if it's not specified. +upper + Attribute. + Boolean. Indicates whether upper or lower part of matrix is retained. + Default is true. + +Returns +======= +output : Var + Type T. + Output tensor of the same type and shape as the input tensor. + +Notes +===== +Signature: ``ai.onnx@14::Trilu``. 
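The diagonal-offset convention spelled out above lines up with numpy's ``triu``/``tril`` ``k`` argument, which makes a quick check easy::

    import numpy as np

    x = np.arange(1, 17).reshape(4, 4)
    print(np.triu(x, k=0))   # upper=1, k=0: main diagonal and everything above it
    print(np.triu(x, k=1))   # upper=1, k=1: main diagonal excluded
    print(np.tril(x, k=-1))  # upper=0, k=-1: strictly below the main diagonal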
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Trilu( + _Trilu.Attributes( + upper=AttrInt64(upper, name="upper"), + ), _Trilu.Inputs( + input=unwrap_vars(input), k=unwrap_vars(k), ), ).get_output_vars( + input=get_value(input), k=get_value(k), ).output + + +def unique(X: Var, *, axis: Optional[int] = None, sorted: int = 1, ) -> tuple[Var, Var, Var, Var]: + r""" +Find the unique elements of a tensor. When an optional attribute 'axis' +is provided, unique subtensors sliced along the 'axis' are returned. +Otherwise the input tensor is flattened and unique values of the +flattened tensor are returned. -def clip( - input: Var, - min: Optional[Var] = None, - max: Optional[Var] = None, -) -> Var: - r""" - Clip operator limits the given input within an interval. The interval is - specified by the inputs 'min' and 'max'. They default to - numeric_limits::lowest() and numeric_limits::max(), respectively. - - Parameters - ========== - input - Type T. - Input tensor whose elements to be clipped - min - Type T. - Minimum value, under which element is replaced by min. It must be a - scalar(tensor of empty shape). - max - Type T. - Maximum value, above which element is replaced by max. It must be a - scalar(tensor of empty shape). - - Returns - ======= - output : Var - Type T. - Output tensor with clipped input elements - - Notes - ===== - Signature: ``ai.onnx@13::Clip``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Clip( - _Clip.Attributes(), - _Clip.Inputs( - input=unwrap_vars(input), - min=unwrap_vars(min), - max=unwrap_vars(max), - ), - ) - .get_output_vars( - input=get_value(input), - min=get_value(min), - max=get_value(max), - ) - .output - ) - - -def compress( - input: Var, - condition: Var, - *, - axis: Optional[int] = None, -) -> Var: - r""" - Selects slices from an input tensor along a given axis where condition - evaluates to True for each axis index. In case axis is not provided, - input is flattened before elements are selected. Compress behaves like - numpy.compress: - https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html - - Parameters - ========== - input - Type T. - Tensor of rank r >= 1. - condition - Type T1. - Rank 1 tensor of booleans to indicate which slices or data elements to - be selected. Its length can be less than the input length along the axis - or the flattened input size if axis is not specified. In such cases data - slices or elements exceeding the condition length are discarded. - axis - Attribute. - (Optional) Axis along which to take slices. If not specified, input is - flattened before elements being selected. Negative value means counting - dimensions from the back. Accepted range is [-r, r-1] where r = - rank(input). - - Returns - ======= - output : Var - Type T. - Tensor of rank r if axis is specified. Otherwise output is a Tensor of - rank 1. - - Notes - ===== - Signature: ``ai.onnx@11::Compress``. 
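Clip and Compress, documented above, mirror ``numpy.clip`` and ``numpy.compress``; a short numpy sketch::

    import numpy as np

    x = np.array([-2.0, -0.5, 0.5, 2.0])
    print(np.clip(x, -1.0, 1.0))  # [-1.  -0.5  0.5  1. ]

    data = np.array([[1, 2], [3, 4], [5, 6]])
    print(np.compress([False, True, True], data, axis=0))  # rows 1 and 2: [[3 4] [5 6]]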
- - Type constraints: - - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(bool)` - """ - return ( - _Compress( - _Compress.Attributes( - axis=AttrInt64.maybe(axis, name="axis"), - ), - _Compress.Inputs( - input=unwrap_vars(input), - condition=unwrap_vars(condition), - ), - ) - .get_output_vars( - input=get_value(input), - condition=get_value(condition), - ) - .output - ) - - -def concat( - inputs: Sequence[Var], - *, - axis: int, -) -> Var: - r""" - Concatenate a list of tensors into a single tensor. All input tensors - must have the same shape, except for the dimension size of the axis to - concatenate on. - - Parameters - ========== - inputs - Type T. - List of tensors for concatenation - axis - Attribute. - Which axis to concat on. A negative value means counting dimensions from - the back. Accepted range is [-r, r-1] where r = rank(inputs).. - - Returns - ======= - concat_result : Var - Type T. - Concatenated tensor - - Notes - ===== - Signature: ``ai.onnx@13::Concat``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Concat( - _Concat.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Concat.Inputs( - inputs=unwrap_vars(inputs), - ), - ) - .get_output_vars( - inputs=get_value(inputs), - ) - .concat_result - ) - - -def concat_from_sequence( - input_sequence: Var, - *, - axis: int, - new_axis: int = 0, -) -> Var: - r""" - Concatenate a sequence of tensors into a single tensor. All input - tensors must have the same shape, except for the dimension size of the - axis to concatenate on. By default 'new_axis' is 0, the behavior is - similar to numpy.concatenate. When 'new_axis' is 1, the behavior is - similar to numpy.stack. - - Parameters - ========== - input_sequence - Type S. - Sequence of tensors for concatenation - axis - Attribute. - Which axis to concat on. Accepted range in ``[-r, r - 1]``, where ``r`` - is the rank of input tensors. When ``new_axis`` is 1, accepted range is - ``[-r - 1, r]``. - new_axis - Attribute. - Insert and concatenate on a new axis or not, default 0 means do not - insert new axis. - - Returns - ======= - concat_result : Var - Type T. - Concatenated tensor - - Notes - ===== - Signature: ``ai.onnx@11::ConcatFromSequence``. 
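The Concat/ConcatFromSequence behaviour above is the familiar numpy one: plain concatenation when ``new_axis`` is 0, stacking on a fresh axis when it is 1::

    import numpy as np

    a = np.ones((2, 3))
    b = np.zeros((2, 1))
    print(np.concatenate([a, b], axis=1).shape)  # (2, 4): shapes agree except on the concat axis
    print(np.stack([a, a], axis=0).shape)        # (2, 2, 3): the new_axis=1 case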
- - Type constraints: - - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _ConcatFromSequence( - _ConcatFromSequence.Attributes( - axis=AttrInt64(axis, name="axis"), - new_axis=AttrInt64(new_axis, name="new_axis"), - ), - _ConcatFromSequence.Inputs( - input_sequence=unwrap_vars(input_sequence), - ), - ) - .get_output_vars( - input_sequence=get_value(input_sequence), - ) - .concat_result - ) - - -def constant( - *, - value: Optional[np.ndarray] = None, - value_float: Optional[float] = None, - value_floats: Optional[Iterable[float]] = None, - value_int: Optional[int] = None, - value_ints: Optional[Iterable[int]] = None, - value_string: Optional[str] = None, - value_strings: Optional[Iterable[str]] = None, -) -> Var: - r""" - This operator produces a constant tensor. Exactly one of the provided - attributes, either value, sparse_value, or value\_\* must be specified. - - Parameters - ========== - sparse_value - Attribute. - The value for the elements of the output tensor in sparse format. - value - Attribute. - The value for the elements of the output tensor. - value_float - Attribute. - The value for the sole element for the scalar, float32, output tensor. - value_floats - Attribute. - The values for the elements for the 1D, float32, output tensor. - value_int - Attribute. - The value for the sole element for the scalar, int64, output tensor. - value_ints - Attribute. - The values for the elements for the 1D, int64, output tensor. - value_string - Attribute. - The value for the sole element for the scalar, UTF-8 string, output - tensor. - value_strings - Attribute. - The values for the elements for the 1D, UTF-8 string, output tensor. - - Returns - ======= - output : Var - Type T. - Output tensor containing the same value of the provided tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Constant``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Constant( - _Constant.Attributes( - value=AttrTensor.maybe(value, name="value"), - value_float=AttrFloat32.maybe(value_float, name="value_float"), - value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), - value_int=AttrInt64.maybe(value_int, name="value_int"), - value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), - value_string=AttrString.maybe(value_string, name="value_string"), - value_strings=AttrStrings.maybe(value_strings, name="value_strings"), - ), - _Constant.Inputs(), - ) - .get_output_vars() - .output - ) - - -def constant_of_shape( - input: Var, - *, - value: Optional[np.ndarray] = None, -) -> Var: - r""" - Generate a tensor with given value and shape. - - Parameters - ========== - input - Type T1. - 1D tensor. The shape of the expected output tensor. 
If empty tensor is - given, the output would be a scalar. All values must be >= 0. - value - Attribute. - (Optional) The value of the output elements.Should be a one-element - tensor. If not specified, it defaults to a tensor of value 0 and - datatype float32 - - Returns - ======= - output : Var - Type T2. - Output tensor of shape specified by 'input'.If attribute 'value' is - specified, the value and datatype of the output tensor is taken from - 'value'.If attribute 'value' is not specified, the value in the output - defaults to 0, and the datatype defaults to float32. - - Notes - ===== - Signature: ``ai.onnx@9::ConstantOfShape``. - - Type constraints: - - T1: `tensor(int64)` - - T2: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _ConstantOfShape( - _ConstantOfShape.Attributes( - value=AttrTensor.maybe(value, name="value"), - ), - _ConstantOfShape.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def conv( - X: Var, - W: Var, - B: Optional[Var] = None, - *, - auto_pad: str = "NOTSET", - dilations: Optional[Iterable[int]] = None, - group: int = 1, - kernel_shape: Optional[Iterable[int]] = None, - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: - r""" - The convolution operator consumes an input tensor and a filter, and - computes the output. - - Parameters - ========== - X - Type T. - Input data tensor from previous layer; has size (N x C x H x W), where N - is the batch size, C is the number of channels, and H and W are the - height and width. Note that this is for the 2D image. Otherwise the size - is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in - effect, the operation expects input data tensor to arrive with the - dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, - DATA_FEATURE ...]. - W - Type T. - The weight tensor that will be used in the convolutions; has size (M x - C/group x kH x kW), where C is the number of channels, and kH and kW are - the height and width of the kernel, and M is the number of feature maps. - For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x - k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. - Optionally, if dimension denotation is in effect, the operation expects - the weight tensor to arrive with the dimension denotation of - [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL - ...]. Assuming zero based indices for the shape array, X.shape[1] == - (W.shape[1] \* group) == C and W.shape[0] mod G == 0. Or in other words - FILTER_IN_CHANNEL multiplied by the number of groups should be equal to - DATA_CHANNEL and the number of feature maps M should be a multiple of - the number of groups G. - B - Type T. - Optional 1D bias to be added to the convolution, has size of M. - auto_pad - Attribute. - auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where - default value is NOTSET, which means explicit padding is used. - SAME_UPPER or SAME_LOWER mean pad the input so that - ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis - ``i``. The padding is split between the two sides equally or almost - equally (depending on whether it is even or odd). 
In case the padding is - an odd number, the extra padding is added at the end for SAME_UPPER and - at the beginning for SAME_LOWER. - dilations - Attribute. - dilation value along each spatial axis of the filter. If not present, - the dilation defaults is 1 along each spatial axis. - group - Attribute. - number of groups input channels and output channels are divided into. - kernel_shape - Attribute. - The shape of the convolution kernel. If not present, should be inferred - from input W. - pads - Attribute. - Padding for the beginning and ending along each spatial axis, it can - take any value greater than or equal to 0. The value represent the - number of pixels added to the beginning and end part of the - corresponding axis. ``pads`` format should be as follow [x1_begin, - x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels - added at the beginning of axis ``i`` and xi_end, the number of pixels - added at the end of axis ``i``. This attribute cannot be used - simultaneously with auto_pad attribute. If not present, the padding - defaults to 0 along start and end of each spatial axis. - strides - Attribute. - Stride along each spatial axis. If not present, the stride defaults is 1 - along each spatial axis. - - Returns - ======= - Y : Var - Type T. - Output data tensor that contains the result of the convolution. The - output dimensions are functions of the kernel size, stride size, and pad - lengths. - - Notes - ===== - Signature: ``ai.onnx@11::Conv``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Conv( - _Conv.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _Conv.Inputs( - X=unwrap_vars(X), - W=unwrap_vars(W), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - X=get_value(X), - W=get_value(W), - B=get_value(B), - ) - .Y - ) - - -def conv_integer( - x: Var, - w: Var, - x_zero_point: Optional[Var] = None, - w_zero_point: Optional[Var] = None, - *, - auto_pad: str = "NOTSET", - dilations: Optional[Iterable[int]] = None, - group: int = 1, - kernel_shape: Optional[Iterable[int]] = None, - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: - r""" - The integer convolution operator consumes an input tensor, its - zero-point, a filter, and its zero-point, and computes the output. The - production MUST never overflow. The accumulation may overflow if and - only if in 32 bits. - - Parameters - ========== - x - Type T1. - Input data tensor from previous layer; has size (N x C x H x W), where N - is the batch size, C is the number of channels, and H and W are the - height and width. Note that this is for the 2D image. Otherwise the size - is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in - effect, the operation expects input data tensor to arrive with the - dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, - DATA_FEATURE ...]. - w - Type T2. - The weight tensor that will be used in the convolutions; has size (M x - C/group x kH x kW), where C is the number of channels, and kH and kW are - the height and width of the kernel, and M is the number of feature maps. - For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x - k2 x ... 
x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. - Optionally, if dimension denotation is in effect, the operation expects - the weight tensor to arrive with the dimension denotation of - [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL - ...]. X.shape[1] == (W.shape[1] \* group) == C (assuming zero based - indices for the shape array). Or in other words FILTER_IN_CHANNEL should - be equal to DATA_CHANNEL. - x_zero_point - Type T1. - Zero point tensor for input 'x'. It's optional and default value is 0. - It's a scalar, which means a per-tensor/layer quantization. - w_zero_point - Type T2. - Zero point tensor for input 'w'. It's optional and default value is 0. - It could be a scalar or a 1-D tensor, which means a per-tensor/layer or - per output channel quantization. If it's a 1-D tensor, its number of - elements should be equal to the number of output channels (M) - auto_pad - Attribute. - auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where - default value is NOTSET, which means explicit padding is used. - SAME_UPPER or SAME_LOWER mean pad the input so that - ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis - ``i``. The padding is split between the two sides equally or almost - equally (depending on whether it is even or odd). In case the padding is - an odd number, the extra padding is added at the end for SAME_UPPER and - at the beginning for SAME_LOWER. - dilations - Attribute. - dilation value along each spatial axis of the filter. If not present, - the dilation defaults to 1 along each axis. - group - Attribute. - number of groups input channels and output channels are divided into. - default is 1. - kernel_shape - Attribute. - The shape of the convolution kernel. If not present, should be inferred - from input 'w'. - pads - Attribute. - Padding for the beginning and ending along each spatial axis, it can - take any value greater than or equal to 0.The value represent the number - of pixels added to the beginning and end part of the corresponding - axis.\ ``pads`` format should be as follow [x1_begin, x2_begin...x1_end, - x2_end,...], where xi_begin the number ofpixels added at the beginning - of axis ``i`` and xi_end, the number of pixels added at the end of axis - ``i``.This attribute cannot be used simultaneously with auto_pad - attribute. If not present, the padding defaultsto 0 along start and end - of each spatial axis. - strides - Attribute. - Stride along each spatial axis. If not present, the stride defaults to 1 - along each axis. - - Returns - ======= - y : Var - Type T3. - Output data tensor that contains the result of the convolution. The - output dimensions are functions of the kernel size, stride size, and pad - lengths. - - Notes - ===== - Signature: ``ai.onnx@10::ConvInteger``. 
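To make the ``auto_pad`` description above concrete, a small sketch of the SAME_UPPER arithmetic: the output size is ``ceil(input / stride)`` as stated, and the total-padding bookkeeping below follows the usual formula (an assumption, not quoted verbatim here)::

    import math

    in_size, stride, kernel, dilation = 13, 2, 3, 1
    out_size = math.ceil(in_size / stride)                                    # 7
    effective_kernel = (kernel - 1) * dilation + 1                            # 3
    total_pad = max((out_size - 1) * stride + effective_kernel - in_size, 0)  # 2
    pad_begin = total_pad // 2
    pad_end = total_pad - pad_begin   # SAME_UPPER puts any odd extra pixel at the end
    print(out_size, pad_begin, pad_end)  # 7 1 1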
- - Type constraints: - - T1: `tensor(int8)`, `tensor(uint8)` - - T2: `tensor(int8)`, `tensor(uint8)` - - T3: `tensor(int32)` - """ - return ( - _ConvInteger( - _ConvInteger.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _ConvInteger.Inputs( - x=unwrap_vars(x), - w=unwrap_vars(w), - x_zero_point=unwrap_vars(x_zero_point), - w_zero_point=unwrap_vars(w_zero_point), - ), - ) - .get_output_vars( - x=get_value(x), - w=get_value(w), - x_zero_point=get_value(x_zero_point), - w_zero_point=get_value(w_zero_point), - ) - .y - ) - - -def conv_transpose( - X: Var, - W: Var, - B: Optional[Var] = None, - *, - auto_pad: str = "NOTSET", - dilations: Optional[Iterable[int]] = None, - group: int = 1, - kernel_shape: Optional[Iterable[int]] = None, - output_padding: Optional[Iterable[int]] = None, - output_shape: Optional[Iterable[int]] = None, - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: - r""" - The convolution transpose operator consumes an input tensor and a - filter, and computes the output. - - If the pads parameter is provided the shape of the output is calculated - via the following equation: - - output_shape[i] = stride[i] \* (input_size[i] - 1) + output_padding[i] + - ((kernel_shape[i] - 1) \* dilations[i] + 1) - pads[start_i] - - pads[end_i] - - output_shape can also be explicitly specified in which case pads values - are auto generated using these equations: - - total_padding[i] = stride[i] \* (input_size[i] - 1) + output_padding[i] - + ((kernel_shape[i] - 1) \* dilations[i] + 1) - output_shape[i] If - (auto_pads == SAME_UPPER): pads[start_i] = total_padding[i]/2; - pads[end_i] = total_padding[i] - (total_padding[i]/2) Else: - pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = - (total_padding[i]/2). - - Parameters - ========== - X - Type T. - Input data tensor from previous layer; has size (N x C x H x W), where N - is the batch size, C is the number of channels, and H and W are the - height and width. Note that this is for the 2D image. Otherwise the size - is (N x C x D1 x D2 ... x Dn) - W - Type T. - The weight tensor that will be used in the convolutions; has size (C x - M/group x kH x kW), where C is the number of channels, and kH and kW are - the height and width of the kernel, and M is the number of feature maps. - For more than 2 dimensions, the weight shape will be (C x M/group x k1 x - k2 x ... x kn), where (k1 x k2 x ... x kn) is the dimension of the - kernel. The number of channels in the output should be equal to - W.shape[1] \* group (assuming zero based indices of the shape array) - B - Type T. - Optional 1D bias to be added to the convolution, has size of M. - auto_pad - Attribute. - auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where - default value is NOTSET, which means explicit padding is used. - SAME_UPPER or SAME_LOWER mean pad the input so that - ``output_shape[i] = input_shape[i] * strides[i]`` for each axis ``i``. - The padding is split between the two sides equally or almost equally - (depending on whether it is even or odd). In case the padding is an odd - number, the extra padding is added at the end for SAME_UPPER and at the - beginning for SAME_LOWER. - dilations - Attribute. 
- dilation value along each spatial axis of the filter. If not present, - the dilation defaults to 1 along each spatial axis. - group - Attribute. - number of groups input channels and output channels are divided into. - kernel_shape - Attribute. - The shape of the convolution kernel. If not present, should be inferred - from input W. - output_padding - Attribute. - Additional elements added to the side with higher coordinate indices in - the output. Each padding value in "output_padding" must be less than the - corresponding stride/dilation dimension. By default, this attribute is a - zero vector. Note that this attribute doesn't directly affect the - computed output values. It only controls the selection of the computed - values, so changing this attribute only adds or removes output elements. - If "output_shape" is explicitly provided, "output_padding" does not - contribute additional size to "output_shape" but participates in the - computation of the needed padding amount. This is also called adjs or - adjustment in some frameworks. - output_shape - Attribute. - The shape of the output can be explicitly set which will cause pads - values to be auto generated. If output_shape is specified pads values - are ignored. See doc for details for equations to generate pads. Note - that the output_shape attribute value should not include dimensions for - batch size and channels, which are automatically inferred. - pads - Attribute. - Padding for the beginning and ending along each spatial axis, it can - take any value greater than or equal to 0. The value represent the - number of pixels added to the beginning and end part of the - corresponding axis. ``pads`` format should be as follow [x1_begin, - x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels - added at the beginning of axis ``i`` and xi_end, the number of pixels - added at the end of axis ``i``. This attribute cannot be used - simultaneously with auto_pad attribute. If not present, the padding - defaults to 0 along start and end of each spatial axis. - strides - Attribute. - Stride along each spatial axis. If not present, the stride defaults to 1 - along each spatial axis. - - Returns - ======= - Y : Var - Type T. - Output data tensor that contains the result of the convolution. The - output dimensions are functions of the kernel size, stride size, pad - lengths and group count. The number of channels in the output should be - equal to W.shape[1] \* group (assuming zero based indices of the shape - array) - - Notes - ===== - Signature: ``ai.onnx@11::ConvTranspose``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _ConvTranspose( - _ConvTranspose.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - output_padding=AttrInt64s.maybe(output_padding, name="output_padding"), - output_shape=AttrInt64s.maybe(output_shape, name="output_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _ConvTranspose.Inputs( - X=unwrap_vars(X), - W=unwrap_vars(W), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - X=get_value(X), - W=get_value(W), - B=get_value(B), - ) - .Y - ) - - -def cos( - input: Var, -) -> Var: - r""" - Calculates the cosine of the given input tensor, element-wise. - - Parameters - ========== - input - Type T. 
- Input tensor - - Returns - ======= - output : Var - Type T. - The cosine of the input tensor computed element-wise - - Notes - ===== - Signature: ``ai.onnx@7::Cos``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Cos( - _Cos.Attributes(), - _Cos.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def cosh( - input: Var, -) -> Var: - r""" - Calculates the hyperbolic cosine of the given input tensor element-wise. - - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The hyperbolic cosine values of the input tensor computed element-wise - - Notes - ===== - Signature: ``ai.onnx@9::Cosh``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Cosh( - _Cosh.Attributes(), - _Cosh.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def cumsum( - x: Var, - axis: Var, - *, - exclusive: int = 0, - reverse: int = 0, -) -> Var: - r""" - Performs cumulative sum of the input elements along the given axis. By - default, it will do the sum inclusively meaning the first element is - copied as is. Through an ``exclusive`` attribute, this behavior can - change to exclude the first element. It can also perform summation in - the opposite direction of the axis. For that, set ``reverse`` attribute - to 1. - - Example: - - :: - - input_x = [1, 2, 3] - axis=0 - output = [1, 3, 6] - exclusive=1 - output = [0, 1, 3] - exclusive=0 - reverse=1 - output = [6, 5, 3] - exclusive=1 - reverse=1 - output = [5, 3, 0] - - Parameters - ========== - x - Type T. - An input tensor that is to be processed. - axis - Type T2. - A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. Negative value - means counting dimensions from the back. - exclusive - Attribute. - If set to 1 will return exclusive sum in which the top element is not - included. In other terms, if set to 1, the j-th output element would be - the sum of the first (j-1) elements. Otherwise, it would be the sum of - the first j elements. - reverse - Attribute. - If set to 1 will perform the sums in reverse direction. - - Returns - ======= - y : Var - Type T. - Output tensor of the same type as 'x' with cumulative sums of the x's - elements - - Notes - ===== - Signature: ``ai.onnx@14::CumSum``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - - T2: `tensor(int32)`, `tensor(int64)` - """ - return ( - _CumSum( - _CumSum.Attributes( - exclusive=AttrInt64(exclusive, name="exclusive"), - reverse=AttrInt64(reverse, name="reverse"), - ), - _CumSum.Inputs( - x=unwrap_vars(x), - axis=unwrap_vars(axis), - ), - ) - .get_output_vars( - x=get_value(x), - axis=get_value(axis), - ) - .y - ) - - -def dft( - input: Var, - dft_length: Optional[Var] = None, - *, - axis: int = 1, - inverse: int = 0, - onesided: int = 0, -) -> Var: - r""" - Computes the discrete Fourier transform of input. - - Parameters - ========== - input - Type T1. - For real input, the following shape is expected: - [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][1]. For complex - input, the following shape is expected: - [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][2]. The first - dimension is the batch dimension. The following N dimensions correspond - to the signal's dimensions. 
The final dimension represents the real and - imaginary parts of the value in that order. - dft_length - Type T2. - The length of the signal as a scalar. If greater than the axis - dimension, the signal will be zero-padded up to dft_length. If less than - the axis dimension, only the first dft_length values will be used as the - signal. It's an optional value. - axis - Attribute. - The axis on which to perform the DFT. By default this value is set to 1, - which corresponds to the first dimension after the batch index. Negative - value means counting dimensions from the back. Accepted range is - :math:`[-r, -2] \cup [0, r-2]` where ``r = rank(input)``. The last - dimension is for representing complex numbers and thus is an invalid - axis. - inverse - Attribute. - Whether to perform the inverse discrete fourier transform. By default - this value is set to 0, which corresponds to false. - onesided - Attribute. - If onesided is 1, only values for w in [0, 1, 2, ..., floor(n_fft/2) + - 1] are returned because the real-to-complex Fourier transform satisfies - the conjugate symmetry, i.e., X[m, w] = X[m, n_fft-w]\*. Note if the - input or window tensors are complex, then onesided output is not - possible. Enabling onesided with real inputs performs a Real-valued fast - Fourier transform (RFFT). When invoked with real or complex valued - input, the default value is 0. Values can be 0 or 1. - - Returns - ======= - output : Var - Type T1. - The Fourier Transform of the input vector. If onesided is 0, the - following shape is expected: - [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][2]. If axis=1 and - onesided is 1, the following shape is expected: - [batch_idx][floor(signal_dim1/2)+1][signal_dim2]...[signal_dimN][2]. If - axis=2 and onesided is 1, the following shape is expected: - [batch_idx][signal_dim1][floor(signal_dim2/2)+1]...[signal_dimN][2]. If - axis=N and onesided is 1, the following shape is expected: - [batch_idx][signal_dim1][signal_dim2]...[floor(signal_dimN/2)+1][2]. The - signal_dim at the specified axis is equal to the dft_length. - - Notes - ===== - Signature: ``ai.onnx@17::DFT``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(int32)`, `tensor(int64)` - """ - return ( - _DFT( - _DFT.Attributes( - axis=AttrInt64(axis, name="axis"), - inverse=AttrInt64(inverse, name="inverse"), - onesided=AttrInt64(onesided, name="onesided"), - ), - _DFT.Inputs( - input=unwrap_vars(input), - dft_length=unwrap_vars(dft_length), - ), - ) - .get_output_vars( - input=get_value(input), - dft_length=get_value(dft_length), - ) - .output - ) - - -def depth_to_space( - input: Var, - *, - blocksize: int, - mode: str = "DCR", -) -> Var: - r""" - DepthToSpace rearranges (permutes) data from depth into blocks of - spatial data. This is the reverse transformation of SpaceToDepth. More - specifically, this op outputs a copy of the input tensor where values - from the depth dimension are moved in spatial blocks to the height and - width dimensions. By default, ``mode`` = ``DCR``. In the DCR mode, - elements along the depth dimension from the input tensor are rearranged - in the following order: depth, column, and then row. 
The output y is - computed from the input x as below: - - :: - - b, c, h, w = x.shape - tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w]) - tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2]) - y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize]) - - In the CRD mode, elements along the depth dimension from the input - tensor are rearranged in the following order: column, row, and the - depth. The output y is computed from the input x as below: - - :: - - b, c, h, w = x.shape - tmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w]) - tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3]) - y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize]) - - Parameters - ========== - input - Type T. - Input tensor of [N,C,H,W], where N is the batch axis, C is the channel - or depth, H is the height and W is the width. - blocksize - Attribute. - Blocks of [blocksize, blocksize] are moved. - mode - Attribute. - DCR (default) for depth-column-row order re-arrangement. Use CRD for - column-row-depth order. - - Returns - ======= - output : Var - Type T. - Output tensor of [N, C/(blocksize \* blocksize), H \* blocksize, W \* - blocksize]. - - Notes - ===== - Signature: ``ai.onnx@13::DepthToSpace``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _DepthToSpace( - _DepthToSpace.Attributes( - blocksize=AttrInt64(blocksize, name="blocksize"), - mode=AttrString(mode, name="mode"), - ), - _DepthToSpace.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def dequantize_linear( - x: Var, - x_scale: Var, - x_zero_point: Optional[Var] = None, - *, - axis: int = 1, -) -> Var: - r""" - The linear dequantization operator. It consumes a quantized tensor, a - scale, and a zero point to compute the full precision tensor. The - dequantization formula is ``y = (x - x_zero_point) * x_scale``. - ``x_scale`` and ``x_zero_point`` must have same shape, and can be either - a scalar for per-tensor / per layer quantization, or a 1-D tensor for - per-axis quantization. ``x_zero_point`` and ``x`` must have same type. - ``x`` and ``y`` must have same shape. In the case of dequantizing int32, - there's no zero point (zero point is supposed to be 0). - - Parameters - ========== - x - Type T. - N-D quantized input tensor to be de-quantized. - x_scale - Type tensor(float). - Scale for input 'x'. It can be a scalar, which means a per-tensor/layer - dequantization, or a 1-D tensor for per-axis dequantization. - x_zero_point - Type T. - Zero point for input 'x'. Shape must match x_scale. It's optional. Zero - point is 0 when it's not specified. - axis - Attribute. - (Optional) The axis of the dequantizing dimension of the input tensor. - Ignored for per-tensor quantization. Negative value means counting - dimensions from the back. Accepted range is [-r, r-1] where r = - rank(input). - - Returns - ======= - y : Var - Type tensor(float). - N-D full precision output tensor. It has same shape as input 'x'. - - Notes - ===== - Signature: ``ai.onnx@13::DequantizeLinear``. 
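The DequantizeLinear formula above, ``y = (x - x_zero_point) * x_scale``, worked through with concrete uint8 values::

    import numpy as np

    x = np.array([0, 128, 255], dtype=np.uint8)
    x_scale, x_zero_point = 0.05, 128
    y = (x.astype(np.int32) - x_zero_point) * x_scale
    print(y)  # approximately [-6.4, 0.0, 6.35]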
- - Type constraints: - - T: `tensor(int32)`, `tensor(int8)`, `tensor(uint8)` - """ - return ( - _DequantizeLinear( - _DequantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _DequantizeLinear.Inputs( - x=unwrap_vars(x), - x_scale=unwrap_vars(x_scale), - x_zero_point=unwrap_vars(x_zero_point), - ), - ) - .get_output_vars( - x=get_value(x), - x_scale=get_value(x_scale), - x_zero_point=get_value(x_zero_point), - ) - .y - ) - - -def det( - X: Var, -) -> Var: - r""" - Det calculates determinant of a square matrix or batches of square - matrices. Det takes one input tensor of shape ``[*, M, M]``, where ``*`` - is zero or more batch dimensions, and the inner-most 2 dimensions form - square matrices. The output is a tensor of shape ``[*]``, containing the - determinants of all input submatrices. e.g., When the input is 2-D, the - output is a scalar(shape is empty: ``[]``). - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@11::Det``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Det( - _Det.Attributes(), - _Det.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def div( - A: Var, - B: Var, -) -> Var: - r""" - Performs element-wise binary division (with Numpy-style broadcasting - support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - (Opset 14 change): Extend supported types to include uint8, int8, - uint16, and int16. - - Parameters - ========== - A - Type T. - First operand. - B - Type T. - Second operand. - - Returns - ======= - C : Var - Type T. - Result, has same element type as two inputs - - Notes - ===== - Signature: ``ai.onnx@14::Div``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Div( - _Div.Attributes(), - _Div.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def dropout( - data: Var, - ratio: Optional[Var] = None, - training_mode: Optional[Var] = None, - *, - seed: Optional[int] = None, -) -> tuple[Var, Var]: - r""" - Dropout takes an input floating-point tensor, an optional input ratio - (floating-point scalar) and an optional input training_mode (boolean - scalar). It produces two tensor outputs, output (floating-point tensor) - and mask (optional ``Tensor``). If ``training_mode`` is true then - the output Y will be a random dropout; Note that this Dropout scales the - masked input data by the following equation, so to convert the trained - model into inference mode, the user can simply not pass - ``training_mode`` input or set it to false. - - :: - - output = scale * data * mask, - - where - - :: - - scale = 1. / (1. - ratio). - - This operator has **optional** inputs/outputs. See `the - doc `__ for more - details about the representation of optional arguments. An empty string - may be used in the place of an actual argument's name to indicate a - missing argument. Trailing optional arguments (those not followed by an - argument that is present) may also be simply omitted. - - Parameters - ========== - data - Type T. - The input data as Tensor. - ratio - Type T1. 
- The ratio of random dropout, with value in [0, 1). If this input was not - set, or if it was set to 0, the output would be a simple copy of the - input. If it's non-zero, output will be a random dropout of the scaled - input, which is typically the case during training. It is an optional - value, if not specified it will default to 0.5. - training_mode - Type T2. - If set to true then it indicates dropout is being used for training. It - is an optional value hence unless specified explicitly, it is false. If - it is false, ratio is ignored and the operation mimics inference mode - where nothing will be dropped from the input data and if mask is - requested as output it will contain all ones. - seed - Attribute. - (Optional) Seed to the random generator, if not specified we will auto - generate one. - - Returns - ======= - output : Var - Type T. - The output. - mask : Var - Type T2. - The output mask. - - Notes - ===== - Signature: ``ai.onnx@13::Dropout``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(bool)` - """ - return ( - _Dropout( - _Dropout.Attributes( - seed=AttrInt64.maybe(seed, name="seed"), - ), - _Dropout.Inputs( - data=unwrap_vars(data), - ratio=unwrap_vars(ratio), - training_mode=unwrap_vars(training_mode), - ), - ) - .get_output_vars( - data=get_value(data), - ratio=get_value(ratio), - training_mode=get_value(training_mode), - ) - ._unpack_to_any() - ) - - -def dynamic_quantize_linear( - x: Var, -) -> tuple[Var, Var, Var]: - r""" - A Function to fuse calculation for Scale, Zero Point and FP32->8Bit - conversion of FP32 Input data. Outputs Scale, ZeroPoint and Quantized - Input for a given FP32 Input. Scale is calculated as: - - :: - - y_scale = (maximum(0, max(x)) - minimum(0, min(x))) / (qmax - qmin) - - - where qmax and qmin are max and min values for quantization range - i.e. [0, 255] in case of uint8 - - data range is adjusted to include 0. - - Zero point is calculated as: - - :: - - intermediate_zero_point = qmin - min(x)/y_scale - y_zero_point = cast(round(saturate(itermediate_zero_point))) - - - where qmax and qmin are max and min values for quantization range - .i.e [0, 255] in case of uint8 - - for saturation, it saturates to [0, 255] if it's uint8, or [-127, - 127] if it's int8. Right now only uint8 is supported. - - rounding to nearest ties to even. - - Data quantization formula is: - - :: - - y = saturate (round (x / y_scale) + y_zero_point) - - - for saturation, it saturates to [0, 255] if it's uint8, or [-127, - 127] if it's int8. Right now only uint8 is supported. - - rounding to nearest ties to even. - - Parameters - ========== - x - Type T1. - Input tensor - - Returns - ======= - y : Var - Type T2. - Quantized output tensor - y_scale : Var - Type tensor(float). - Output scale. It's a scalar, which means a per-tensor/layer - quantization. - y_zero_point : Var - Type T2. - Output zero point. It's a scalar, which means a per-tensor/layer - quantization. - - Notes - ===== - Signature: ``ai.onnx@11::DynamicQuantizeLinear``. 
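A numpy sketch of the DynamicQuantizeLinear recipe quoted above for uint8: the data range is widened to include 0, the scale comes from that range, the zero point from ``qmin - min(x)/y_scale``, and rounding is ties-to-even::

    import numpy as np

    x = np.array([-1.0, 0.0, 2.0, 3.0], dtype=np.float32)
    qmin, qmax = 0, 255
    x_min, x_max = min(x.min(), 0.0), max(x.max(), 0.0)   # adjust the range to include 0
    y_scale = (x_max - x_min) / (qmax - qmin)
    y_zero_point = np.uint8(np.clip(np.rint(qmin - x_min / y_scale), qmin, qmax))
    y = np.clip(np.rint(x / y_scale) + y_zero_point, qmin, qmax).astype(np.uint8)
    print(y_scale, y_zero_point, y)  # ~0.0157, 64, [  0  64 192 255]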
- - Type constraints: - - T1: `tensor(float)` - - T2: `tensor(uint8)` - """ - return ( - _DynamicQuantizeLinear( - _DynamicQuantizeLinear.Attributes(), - _DynamicQuantizeLinear.Inputs( - x=unwrap_vars(x), - ), - ) - .get_output_vars( - x=get_value(x), - ) - ._unpack_to_any() - ) - - -def einsum( - Inputs: Sequence[Var], - *, - equation: str, -) -> Var: - r""" - An einsum of the form ``term1, term2 -> output-term`` produces an output - tensor using the following equation - - :: - - output[output-term] = reduce-sum( input1[term1] * input2[term2] ) - - where the reduce-sum performs a summation over all the indices occurring - in the input terms (term1, term2) that do not occur in the output-term. - - The Einsum operator evaluates algebraic tensor operations on a sequence - of tensors, using the Einstein summation convention. The equation string - contains a comma-separated sequence of lower case letters. Each term - corresponds to an operand tensor, and the characters within the terms - correspond to operands dimensions. - - This sequence may be followed by "->" to separate the left and right - hand side of the equation. If the equation contains "->" followed by the - right-hand side, the explicit (not classical) form of the Einstein - summation is performed, and the right-hand side indices indicate output - tensor dimensions. In other cases, output indices are (implicitly) set - to the alphabetically sorted sequence of indices appearing exactly once - in the equation. - - When a dimension character is repeated in the left-hand side, it - represents summation along the dimension. - - The equation may contain ellipsis ("...") to enable broadcasting. - Ellipsis must indicate a fixed number of dimensions. Specifically, every - occurrence of ellipsis in the equation must represent the same number of - dimensions. The right-hand side may contain exactly one ellipsis. In - implicit mode, the ellipsis dimensions are set to the beginning of the - output. The equation string may contain space (U+0020) character. - - Parameters - ========== - Inputs - Type T. - Operands - equation - Attribute. - Einsum expression string. - - Returns - ======= - Output : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@12::Einsum``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Einsum( - _Einsum.Attributes( - equation=AttrString(equation, name="equation"), - ), - _Einsum.Inputs( - Inputs=unwrap_vars(Inputs), - ), - ) - .get_output_vars( - Inputs=get_value(Inputs), - ) - .Output - ) - - -def elu( - X: Var, - *, - alpha: float = 1.0, -) -> Var: - r""" - Elu takes one input data (Tensor) and produces one output data - (Tensor) where the function - ``f(x) = alpha * (exp(x) - 1.) for x < 0``, ``f(x) = x for x >= 0``., is - applied to the tensor elementwise. - - Parameters - ========== - X - Type T. - 1D input tensor - alpha - Attribute. - Coefficient of ELU. - - Returns - ======= - Y : Var - Type T. - 1D output tensor - - Notes - ===== - Signature: ``ai.onnx@6::Elu``. 
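# A small numpy illustration of the Einsum equation and the explicit/implicit
# modes described in the Einsum docstring above; np.einsum follows the same
# convention. Illustrative only, not tied to the operator implementation.
import numpy as np

a = np.arange(6.0).reshape(2, 3)
b = np.arange(12.0).reshape(3, 4)
explicit = np.einsum("ij,jk->ik", a, b)   # explicit form: output indices given after "->"
implicit = np.einsum("ij,jk", a, b)       # implicit form: output is the sorted once-only indices "ik"
assert np.allclose(explicit, implicit)
trace = np.einsum("ii", np.eye(3))        # a repeated index sums along that dimension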
- - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Elu( - _Elu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _Elu.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def equal( - A: Var, - B: Var, -) -> Var: - r""" - Returns the tensor resulted from performing the ``equal`` logical - operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the logical operator. - B - Type T. - Second input operand for the logical operator. - - Returns - ======= - C : Var - Type T1. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Equal``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(bool)` - """ - return ( - _Equal( - _Equal.Attributes(), - _Equal.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def erf( - input: Var, -) -> Var: - r""" - Computes the error function of the given input tensor element-wise. - - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The error function of the input tensor computed element-wise. It has the - same shape and type of the input. - - Notes - ===== - Signature: ``ai.onnx@13::Erf``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Erf( - _Erf.Attributes(), - _Erf.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def exp( - input: Var, -) -> Var: - r""" - Calculates the exponential of the given input tensor, element-wise. - - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The exponential of the input tensor computed element-wise - - Notes - ===== - Signature: ``ai.onnx@13::Exp``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Exp( - _Exp.Attributes(), - _Exp.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def expand( - input: Var, - shape: Var, -) -> Var: - r""" - Broadcast the input tensor following the given shape and the broadcast - rule. The broadcast rule is similar to numpy.array(input) \* - numpy.ones(shape): Dimensions are right alignment; Two corresponding - dimensions must have the same value, or one of them is equal to 1. Also, - this operator is similar to numpy.broadcast_to(input, shape), but the - major difference is numpy.broadcast_to() does not allow shape to be - smaller than input.size(). It is possible that the output.shape is not - equal to shape, when some dimensions in shape is equal to 1, or the - shape.ndim < input.shape.ndim. - - Parameters - ========== - input - Type T. - Input tensor - shape - Type tensor(int64). 
- A 1-D tensor indicates the shape you want to expand to, following the - broadcast rule - - Returns - ======= - output : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@13::Expand``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Expand( - _Expand.Attributes(), - _Expand.Inputs( - input=unwrap_vars(input), - shape=unwrap_vars(shape), - ), - ) - .get_output_vars( - input=get_value(input), - shape=get_value(shape), - ) - .output - ) - - -def eye_like( - input: Var, - *, - dtype: Optional[npt.DTypeLike] = None, - k: int = 0, -) -> Var: - r""" - Generate a 2D tensor (matrix) with ones on the diagonal and zeros - everywhere else. Only 2D tensors are supported, i.e. input T1 must be of - rank 2. The shape of the output tensor is the same as the input tensor. - The data type can be specified by the 'dtype' argument. If 'dtype' is - not specified, then the type of input tensor is used. By default, the - main diagonal is populated with ones, but attribute 'k' can be used to - populate upper or lower diagonals. The 'dtype' argument must be one of - the data types specified in the 'DataType' enum field in the TensorProto - message and be valid as an output type. - - Parameters - ========== - input - Type T1. - 2D input tensor to copy shape, and optionally, type information from. - dtype - Attribute. - (Optional) The data type for the elements of the output tensor. If not - specified,the data type of the input tensor T1 is used. If input tensor - T1 is also notspecified, then type defaults to 'float'. - k - Attribute. - (Optional) Index of the diagonal to be populated with ones. Default is - 0. If T2 is the output, this op sets T2[i, i+k] = 1. k = 0 populates the - main diagonal, k > 0 populates an upper diagonal, and k < 0 populates a - lower diagonal. - - Returns - ======= - output : Var - Type T2. - Output tensor, same shape as input tensor T1. - - Notes - ===== - Signature: ``ai.onnx@9::EyeLike``. - - Type constraints: - - T1: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _EyeLike( - _EyeLike.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - k=AttrInt64(k, name="k"), - ), - _EyeLike.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def flatten( - input: Var, - *, - axis: int = 1, -) -> Var: - r""" - Flattens the input tensor into a 2D matrix. If input tensor has shape - (d_0, d_1, ... d_n) then the output will have shape (d_0 X d_1 ... - d\_(axis-1), d_axis X d\_(axis+1) ... X dn). - - Parameters - ========== - input - Type T. - A tensor of rank >= axis. - axis - Attribute. - Indicate up to which input dimensions (exclusive) should be flattened to - the outer dimension of the output. The value for axis must be in the - range [-r, r], where r is the rank of the input tensor. 
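# A numpy sketch of the Flatten output shape formula above, plus the EyeLike
# diagonal attribute k. Illustrative only; plain reshape/eye, not the operators.
import numpy as np

x = np.zeros((2, 3, 4, 5))
axis = 2
outer = int(np.prod(x.shape[:axis]))     # d_0 x ... x d_(axis-1)
inner = int(np.prod(x.shape[axis:]))     # d_axis x ... x d_n
flat = x.reshape(outer, inner)           # shape (6, 20)

m = np.zeros((4, 4), dtype=np.int32)
eye_like = np.eye(*m.shape, k=1, dtype=m.dtype)   # ones on the first upper diagonal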
Negative value - means counting dimensions from the back. When axis = 0, the shape of the - output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input - tensor is (d_0, d_1, ... d_n). - - Returns - ======= - output : Var - Type T. - A 2D tensor with the contents of the input tensor, with input dimensions - up to axis flattened to the outer dimension of the output and remaining - input dimensions flattened into the inner dimension of the output. - - Notes - ===== - Signature: ``ai.onnx@13::Flatten``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Flatten( - _Flatten.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Flatten.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def floor( - X: Var, -) -> Var: - r""" - Floor takes one input data (Tensor) and produces one output data - (Tensor) where the floor is, y = floor(x), is applied to the tensor - elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is - returned. - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@13::Floor``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Floor( - _Floor.Attributes(), - _Floor.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def gru( - X: Var, - W: Var, - R: Var, - B: Optional[Var] = None, - sequence_lens: Optional[Var] = None, - initial_h: Optional[Var] = None, - *, - activation_alpha: Optional[Iterable[float]] = None, - activation_beta: Optional[Iterable[float]] = None, - activations: Optional[Iterable[str]] = None, - clip: Optional[float] = None, - direction: str = "forward", - hidden_size: Optional[int] = None, - layout: int = 0, - linear_before_reset: int = 0, -) -> tuple[Var, Var]: - r""" - Computes an one-layer GRU. This operator is usually supported via some - custom implementation such as CuDNN. 
- - Notations: - - - ``X`` - input tensor - - ``z`` - update gate - - ``r`` - reset gate - - ``h`` - hidden gate - - ``t`` - time step (t-1 means previous time step) - - ``W[zrh]`` - W parameter weight matrix for update, reset, and hidden - gates - - ``R[zrh]`` - R recurrence weight matrix for update, reset, and hidden - gates - - ``Wb[zrh]`` - W bias vectors for update, reset, and hidden gates - - ``Rb[zrh]`` - R bias vectors for update, reset, and hidden gates - - ``WB[zrh]`` - W parameter weight matrix for backward update, reset, - and hidden gates - - ``RB[zrh]`` - R recurrence weight matrix for backward update, reset, - and hidden gates - - ``WBb[zrh]`` - W bias vectors for backward update, reset, and hidden - gates - - ``RBb[zrh]`` - R bias vectors for backward update, reset, and hidden - gates - - ``H`` - Hidden state - - ``num_directions`` - 2 if direction == bidirectional else 1 - - Activation functions: - - - Relu(x) - max(0, x) - - Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - - Sigmoid(x) - 1/(1 + e^{-x}) - - NOTE: Below are optional - - - Affine(x) - alpha \* x + beta - - LeakyRelu(x) - x if x >= 0 else alpha \* x - - ThresholdedRelu(x) - x if x >= alpha else 0 - - ScaledTanh(x) - alpha \* Tanh(beta \* x) - - HardSigmoid(x) - min(max(alpha \* x + beta, 0), 1) - - Elu(x) - x if x >= 0 else alpha \* (e^x - 1) - - Softsign(x) - x/(1 + \|x\|) - - Softplus(x) - log(1 + e^x) - - Equations (Default: f=Sigmoid, g=Tanh): - - - zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz) - - rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr) - - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when - linear_before_reset = 0 - - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when - linear_before_reset != 0 - - Ht = (1 - zt) (.) ht + zt (.) Ht-1 This operator has **optional** - inputs/outputs. See `the - doc `__ for more - details about the representation of optional arguments. An empty - string may be used in the place of an actual argument's name to - indicate a missing argument. Trailing optional arguments (those not - followed by an argument that is present) may also be simply omitted. - - Parameters - ========== - X - Type T. - The input sequences packed (and potentially padded) into one 3-D tensor - with the shape of ``[seq_length, batch_size, input_size]``. - W - Type T. - The weight tensor for the gates. Concatenation of ``W[zrh]`` and - ``WB[zrh]`` (if bidirectional) along dimension 0. This tensor has shape - ``[num_directions, 3*hidden_size, input_size]``. - R - Type T. - The recurrence weight tensor. Concatenation of ``R[zrh]`` and - ``RB[zrh]`` (if bidirectional) along dimension 0. This tensor has shape - ``[num_directions, 3*hidden_size, hidden_size]``. - B - Type T. - The bias tensor for the gates. Concatenation of ``[Wb[zrh], Rb[zrh]]`` - and ``[WBb[zrh], RBb[zrh]]`` (if bidirectional) along dimension 0. This - tensor has shape ``[num_directions, 6*hidden_size]``. Optional: If not - specified - assumed to be 0 - sequence_lens - Type T1. - Optional tensor specifying lengths of the sequences in a batch. If not - specified - assumed all sequences in the batch to have length - ``seq_length``. It has shape ``[batch_size]``. - initial_h - Type T. - Optional initial value of the hidden. If not specified - assumed to be - 0. It has shape ``[num_directions, batch_size, hidden_size]``. - activation_alpha - Attribute. - Optional scaling values used by some activation functions. The values - are consumed in the order of activation functions, for example (f, g, h) - in LSTM. 
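# A numpy sketch of a single GRU step using the default equations above
# (f=Sigmoid, g=Tanh, linear_before_reset=0), with biases set to zero and
# a local sigmoid helper. Illustrative only, not the operator implementation.
import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

batch, input_size, hidden = 2, 3, 4
rng = np.random.default_rng(0)
Xt = rng.standard_normal((batch, input_size))
H_prev = np.zeros((batch, hidden))
Wz, Wr, Wh = (rng.standard_normal((hidden, input_size)) for _ in range(3))
Rz, Rr, Rh = (rng.standard_normal((hidden, hidden)) for _ in range(3))

zt = sigmoid(Xt @ Wz.T + H_prev @ Rz.T)
rt = sigmoid(Xt @ Wr.T + H_prev @ Rr.T)
ht = np.tanh(Xt @ Wh.T + (rt * H_prev) @ Rh.T)
Ht = (1 - zt) * ht + zt * H_prev         # shape (batch, hidden)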
Default values are the same as of corresponding ONNX - operators.For example with LeakyRelu, the default alpha is 0.01. - activation_beta - Attribute. - Optional scaling values used by some activation functions. The values - are consumed in the order of activation functions, for example (f, g, h) - in LSTM. Default values are the same as of corresponding ONNX operators. - activations - Attribute. - A list of 2 (or 4 if bidirectional) activation functions for update, - reset, and hidden gates. The activation functions must be one of the - activation functions specified above. Optional: See the equations for - default if not specified. - clip - Attribute. - Cell clip threshold. Clipping bounds the elements of a tensor in the - range of [-threshold, +threshold] and is applied to the input of - activations. No clip if not specified. - direction - Attribute. - Specify if the RNN is forward, reverse, or bidirectional. Must be one of - forward (default), reverse, or bidirectional. - hidden_size - Attribute. - Number of neurons in the hidden layer - layout - Attribute. - The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the - following shapes are expected: X.shape = [seq_length, batch_size, - input_size], Y.shape = [seq_length, num_directions, batch_size, - hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, - hidden_size]. If 1, the following shapes are expected: X.shape = - [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, - num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, - num_directions, hidden_size]. - linear_before_reset - Attribute. - When computing the output of the hidden gate, apply the linear - transformation before multiplying by the output of the reset gate. - - Returns - ======= - Y : Var - Type T. - A tensor that concats all the intermediate output values of the hidden. - It has shape ``[seq_length, num_directions, batch_size, hidden_size]``. - Y_h : Var - Type T. - The last output value of the hidden. It has shape - ``[num_directions, batch_size, hidden_size]``. - - Notes - ===== - Signature: ``ai.onnx@14::GRU``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T1: `tensor(int32)` - """ - return ( - _GRU( - _GRU.Attributes( - activation_alpha=AttrFloat32s.maybe( - activation_alpha, name="activation_alpha" - ), - activation_beta=AttrFloat32s.maybe( - activation_beta, name="activation_beta" - ), - activations=AttrStrings.maybe(activations, name="activations"), - clip=AttrFloat32.maybe(clip, name="clip"), - direction=AttrString(direction, name="direction"), - hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), - layout=AttrInt64(layout, name="layout"), - linear_before_reset=AttrInt64( - linear_before_reset, name="linear_before_reset" - ), - ), - _GRU.Inputs( - X=unwrap_vars(X), - W=unwrap_vars(W), - R=unwrap_vars(R), - B=unwrap_vars(B), - sequence_lens=unwrap_vars(sequence_lens), - initial_h=unwrap_vars(initial_h), - ), - ) - .get_output_vars( - X=get_value(X), - W=get_value(W), - R=get_value(R), - B=get_value(B), - sequence_lens=get_value(sequence_lens), - initial_h=get_value(initial_h), - ) - ._unpack_to_any() - ) - - -def gather( - data: Var, - indices: Var, - *, - axis: int = 0, -) -> Var: - r""" - Given ``data`` tensor of rank r >= 1, and ``indices`` tensor of rank q, - gather entries of the axis dimension of ``data`` (by default outer-most - one as axis=0) indexed by ``indices``, and concatenates them in an - output tensor of rank q + (r - 1). 
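# A numpy check of the Gather rule just described; np.take performs the same
# axis-wise indexing and reproduces the axis=0 example given below.
# Illustrative only, assuming just numpy.
import numpy as np

data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]])
indices = np.array([[0, 1], [1, 2]])
output = np.take(data, indices, axis=0)  # rank q + (r - 1) = 2 + (2 - 1) = 3
assert output.shape == (2, 2, 2)
assert np.allclose(output[1], [[2.3, 3.4], [4.5, 5.7]])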
- - If ``axis = 0``, let ``k = indices[i_{0}, ..., i_{q-1}]`` then - ``output[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]``: - - :: - - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - indices = [ - [0, 1], - [1, 2], - ] - output = [ - [ - [1.0, 1.2], - [2.3, 3.4], - ], - [ - [2.3, 3.4], - [4.5, 5.7], - ], - ] - - If ``axis = 1``, let ``k = indices[i_{0}, ..., i_{q-1}]`` then - ``output[j_{0}, i_{0}, ..., i_{q-1}, j_{1}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]``: - - :: - - data = [ - [1.0, 1.2, 1.9], - [2.3, 3.4, 3.9], - [4.5, 5.7, 5.9], - ] - indices = [ - [0, 2], - ] - axis = 1, - output = [ - [[1.0, 1.9]], - [[2.3, 3.9]], - [[4.5, 5.9]], - ] - - Parameters - ========== - data - Type T. - Tensor of rank r >= 1. - indices - Type Tind. - Tensor of int32/int64 indices, of any rank q. All index values are - expected to be within bounds [-s, s-1] along axis of size s. It is an - error if any of the index values are out of bounds. - axis - Attribute. - Which axis to gather on. Negative value means counting dimensions from - the back. Accepted range is [-r, r-1] where r = rank(data). - - Returns - ======= - output : Var - Type T. - Tensor of rank q + (r - 1). - - Notes - ===== - Signature: ``ai.onnx@13::Gather``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - Tind: `tensor(int32)`, `tensor(int64)` - """ - return ( - _Gather( - _Gather.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Gather.Inputs( - data=unwrap_vars(data), - indices=unwrap_vars(indices), - ), - ) - .get_output_vars( - data=get_value(data), - indices=get_value(indices), - ) - .output - ) - - -def gather_elements( - data: Var, - indices: Var, - *, - axis: int = 0, -) -> Var: - r""" - GatherElements takes two inputs ``data`` and ``indices`` of the same - rank r >= 1 and an optional attribute ``axis`` that identifies an axis - of ``data`` (by default, the outer-most axis, that is axis 0). It is an - indexing operation that produces its output by indexing into the input - data tensor at index positions determined by elements of the ``indices`` - tensor. Its output shape is the same as the shape of ``indices`` and - consists of one value (gathered from the ``data``) for each element in - ``indices``. - - For instance, in the 3-D case (r = 3), the output produced is determined - by the following equations: - - :: - - out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0, - out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1, - out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2, - - This operator is also the inverse of ScatterElements. It is similar to - Torch's gather operation. - - Example 1: - - :: - - data = [ - [1, 2], - [3, 4], - ] - indices = [ - [0, 0], - [1, 0], - ] - axis = 1 - output = [ - [1, 1], - [4, 3], - ] - - Example 2: - - :: - - data = [ - [1, 2, 3], - [4, 5, 6], - [7, 8, 9], - ] - indices = [ - [1, 2, 0], - [2, 0, 0], - ] - axis = 0 - output = [ - [4, 8, 3], - [7, 2, 3], - ] - - Parameters - ========== - data - Type T. - Tensor of rank r >= 1. - indices - Type Tind. - Tensor of int32/int64 indices, with the same rank r as the input. All - index values are expected to be within bounds [-s, s-1] along axis of - size s. It is an error if any of the index values are out of bounds. 
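# A numpy check of the GatherElements indexing above; np.take_along_axis applies
# the same per-element rule and reproduces Example 1 from the docstring.
# Illustrative only, assuming just numpy.
import numpy as np

data = np.array([[1, 2], [3, 4]])
indices = np.array([[0, 0], [1, 0]])
output = np.take_along_axis(data, indices, axis=1)
assert (output == np.array([[1, 1], [4, 3]])).all()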
- axis - Attribute. - Which axis to gather on. Negative value means counting dimensions from - the back. Accepted range is [-r, r-1] where r = rank(data). - - Returns - ======= - output : Var - Type T. - Tensor of the same shape as indices. - - Notes - ===== - Signature: ``ai.onnx@13::GatherElements``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - Tind: `tensor(int32)`, `tensor(int64)` - """ - return ( - _GatherElements( - _GatherElements.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _GatherElements.Inputs( - data=unwrap_vars(data), - indices=unwrap_vars(indices), - ), - ) - .get_output_vars( - data=get_value(data), - indices=get_value(indices), - ) - .output - ) - - -def gather_nd( - data: Var, - indices: Var, - *, - batch_dims: int = 0, -) -> Var: - r""" - Given ``data`` tensor of rank ``r`` >= 1, ``indices`` tensor of rank - ``q`` >= 1, and ``batch_dims`` integer ``b``, this operator gathers - slices of ``data`` into an output tensor of rank - ``q + r - indices_shape[-1] - 1 - b``. - - ``indices`` is an q-dimensional integer tensor, best thought of as a - ``(q-1)``-dimensional tensor of index-tuples into ``data``, where each - element defines a slice of ``data`` - - ``batch_dims`` (denoted as ``b``) is an integer indicating the number of - batch dimensions, i.e the leading ``b`` number of dimensions of ``data`` - tensor and ``indices`` are representing the batches, and the gather - starts from the ``b+1`` dimension. - - Some salient points about the inputs' rank and shape: - - 1) r >= 1 and q >= 1 are to be honored. There is no dependency condition - to be met between ranks ``r`` and ``q`` - - 2) The first ``b`` dimensions of the shape of ``indices`` tensor and - ``data`` tensor must be equal. - - 3) b < min(q, r) is to be honored. - - 4) The ``indices_shape[-1]`` should have a value between 1 (inclusive) - and rank ``r-b`` (inclusive) - - 5) All values in ``indices`` are expected to be within bounds [-s, s-1] - along axis of size ``s`` (i.e.) - ``-data_shape[i] <= indices[...,i] <= data_shape[i] - 1``. It is an - error if any of the index values are out of bounds. - - The output is computed as follows: - - The output tensor is obtained by mapping each index-tuple in the - ``indices`` tensor to the corresponding slice of the input ``data``. - - 1) If ``indices_shape[-1] > r-b`` => error condition - - 2) If ``indices_shape[-1] == r-b``, since the rank of ``indices`` is - ``q``, ``indices`` can be thought of as ``N`` ``(q-b-1)``-dimensional - tensors containing 1-D tensors of dimension ``r-b``, where ``N`` is - an integer equals to the product of 1 and all the elements in the - batch dimensions of the indices_shape. Let us think of each such - ``r-b`` ranked tensor as ``indices_slice``. Each *scalar value* - corresponding to ``data[0:b-1,indices_slice]`` is filled into the - corresponding location of the ``(q-b-1)``-dimensional tensor to form - the ``output`` tensor (Example 1 below) - - 3) If ``indices_shape[-1] < r-b``, since the rank of ``indices`` is - ``q``, ``indices`` can be thought of as ``N`` ``(q-b-1)``-dimensional - tensor containing 1-D tensors of dimension ``< r-b``. Let us think of - each such tensors as ``indices_slice``. 
Each *tensor slice* - corresponding to ``data[0:b-1, indices_slice , :]`` is filled into - the corresponding location of the ``(q-b-1)``-dimensional tensor to - form the ``output`` tensor (Examples 2, 3, 4 and 5 below) - - This operator is the inverse of ``ScatterND``. - - **Example 1** - - :: - - batch_dims = 0 - data = [[0,1],[2,3]] # data_shape = [2, 2] - indices = [[0,0],[1,1]] # indices_shape = [2, 2] - output = [0,3] # output_shape = [2] - - **Example 2** - - :: - - batch_dims = 0 - data = [[0,1],[2,3]] # data_shape = [2, 2] - indices = [[1],[0]] # indices_shape = [2, 1] - output = [[2,3],[0,1]] # output_shape = [2, 2] - - **Example 3** - - :: - - batch_dims = 0 - data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] - indices = [[0,1],[1,0]] # indices_shape = [2, 2] - output = [[2,3],[4,5]] # output_shape = [2, 2] - - **Example 4** - - :: - - batch_dims = 0 - data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] - indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2] - output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2] - - **Example 5** - - :: - - batch_dims = 1 - data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2] - indices = [[1],[0]] # indices_shape = [2, 1] - output = [[2,3],[4,5]] # output_shape = [2, 2] - - Parameters - ========== - data - Type T. - Tensor of rank r >= 1. - indices - Type tensor(int64). - Tensor of rank q >= 1. All index values are expected to be within bounds - [-s, s-1] along axis of size s. It is an error if any of the index - values are out of bounds. - batch_dims - Attribute. - The number of batch dimensions. The gather of indexing starts from - dimension of data[batch_dims:] - - Returns - ======= - output : Var - Type T. - Tensor of rank q + r - indices_shape[-1] - 1. - - Notes - ===== - Signature: ``ai.onnx@13::GatherND``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _GatherND( - _GatherND.Attributes( - batch_dims=AttrInt64(batch_dims, name="batch_dims"), - ), - _GatherND.Inputs( - data=unwrap_vars(data), - indices=unwrap_vars(indices), - ), - ) - .get_output_vars( - data=get_value(data), - indices=get_value(indices), - ) - .output - ) - - -def gemm( - A: Var, - B: Var, - C: Optional[Var] = None, - *, - alpha: float = 1.0, - beta: float = 1.0, - transA: int = 0, - transB: int = 0, -) -> Var: - r""" - General Matrix multiplication: - https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3 - - - A' = transpose(A) if transA else A - - B' = transpose(B) if transB else B - - Compute Y = alpha \* A' \* B' + beta \* C, where input tensor A has - shape (M, K) or (K, M), input tensor B has shape (K, N) or (N, K), input - tensor C is broadcastable to shape (M, N), and output tensor Y has shape - (M, N). A will be transposed before doing the computation if attribute - transA is non-zero, same for B and transB. This operator supports - **unidirectional broadcasting** (tensor C should be unidirectional - broadcastable to tensor A \* B); for more details please check `the - doc `__. - This operator has **optional** inputs/outputs. See `the - doc `__ for more - details about the representation of optional arguments. An empty string - may be used in the place of an actual argument's name to indicate a - missing argument. 
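# A numpy sketch of the batch_dims=0 GatherND rule above: every innermost vector
# of `indices` is used as a (partial) index-tuple into `data`. `gather_nd0` is a
# hypothetical helper used only for illustration, not the operator implementation.
import numpy as np

def gather_nd0(data, indices):
    out = np.stack([data[tuple(idx)] for idx in indices.reshape(-1, indices.shape[-1])])
    return out.reshape(indices.shape[:-1] + data.shape[indices.shape[-1]:])

data = np.array([[0, 1], [2, 3]])
assert (gather_nd0(data, np.array([[0, 0], [1, 1]])) == np.array([0, 3])).all()      # Example 1
assert (gather_nd0(data, np.array([[1], [0]])) == np.array([[2, 3], [0, 1]])).all()  # Example 2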
Trailing optional arguments (those not followed by an - argument that is present) may also be simply omitted. - - Parameters - ========== - A - Type T. - Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, - M) if transA is non-zero. - B - Type T. - Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, - K) if transB is non-zero. - C - Type T. - Optional input tensor C. If not specified, the computation is done as if - C is a scalar 0. The shape of C should be unidirectional broadcastable - to (M, N). - alpha - Attribute. - Scalar multiplier for the product of input tensors A \* B. - beta - Attribute. - Scalar multiplier for input tensor C. - transA - Attribute. - Whether A should be transposed - transB - Attribute. - Whether B should be transposed - - Returns - ======= - Y : Var - Type T. - Output tensor of shape (M, N). - - Notes - ===== - Signature: ``ai.onnx@13::Gemm``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _Gemm( - _Gemm.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - beta=AttrFloat32(beta, name="beta"), - transA=AttrInt64(transA, name="transA"), - transB=AttrInt64(transB, name="transB"), - ), - _Gemm.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - C=unwrap_vars(C), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - C=get_value(C), - ) - .Y - ) - - -def global_average_pool( - X: Var, -) -> Var: - r""" - GlobalAveragePool consumes an input tensor X and applies average pooling - across the values in the same channel. This is equivalent to AveragePool - with kernel size equal to the spatial dimension of input tensor. - - Parameters - ========== - X - Type T. - Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. For non - image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), - where N is the batch size. - - Returns - ======= - Y : Var - Type T. - Output data tensor from pooling across the input tensor. The output - tensor has the same rank as the input. The first two dimensions of - output shape are the same as the input (N x C), while the other - dimensions are all 1. - - Notes - ===== - Signature: ``ai.onnx@1::GlobalAveragePool``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _GlobalAveragePool( - _GlobalAveragePool.Attributes(), - _GlobalAveragePool.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def global_lp_pool( - X: Var, - *, - p: int = 2, -) -> Var: - r""" - GlobalLpPool consumes an input tensor X and applies lp pool pooling - across the values in the same channel. This is equivalent to LpPool with - kernel size equal to the spatial dimension of input tensor. - - Parameters - ========== - X - Type T. - Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. For non - image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), - where N is the batch size. - p - Attribute. - p value of the Lp norm used to pool over the input data. - - Returns - ======= - Y : Var - Type T. - Output data tensor from pooling across the input tensor. 
The output - tensor has the same rank as the input. The first two dimensions of - output shape are the same as the input (N x C), while the other - dimensions are all 1. - - Notes - ===== - Signature: ``ai.onnx@2::GlobalLpPool``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _GlobalLpPool( - _GlobalLpPool.Attributes( - p=AttrInt64(p, name="p"), - ), - _GlobalLpPool.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def global_max_pool( - X: Var, -) -> Var: - r""" - GlobalMaxPool consumes an input tensor X and applies max pooling across - the values in the same channel. This is equivalent to MaxPool with - kernel size equal to the spatial dimension of input tensor. - - Parameters - ========== - X - Type T. - Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. For non - image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), - where N is the batch size. - - Returns - ======= - Y : Var - Type T. - Output data tensor from pooling across the input tensor. The output - tensor has the same rank as the input. The first two dimensions of - output shape are the same as the input (N x C), while the other - dimensions are all 1. - - Notes - ===== - Signature: ``ai.onnx@1::GlobalMaxPool``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _GlobalMaxPool( - _GlobalMaxPool.Attributes(), - _GlobalMaxPool.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def greater( - A: Var, - B: Var, -) -> Var: - r""" - Returns the tensor resulted from performing the ``greater`` logical - operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the logical operator. - B - Type T. - Second input operand for the logical operator. - - Returns - ======= - C : Var - Type T1. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Greater``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(bool)` - """ - return ( - _Greater( - _Greater.Attributes(), - _Greater.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def greater_or_equal( - A: Var, - B: Var, -) -> Var: - r""" - Returns the tensor resulted from performing the ``greater_equal`` - logical operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the logical operator. - B - Type T. - Second input operand for the logical operator. - - Returns - ======= - C : Var - Type T1. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@16::GreaterOrEqual``. 
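# A numpy check of the Gemm computation described in the Gemm docstring above:
# Y = alpha * A' @ B' + beta * C, with the optional transposes. Illustrative only,
# assuming just numpy.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 3))          # transA=1, so A is laid out as (K, M)
B = rng.standard_normal((4, 5))          # transB=0, so B is (K, N)
C = rng.standard_normal((5,))            # unidirectionally broadcastable to (M, N)
alpha, beta, transA, transB = 0.5, 2.0, 1, 0
Ap = A.T if transA else A
Bp = B.T if transB else B
Y = alpha * (Ap @ Bp) + beta * C         # shape (M, N) = (3, 5)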
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(bool)` - """ - return ( - _GreaterOrEqual( - _GreaterOrEqual.Attributes(), - _GreaterOrEqual.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def grid_sample( - X: Var, - grid: Var, - *, - align_corners: int = 0, - mode: str = "bilinear", - padding_mode: str = "zeros", -) -> Var: - r""" - Given an input ``X`` and a flow-field ``grid``, computes the output - ``Y`` using ``X`` values and pixel locations from ``grid``. Currently, - only spatial (4-D) inputs are supported. For input ``X`` with shape (N, - C, H, W) and ``grid`` with shape (N, H_out, W_out, 2), the output ``Y`` - will have shape (N, C, H_out, W_out). - - The tensor ``X`` contains values at centers of square pixels in a H by W - 2-dimensional image. The tensor ``grid`` describes normalized positions - where the output ``Y`` is to be computed using a specified interpolation - method (the mode) and a padding mode (for grid positions falling outside - the 2-dimensional image). - - Elements in ``grid[N, H_out, W_out]`` are size-2 vectors specifying - positions in the 2-dimensional space of ``X``. They are used to - interpolate output values of ``Y[N, C, H_out, W_out]``. - - The GridSample operator is often used in doing grid generator and - sampler in the `Spatial Transformer - Networks `__. See also in - `torch.nn.functional.grid_sample `__. - - Parameters - ========== - X - Type T1. - 4-D tensor of shape (N, C, H, W), where N is the batch size, C is the - numbers of channels, H and W are the height and width of the input data. - grid - Type T2. - Input offset, 4-D tensor of shape (N, H_out, W_out, 2), where H_out and - W_out are the height and width of grid and output, Grid specifies the - sampling pixel locations normalized by the input spatial dimensions. - Therefore, it should have most values in the range of [-1, 1]. If grid - has values outside the range of [-1, 1], the corresponding outputs will - be handled as defined by padding_mode. - align_corners - Attribute. - If align_corners=1, the extrema (-1 and 1) are considered as referring - to the center points of the input's corner pixels. If align_corners=0, - they are instead considered as referring to the corner points of the - input's corner pixels, making the sampling more resolution agnostic. - mode - Attribute. - Three interpolation modes: bilinear (default), nearest and bicubic. - padding_mode - Attribute. - Support padding modes for outside grid values: ``zeros``\ (default), - ``border``, ``reflection``. zeros: use 0 for out-of-bound grid - locations, border: use border values for out-of-bound grid locations, - reflection: use values at locations reflected by the border for - out-of-bound grid locations. If index 0 represents the margin pixel, the - reflected value at index -1 will be the same as the value at index 1. - For location far away from the border, it will keep being reflected - until becoming in bound. If pixel location x = -3.5 reflects by border - -1 and becomes x' = 1.5, then reflects by border 1 and becomes x'' = - 0.5. - - Returns - ======= - Y : Var - Type T1. - 4-D tensor of shape (N, C, H_out, W_out) of sampled values. 
For integer - input types, intermediate values are computed as floating point and cast - to integer at the end. - - Notes - ===== - Signature: ``ai.onnx@16::GridSample``. - - Type constraints: - - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _GridSample( - _GridSample.Attributes( - align_corners=AttrInt64(align_corners, name="align_corners"), - mode=AttrString(mode, name="mode"), - padding_mode=AttrString(padding_mode, name="padding_mode"), - ), - _GridSample.Inputs( - X=unwrap_vars(X), - grid=unwrap_vars(grid), - ), - ) - .get_output_vars( - X=get_value(X), - grid=get_value(grid), - ) - .Y - ) - - -def hamming_window( - size: Var, - *, - output_datatype: int = 1, - periodic: int = 1, -) -> Var: - r""" - Generates a Hamming window as described in the paper - https://ieeexplore.ieee.org/document/1455106. - - Parameters - ========== - size - Type T1. - A scalar value indicating the length of the window. - output_datatype - Attribute. - The data type of the output tensor. Strictly must be one of the values - from DataType enum in TensorProto whose values correspond to T2. The - default value is 1 = FLOAT. - periodic - Attribute. - If 1, returns a window to be used as periodic function. If 0, return a - symmetric window. When 'periodic' is specified, hann computes a window - of length size + 1 and returns the first size points. The default value - is 1. - - Returns - ======= - output : Var - Type T2. - A Hamming window with length: size. The output has the shape: [size]. - - Notes - ===== - Signature: ``ai.onnx@17::HammingWindow``. - - Type constraints: - - T1: `tensor(int32)`, `tensor(int64)` - - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _HammingWindow( - _HammingWindow.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - periodic=AttrInt64(periodic, name="periodic"), - ), - _HammingWindow.Inputs( - size=unwrap_vars(size), - ), - ) - .get_output_vars( - size=get_value(size), - ) - .output - ) - - -def hann_window( - size: Var, - *, - output_datatype: int = 1, - periodic: int = 1, -) -> Var: - r""" - Generates a Hann window as described in the paper - https://ieeexplore.ieee.org/document/1455106. - - Parameters - ========== - size - Type T1. - A scalar value indicating the length of the window. - output_datatype - Attribute. - The data type of the output tensor. Strictly must be one of the values - from DataType enum in TensorProto whose values correspond to T2. The - default value is 1 = FLOAT. - periodic - Attribute. - If 1, returns a window to be used as periodic function. If 0, return a - symmetric window. When 'periodic' is specified, hann computes a window - of length size + 1 and returns the first size points. The default value - is 1. - - Returns - ======= - output : Var - Type T2. - A Hann window with length: size. The output has the shape: [size]. - - Notes - ===== - Signature: ``ai.onnx@17::HannWindow``. 
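# A numpy sketch of the periodic-window behaviour described above: a window of
# length size + 1 is computed and the first `size` points are returned.
# np.hanning/np.hamming give the symmetric variants; illustrative only.
import numpy as np

size = 8
hann_periodic = np.hanning(size + 1)[:size]
hamming_periodic = np.hamming(size + 1)[:size]
hann_symmetric = np.hanning(size)        # the periodic=0 case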
- - Type constraints: - - T1: `tensor(int32)`, `tensor(int64)` - - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _HannWindow( - _HannWindow.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - periodic=AttrInt64(periodic, name="periodic"), - ), - _HannWindow.Inputs( - size=unwrap_vars(size), - ), - ) - .get_output_vars( - size=get_value(size), - ) - .output - ) - - -def hard_sigmoid( - X: Var, - *, - alpha: float = 0.20000000298023224, - beta: float = 0.5, -) -> Var: - r""" - HardSigmoid takes one input data (Tensor) and produces one output - data (Tensor) where the HardSigmoid function, y = max(0, min(1, alpha - \* x + beta)), is applied to the tensor elementwise. - - Parameters - ========== - X - Type T. - Input tensor - alpha - Attribute. - Value of alpha. - beta - Attribute. - Value of beta. - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@6::HardSigmoid``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _HardSigmoid( - _HardSigmoid.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - beta=AttrFloat32(beta, name="beta"), - ), - _HardSigmoid.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def hard_swish( - X: Var, -) -> Var: - r""" - HardSwish takes one input data (Tensor) and produces one output data - (Tensor) where the HardSwish function, y = x \* max(0, min(1, alpha - \* x + beta)) = x \* HardSigmoid(x), where alpha = 1/6 and - beta = 0.5, is applied to the tensor elementwise. - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@14::HardSwish``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _HardSwish( - _HardSwish.Attributes(), - _HardSwish.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def hardmax( - input: Var, - *, - axis: int = -1, -) -> Var: - r""" - The operator computes the hardmax values for the given input: - - Hardmax(element in input, axis) = 1 if the element is the first maximum - value along the specified axis, 0 otherwise - - The "axis" attribute indicates the dimension along which Hardmax will be - performed. The output tensor has the same shape and contains the Hardmax - values of the corresponding input. - - Parameters - ========== - input - Type T. - The input tensor of rank >= axis. - axis - Attribute. - Describes the dimension Hardmax will be performed on. Negative value - means counting dimensions from the back. Accepted range is [-r, r-1] - where r = rank(input). - - Returns - ======= - output : Var - Type T. - The output values with the same shape as the input tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Hardmax``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Hardmax( - _Hardmax.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Hardmax.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def identity( - input: Var, -) -> Var: - r""" - Identity operator - - Parameters - ========== - input - Type V. 
- Input tensor - - Returns - ======= - output : Var - Type V. - Tensor to copy input into. - - Notes - ===== - Signature: ``ai.onnx@16::Identity``. - - Type constraints: - - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Identity( - _Identity.Attributes(), - _Identity.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def if_( - cond: Var, - *, - else_branch: Callable[[], Iterable[Var]], - then_branch: Callable[[], Iterable[Var]], -) -> Sequence[Var]: - r""" - If conditional - - Parameters - ========== - cond - Type B. - Condition for the if. The tensor must contain a single element. - else_branch - Attribute. - Graph to run if condition is false. Has N outputs: values you wish to be - live-out to the enclosing scope. The number of outputs must match the - number of outputs in the then_branch. - then_branch - Attribute. - Graph to run if condition is true. Has N outputs: values you wish to be - live-out to the enclosing scope. The number of outputs must match the - number of outputs in the else_branch. - - Returns - ======= - outputs : Sequence[Var] - Type V. - Values that are live-out to the enclosing scope. The return values in - the ``then_branch`` and ``else_branch`` must be of the same data type. - The ``then_branch`` and ``else_branch`` may produce tensors with the - same element type and different shapes. 
If corresponding outputs from - the then-branch and the else-branch have static shapes S1 and S2, then - the shape of the corresponding output variable of the if-node (if - present) must be compatible with both S1 and S2 as it represents the - union of both possible shapes.For example, if in a model file, the first - output of ``then_branch`` is typed float tensor with shape [2] and the - first output of ``else_branch`` is another float tensor with shape [3], - If's first output should have (a) no shape set, or (b) a shape of rank 1 - with neither ``dim_value`` nor ``dim_param`` set, or (c) a shape of rank - 1 with a unique ``dim_param``. In contrast, the first output cannot have - the shape [2] since [2] and [3] are not compatible. - - Notes - ===== - Signature: ``ai.onnx@16::If``. - - Type constraints: - - B: `tensor(bool)` - - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - _else_branch_subgraph: Graph = subgraph((), else_branch) - _then_branch_subgraph: Graph = subgraph((), then_branch) - return ( - _If( - _If.Attributes( - else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), - then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), - ), - _If.Inputs( - cond=unwrap_vars(cond), - ), - out_variadic=len(_else_branch_subgraph.requested_results), - ) - .get_output_vars( - cond=get_value(cond), - ) - .outputs - ) - - -def instance_normalization( - input: Var, - scale: Var, - B: Var, - *, - epsilon: float = 9.999999747378752e-06, -) -> Var: - r""" - Carries out instance normalization as described in the paper - https://arxiv.org/abs/1607.08022. - - y = scale \* (x - mean) / sqrt(variance + epsilon) + B, where mean and - variance are computed per instance per channel. - - Parameters - ========== - input - Type T. 
- Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. For non - image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), - where N is the batch size. - scale - Type T. - The input 1-dimensional scale tensor of size C. - B - Type T. - The input 1-dimensional bias tensor of size C. - epsilon - Attribute. - The epsilon value to use to avoid division by zero. - - Returns - ======= - output : Var - Type T. - The output tensor of the same shape as input. - - Notes - ===== - Signature: ``ai.onnx@6::InstanceNormalization``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _InstanceNormalization( - _InstanceNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - ), - _InstanceNormalization.Inputs( - input=unwrap_vars(input), - scale=unwrap_vars(scale), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - input=get_value(input), - scale=get_value(scale), - B=get_value(B), - ) - .output - ) - - -def isinf( - X: Var, - *, - detect_negative: int = 1, - detect_positive: int = 1, -) -> Var: - r""" - Map infinity to true and other values to false. - - Parameters - ========== - X - Type T1. - input - detect_negative - Attribute. - (Optional) Whether map negative infinity to true. Default to 1 so that - negative infinity induces true. Set this attribute to 0 if negative - infinity should be mapped to false. - detect_positive - Attribute. - (Optional) Whether map positive infinity to true. Default to 1 so that - positive infinity induces true. Set this attribute to 0 if positive - infinity should be mapped to false. - - Returns - ======= - Y : Var - Type T2. - output - - Notes - ===== - Signature: ``ai.onnx@10::IsInf``. - - Type constraints: - - T1: `tensor(double)`, `tensor(float)` - - T2: `tensor(bool)` - """ - return ( - _IsInf( - _IsInf.Attributes( - detect_negative=AttrInt64(detect_negative, name="detect_negative"), - detect_positive=AttrInt64(detect_positive, name="detect_positive"), - ), - _IsInf.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def isnan( - X: Var, -) -> Var: - r""" - Returns which elements of the input are NaN. - - Parameters - ========== - X - Type T1. - input - - Returns - ======= - Y : Var - Type T2. - output - - Notes - ===== - Signature: ``ai.onnx@13::IsNaN``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(bool)` - """ - return ( - _IsNaN( - _IsNaN.Attributes(), - _IsNaN.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def lrn( - X: Var, - *, - alpha: float = 9.999999747378752e-05, - beta: float = 0.75, - bias: float = 1.0, - size: int, -) -> Var: - r""" - Local Response Normalization proposed in the `AlexNet - paper `__. - It normalizes over local input regions. The local region is defined - across the channels. For an element ``X[n, c, d1, ..., dk]`` in a tensor - of shape ``(N x C x D1 x D2, ..., Dk)``, its region is - ``{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}``. - - ``square_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2)``, where - ``max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))``. 
- - ``Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta`` - - Parameters - ========== - X - Type T. - Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. For non - image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), - where N is the batch size. Optionally, if dimension denotation is in - effect, the operation expects the input data tensor to arrive with the - dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, - DATA_FEATURE ...]. - alpha - Attribute. - Scaling parameter. - beta - Attribute. - The exponent. - bias - Attribute. - - size - Attribute. - The number of channels to sum over - - Returns - ======= - Y : Var - Type T. - Output tensor, which has the shape and type as input tensor - - Notes - ===== - Signature: ``ai.onnx@13::LRN``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _LRN( - _LRN.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - beta=AttrFloat32(beta, name="beta"), - bias=AttrFloat32(bias, name="bias"), - size=AttrInt64(size, name="size"), - ), - _LRN.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def lstm( - X: Var, - W: Var, - R: Var, - B: Optional[Var] = None, - sequence_lens: Optional[Var] = None, - initial_h: Optional[Var] = None, - initial_c: Optional[Var] = None, - P: Optional[Var] = None, - *, - activation_alpha: Optional[Iterable[float]] = None, - activation_beta: Optional[Iterable[float]] = None, - activations: Optional[Iterable[str]] = None, - clip: Optional[float] = None, - direction: str = "forward", - hidden_size: Optional[int] = None, - input_forget: int = 0, - layout: int = 0, -) -> tuple[Var, Var, Var]: - r""" - Computes an one-layer LSTM. This operator is usually supported via some - custom implementation such as CuDNN. 
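For reference, a minimal NumPy rendering of the LRN formula quoted above (this sketch is not part of the patch and assumes an ``(N, C, ...)`` channel-first layout)::

    import numpy as np

    def lrn_reference(x, size, alpha=1e-4, beta=0.75, bias=1.0):
        channels = x.shape[1]
        square_sum = np.zeros_like(x)
        for c in range(channels):
            lo = max(0, c - (size - 1) // 2)
            hi = min(channels - 1, c + int(np.ceil((size - 1) / 2)))
            # Sum of squares over the local channel region around channel c.
            square_sum[:, c] = np.sum(x[:, lo : hi + 1] ** 2, axis=1)
        return x / (bias + (alpha / size) * square_sum) ** beta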
- - Notations: - - - ``X`` - input tensor - - ``i`` - input gate - - ``o`` - output gate - - ``f`` - forget gate - - ``c`` - cell gate - - ``t`` - time step (t-1 means previous time step) - - ``W[iofc]`` - W parameter weight matrix for input, output, forget, - and cell gates - - ``R[iofc]`` - R recurrence weight matrix for input, output, forget, - and cell gates - - ``Wb[iofc]`` - W bias vectors for input, output, forget, and cell - gates - - ``Rb[iofc]`` - R bias vectors for input, output, forget, and cell - gates - - ``P[iof]`` - P peephole weight vector for input, output, and forget - gates - - ``WB[iofc]`` - W parameter weight matrix for backward input, output, - forget, and cell gates - - ``RB[iofc]`` - R recurrence weight matrix for backward input, output, - forget, and cell gates - - ``WBb[iofc]`` - W bias vectors for backward input, output, forget, - and cell gates - - ``RBb[iofc]`` - R bias vectors for backward input, output, forget, - and cell gates - - ``PB[iof]`` - P peephole weight vector for backward input, output, - and forget gates - - ``H`` - Hidden state - - ``num_directions`` - 2 if direction == bidirectional else 1 - - Activation functions: - - - Relu(x) - max(0, x) - - Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - - Sigmoid(x) - 1/(1 + e^{-x}) - - NOTE: Below are optional - - - Affine(x) - alpha*x + beta - - LeakyRelu(x) - x if x >= 0 else alpha \* x - - ThresholdedRelu(x) - x if x >= alpha else 0 - - ScaledTanh(x) - alpha\ *Tanh(beta*\ x) - - HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) - - Elu(x) - x if x >= 0 else alpha*(e^x - 1) - - Softsign(x) - x/(1 + \|x\|) - - Softplus(x) - log(1 + e^x) - - Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): - - - it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) - - ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) - - ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) - - Ct = ft (.) Ct-1 + it (.) ct - - ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) - - Ht = ot (.) h(Ct) This operator has **optional** inputs/outputs. See - `the doc `__ for - more details about the representation of optional arguments. An empty - string may be used in the place of an actual argument's name to - indicate a missing argument. Trailing optional arguments (those not - followed by an argument that is present) may also be simply omitted. - - Parameters - ========== - X - Type T. - The input sequences packed (and potentially padded) into one 3-D tensor - with the shape of ``[seq_length, batch_size, input_size]``. - W - Type T. - The weight tensor for the gates. Concatenation of ``W[iofc]`` and - ``WB[iofc]`` (if bidirectional) along dimension 0. The tensor has shape - ``[num_directions, 4*hidden_size, input_size]``. - R - Type T. - The recurrence weight tensor. Concatenation of ``R[iofc]`` and - ``RB[iofc]`` (if bidirectional) along dimension 0. This tensor has shape - ``[num_directions, 4*hidden_size, hidden_size]``. - B - Type T. - The bias tensor for input gate. Concatenation of - ``[Wb[iofc], Rb[iofc]]``, and ``[WBb[iofc], RBb[iofc]]`` (if - bidirectional) along dimension 0. This tensor has shape - ``[num_directions, 8*hidden_size]``. Optional: If not specified - - assumed to be 0. - sequence_lens - Type T1. - Optional tensor specifying lengths of the sequences in a batch. If not - specified - assumed all sequences in the batch to have length - ``seq_length``. It has shape ``[batch_size]``. - initial_h - Type T. - Optional initial value of the hidden. If not specified - assumed to be - 0. 
It has shape ``[num_directions, batch_size, hidden_size]``. - initial_c - Type T. - Optional initial value of the cell. If not specified - assumed to be 0. - It has shape ``[num_directions, batch_size, hidden_size]``. - P - Type T. - The weight tensor for peepholes. Concatenation of ``P[iof]`` and - ``PB[iof]`` (if bidirectional) along dimension 0. It has shape - ``[num_directions, 3*hidde_size]``. Optional: If not specified - assumed - to be 0. - activation_alpha - Attribute. - Optional scaling values used by some activation functions. The values - are consumed in the order of activation functions, for example (f, g, h) - in LSTM. Default values are the same as of corresponding ONNX - operators.For example with LeakyRelu, the default alpha is 0.01. - activation_beta - Attribute. - Optional scaling values used by some activation functions. The values - are consumed in the order of activation functions, for example (f, g, h) - in LSTM. Default values are the same as of corresponding ONNX operators. - activations - Attribute. - A list of 3 (or 6 if bidirectional) activation functions for input, - output, forget, cell, and hidden. The activation functions must be one - of the activation functions specified above. Optional: See the equations - for default if not specified. - clip - Attribute. - Cell clip threshold. Clipping bounds the elements of a tensor in the - range of [-threshold, +threshold] and is applied to the input of - activations. No clip if not specified. - direction - Attribute. - Specify if the RNN is forward, reverse, or bidirectional. Must be one of - forward (default), reverse, or bidirectional. - hidden_size - Attribute. - Number of neurons in the hidden layer - input_forget - Attribute. - Couple the input and forget gates if 1. - layout - Attribute. - The shape format of inputs X, initial_h, initial_c and outputs Y, Y_h, - Y_c. If 0, the following shapes are expected: X.shape = [seq_length, - batch_size, input_size], Y.shape = [seq_length, num_directions, - batch_size, hidden_size], initial_h.shape = Y_h.shape = initial_c.shape - = Y_c.shape = [num_directions, batch_size, hidden_size]. If 1, the - following shapes are expected: X.shape = [batch_size, seq_length, - input_size], Y.shape = [batch_size, seq_length, num_directions, - hidden_size], initial_h.shape = Y_h.shape = initial_c.shape = Y_c.shape - = [batch_size, num_directions, hidden_size]. - - Returns - ======= - Y : Var - Type T. - A tensor that concats all the intermediate output values of the hidden. - It has shape ``[seq_length, num_directions, batch_size, hidden_size]``. - Y_h : Var - Type T. - The last output value of the hidden. It has shape - ``[num_directions, batch_size, hidden_size]``. - Y_c : Var - Type T. - The last output value of the cell. It has shape - ``[num_directions, batch_size, hidden_size]``. - - Notes - ===== - Signature: ``ai.onnx@14::LSTM``. 
- - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T1: `tensor(int32)` - """ - return ( - _LSTM( - _LSTM.Attributes( - activation_alpha=AttrFloat32s.maybe( - activation_alpha, name="activation_alpha" - ), - activation_beta=AttrFloat32s.maybe( - activation_beta, name="activation_beta" - ), - activations=AttrStrings.maybe(activations, name="activations"), - clip=AttrFloat32.maybe(clip, name="clip"), - direction=AttrString(direction, name="direction"), - hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), - input_forget=AttrInt64(input_forget, name="input_forget"), - layout=AttrInt64(layout, name="layout"), - ), - _LSTM.Inputs( - X=unwrap_vars(X), - W=unwrap_vars(W), - R=unwrap_vars(R), - B=unwrap_vars(B), - sequence_lens=unwrap_vars(sequence_lens), - initial_h=unwrap_vars(initial_h), - initial_c=unwrap_vars(initial_c), - P=unwrap_vars(P), - ), - ) - .get_output_vars( - X=get_value(X), - W=get_value(W), - R=get_value(R), - B=get_value(B), - sequence_lens=get_value(sequence_lens), - initial_h=get_value(initial_h), - initial_c=get_value(initial_c), - P=get_value(P), - ) - ._unpack_to_any() - ) - - -def layer_normalization( - X: Var, - Scale: Var, - B: Optional[Var] = None, - *, - axis: int = -1, - epsilon: float = 9.999999747378752e-06, - stash_type: int = 1, -) -> tuple[Var, Var, Var]: - r""" - This is layer normalization defined in ONNX as function. The overall - computation can be split into two stages. The first stage is - standardization, which makes the normalized elements have zero mean and - unit variances. The computation required by standardization can be - described by the following equations. - ``Mean = ReduceMean(X) D = Sub(X, Mean) DD = Mul(D, D) Var = ReduceMean(DD) VarEps = Add(Var, epsilon) StdDev = Sqrt(VarEps) InvStdDev = Reciprocal(StdDev) Normalized = Mul(D, InvStdDev)`` - where ``normalized_axes`` is ``[axis, ..., rank of X - 1]``. The - variables ``Var`` and ``StdDev`` stand for variance and standard - deviation, respectively. The second output is ``Mean`` and the last one - is ``InvStdDev``. Depending on ``stash_type`` attribute, the actual - computation must happen in different floating-point precision. For - example, if ``stash_type`` is 1, this operator casts all input variables - to 32-bit float, perform the computation, and finally cast - ``Normalized`` back to the original type of ``X``. The second stage then - scales and shifts the outcome of the first stage using - ``NormalizedScaled = Mul(Normalized, Scale) Y = Add(NormalizedScaled, B)`` - The second stage doesn't depends on ``stash_type``. All equations are in - `this syntax `__. - The same variable (i.e., input, output, and attribute) uses the same - name in the equations above and this operator's definition. Let ``d[i]`` - indicate the i-th dimension of ``X``. If ``X``'s shape is - ``[d[0], ..., d[axis-1], d[axis], ..., d[rank-1]]``, the shape of - ``Mean`` and ``InvStdDev`` is ``[d[0], ..., d[axis-1], 1, ..., 1]``. - ``Y`` and ``X`` have the same shape. This operator supports - unidirectional broadcasting (tensors ``Scale`` and ``B`` should be - unidirectional broadcastable to tensor ``X``); for more details please - check `the - doc `__. - - Parameters - ========== - X - Type T. - Tensor to be normalized. - Scale - Type T. - Scale tensor. - B - Type T. - Bias tensor. - axis - Attribute. - The first normalization dimension. If rank(X) is r, axis' allowed range - is [-r, r). Negative value means counting dimensions from the back. - epsilon - Attribute. 
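A hedged construction sketch for the ``lstm`` wrapper above (not part of this patch; the opset module path ``spox.opset.ai.onnx.v17`` is an assumption)::

    import numpy as np
    import spox.opset.ai.onnx.v17 as op
    from spox import argument, Tensor

    hidden_size, input_size = 8, 4
    X = argument(Tensor(np.float32, ("seq", "batch", input_size)))
    W = argument(Tensor(np.float32, (1, 4 * hidden_size, input_size)))
    R = argument(Tensor(np.float32, (1, 4 * hidden_size, hidden_size)))

    # The constructor returns the three documented outputs as a tuple of Vars.
    Y, Y_h, Y_c = op.lstm(X, W, R, hidden_size=hidden_size)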
- The epsilon value to use to avoid division by zero. - stash_type - Attribute. - Type of Mean and InvStdDev. This also specifies stage one's computation - precision. - - Returns - ======= - Y : Var - Type T. - Normalized tensor. - Mean : Var - Type U. - Saved mean used during training to speed up gradient computation - InvStdDev : Var - Type U. - Saved inverse standard deviation used during training to speed up - gradient computation. - - Notes - ===== - Signature: ``ai.onnx@17::LayerNormalization``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - U: `tensor(bfloat16)`, `tensor(float)` - """ - return ( - _LayerNormalization( - _LayerNormalization.Attributes( - axis=AttrInt64(axis, name="axis"), - epsilon=AttrFloat32(epsilon, name="epsilon"), - stash_type=AttrInt64(stash_type, name="stash_type"), - ), - _LayerNormalization.Inputs( - X=unwrap_vars(X), - Scale=unwrap_vars(Scale), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - X=get_value(X), - Scale=get_value(Scale), - B=get_value(B), - ) - ._unpack_to_any() - ) - - -def leaky_relu( - X: Var, - *, - alpha: float = 0.009999999776482582, -) -> Var: - r""" - LeakyRelu takes input data (Tensor) and an argument alpha, and - produces one output data (Tensor) where the function - ``f(x) = alpha * x for x < 0``, ``f(x) = x for x >= 0``, is applied to - the data tensor elementwise. - - Parameters - ========== - X - Type T. - Input tensor - alpha - Attribute. - Coefficient of leakage. - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@16::LeakyRelu``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _LeakyRelu( - _LeakyRelu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _LeakyRelu.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def less( - A: Var, - B: Var, -) -> Var: - r""" - Returns the tensor resulted from performing the ``less`` logical - operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the logical operator. - B - Type T. - Second input operand for the logical operator. - - Returns - ======= - C : Var - Type T1. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Less``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(bool)` - """ - return ( - _Less( - _Less.Attributes(), - _Less.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def less_or_equal( - A: Var, - B: Var, -) -> Var: - r""" - Returns the tensor resulted from performing the ``less_equal`` logical - operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the logical operator. - B - Type T. - Second input operand for the logical operator. 
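The two-stage LayerNormalization computation described above can be mirrored in NumPy as a rough reference (this sketch ignores ``stash_type`` and is not part of the patch)::

    import numpy as np

    def layer_norm_reference(x, scale, b, axis=-1, epsilon=1e-5):
        axes = tuple(range(axis % x.ndim, x.ndim))   # normalized_axes = [axis, ..., rank - 1]
        mean = x.mean(axis=axes, keepdims=True)
        d = x - mean
        inv_std_dev = 1.0 / np.sqrt((d * d).mean(axis=axes, keepdims=True) + epsilon)
        normalized = d * inv_std_dev                       # stage one: standardization
        return normalized * scale + b, mean, inv_std_dev   # stage two: scale and shift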
- - Returns - ======= - C : Var - Type T1. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@16::LessOrEqual``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(bool)` - """ - return ( - _LessOrEqual( - _LessOrEqual.Attributes(), - _LessOrEqual.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def log( - input: Var, -) -> Var: - r""" - Calculates the natural log of the given input tensor, element-wise. - - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The natural log of the input tensor computed element-wise - - Notes - ===== - Signature: ``ai.onnx@13::Log``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Log( - _Log.Attributes(), - _Log.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def log_softmax( - input: Var, - *, - axis: int = -1, -) -> Var: - r""" - The operator computes the log of softmax values for the given input: - - LogSoftmax(input, axis) = Log(Softmax(input, axis=axis)) - - The "axis" attribute indicates the dimension along which LogSoftmax will - be performed. The output tensor has the same shape and contains the - LogSoftmax values of the corresponding input. - - Parameters - ========== - input - Type T. - The input tensor of rank >= axis. - axis - Attribute. - Describes the dimension LogSoftmax will be performed on. Negative value - means counting dimensions from the back. Accepted range is [-r, r-1] - where r = rank(input). - - Returns - ======= - output : Var - Type T. - The output values with the same shape as the input tensor. - - Notes - ===== - Signature: ``ai.onnx@13::LogSoftmax``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _LogSoftmax( - _LogSoftmax.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _LogSoftmax.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def loop( - M: Optional[Var] = None, - cond: Optional[Var] = None, - v_initial: Sequence[Var] = (), - *, - body: Callable[..., Iterable[Var]], -) -> Sequence[Var]: - r""" - Generic Looping construct. This loop has multiple termination - conditions: - - 1) Trip count. Iteration count specified at runtime. Set by specifying - the input M. Optional. Set to empty string to omit. Note that a - static trip count (specified at graph construction time) can be - specified by passing in a constant node for input M. - 2) Loop termination condition. This is an input to the op that - determines whether to run the first iteration and also a loop-carried - dependency for the body graph. The body graph must yield a value for - the condition variable, whether this input is provided or not. - - This table summarizes the operating modes of this operator with - equivalent C-style code: - - Operator inputs defined as (max_trip_count, condition_var). - - - input ("", ""): for (int i=0; ; ++i) { cond = ... 
// Note this value - is ignored, but is required in the body } - - - input ("", cond) // Note this is analogous to a while loop bool cond - = ...; for (int i=0; cond; ++i) { cond = ...; } - - - input ("", 1) // Note this is analogous to a do-while loop bool cond - = true for (int i=0; cond; ++i) { cond = ...; } - - - input (trip_count, "") // Note this is analogous to a for loop int - trip_count = ... for (int i=0; i < trip_count; ++i) { cond = ...; // - ignored } - - - input (trip_count, cond) int trip_count = ...; bool cond = ...; for - (int i=0; i < trip_count && cond; ++i) { cond = ...; } - - *Sample usage - cond as well as trip count* - - :: - - graph predict-net { - %a = Constant[value = ]() - %b = Constant[value = ]() - %keepgoing = Constant[value = ]() - %max_trip_count = Constant[value = ]() - %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b) - return - } - - graph body-net ( - %i[INT32, scalar] // iteration number - %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used - %b_in[INT32, scalar] // incoming value of loop-carried-dependency b - ) { - %my_local = Add(%a, %b_in) - %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b - %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition - %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated - return %keepgoing_out, %b_out, %user_defined_val - } - - *Sample equivalent C code* - - :: - - { - /* User-defined code (enclosing scope) */ - int a = 3, b = 6; - bool keepgoing = true; // Analogous to input cond - /* End user-defined code */ - - /* Implicitly-defined code */ - const int max_trip_count = 10; // Analogous to input M - int user_defined_vals[]; // Imagine this is resizable - /* End implicitly-defined code */ - /* initialize loop-carried variables and scan-output variables */ - bool keepgoing_out = keepgoing - int b_out = b - - for (int i=0; i < max_trip_count && keepgoing_out; ++i) { - /* Implicitly-defined code: bind actual parameter values - to formal parameter variables of loop-body */ - bool keepgoing_in = keepgoing_out; - bool b_in = b_out; - - /* User-defined code (loop body) */ - int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine - b_out = a - b_in; - keepgoing_out = my_local > b_out; - user_defined_val = b_in + b_in; // b_in and b_out are different variables - /* End user-defined code */ - - /* Implicitly defined-code */ - user_defined_vals[i] = user_defined_val // accumulate scan-output values - } - // int t = my_local; // Can't do this. my_local is not accessible here. - - // The values below are bound to the output variables of the loop and therefore accessible - // b_out; user_defined_vals; keepgoing_out; - } - - There are several things of note in this code snippet: - - 1) Values from the enclosing scope (i.e. variable "a" here) are in scope - and can be referenced in the inputs of the loop. - 2) Any values computed in the loop body that needs to be used in a - subsequent iteration or after the loop are modelled using a pair of - variables in the loop-body, consisting of an input variable (eg., - b_in) and an output variable (eg., b_out). These are referred to as - loop-carried dependences. The loop operation node supplies the input - value of the input variable for the first iteration, and returns the - output value of the output variable produced by the final iteration. 
- 3) Scan_output variables are used to implicitly concatenate values - computed across all the iterations. In the above example, the value - of user_defined_val computed over all iterations are concatenated and - returned as the value of user_defined_vals after the loop. - 4) Values created in the body cannot be accessed in the enclosing scope, - except using the mechanism described above. - - Note that the semantics of this op support "diagonal" or "wavefront" - execution. (See Step 3 here for an example: - https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). - Frontends should emit multi-layer RNNs as a series of While operators - (with time being the inner looping dimension), with each successive - layer consuming the scan_outputs from the previous layer, possibly going - through several point-wise operators (e.g. dropout, residual - connections, linear layer). - - The input/output of subgraph (produced by loop node) matching is based - on order instead of name. The implementation will figure out the names - based on this order. - - Parameters - ========== - M - Type I. - A maximum trip-count for the loop specified at runtime. Optional. Pass - empty string to skip. - cond - Type B. - A boolean termination condition. Optional. Pass empty string to skip. - v_initial - Type V. - The initial values of any loop-carried dependencies (values that change - across loop iterations) - body - Attribute. - The graph run each iteration. It has 2+N inputs: (iteration_num, - condition, loop carried dependencies...). It has 1+N+K outputs: - (condition, loop carried dependencies..., scan_outputs...). Each - scan_output is created by concatenating the value of the specified - output value at the end of each iteration of the loop. It is an error if - the dimensions or data type of these scan_outputs change across loop - iterations. - - Returns - ======= - v_final_and_scan_outputs : Sequence[Var] - Type V. - Final N loop carried dependency values then K scan_outputs. Scan outputs - must be Tensors. - - Notes - ===== - Signature: ``ai.onnx@16::Loop``. 
- - Type constraints: - - I: `tensor(int64)` - - B: `tensor(bool)` - - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - _body_subgraph: Graph = subgraph( - typing_cast(list[Type], [Tensor(np.int64, (1,)), Tensor(np.bool_, (1,))]) - + [var.unwrap_type() for var in v_initial], - body, - ) - return ( - _Loop( - _Loop.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _Loop.Inputs( - M=unwrap_vars(M), - cond=unwrap_vars(cond), - v_initial=unwrap_vars(v_initial), - ), - out_variadic=len(_body_subgraph.requested_results) - 1, - ) - .get_output_vars( - M=get_value(M), - cond=get_value(cond), - v_initial=get_value(v_initial), - ) - .v_final_and_scan_outputs - ) - - -def lp_normalization( - input: Var, - *, - axis: int = -1, - p: int = 2, -) -> Var: - r""" - Given a matrix, apply Lp-normalization along the provided axis. - - Parameters - ========== - input - Type T. - Input matrix - axis - Attribute. - The axis on which to apply normalization, -1 mean last axis. - p - Attribute. - The order of the normalization, only 1 or 2 are supported. - - Returns - ======= - output : Var - Type T. - Matrix after normalization - - Notes - ===== - Signature: ``ai.onnx@1::LpNormalization``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _LpNormalization( - _LpNormalization.Attributes( - axis=AttrInt64(axis, name="axis"), - p=AttrInt64(p, name="p"), - ), - _LpNormalization.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def lp_pool( - X: Var, - *, - auto_pad: str = "NOTSET", - kernel_shape: Iterable[int], - p: int = 2, - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: - r""" - LpPool consumes an input tensor X and applies Lp pooling across the - tensor according to kernel sizes, stride sizes, and pad lengths. 
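As a hedged sketch (not from this patch), the ``loop`` constructor above takes a Python callable for ``body``; it receives ``(iteration_num, condition, carried...)`` as ``Var`` arguments and must return ``(condition, carried..., scan_outputs...)``. The module path ``spox.opset.ai.onnx.v17`` is assumed::

    import numpy as np
    import spox.opset.ai.onnx.v17 as op
    from spox import argument, Tensor

    m = argument(Tensor(np.int64, ()))          # maximum trip count, supplied at runtime
    keep = argument(Tensor(np.bool_, ()))       # initial termination condition
    x0 = argument(Tensor(np.float32, ("N",)))   # initial loop-carried value

    # Double the carried value each iteration; the condition is passed through unchanged.
    (x_final,) = op.loop(
        m,
        keep,
        [x0],
        body=lambda i, cond, x: [cond, op.add(x, x)],
    )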
Lp - pooling consisting of computing the Lp norm on all values of a subset of - the input tensor according to the kernel size and downsampling the data - into the output tensor Y for further processing. - - Parameters - ========== - X - Type T. - Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. For non - image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), - where N is the batch size. - auto_pad - Attribute. - auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where - default value is NOTSET, which means explicit padding is used. - SAME_UPPER or SAME_LOWER mean pad the input so that - ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis - ``i``. The padding is split between the two sides equally or almost - equally (depending on whether it is even or odd). In case the padding is - an odd number, the extra padding is added at the end for SAME_UPPER and - at the beginning for SAME_LOWER. - kernel_shape - Attribute. - The size of the kernel along each axis. - p - Attribute. - p value of the Lp norm used to pool over the input data. - pads - Attribute. - Padding for the beginning and ending along each spatial axis, it can - take any value greater than or equal to 0. The value represent the - number of pixels added to the beginning and end part of the - corresponding axis. ``pads`` format should be as follow [x1_begin, - x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels - added at the beginning of axis ``i`` and xi_end, the number of pixels - added at the end of axis ``i``. This attribute cannot be used - simultaneously with auto_pad attribute. If not present, the padding - defaults to 0 along start and end of each spatial axis. - strides - Attribute. - Stride along each spatial axis. If not present, the stride defaults to 1 - along each spatial axis. - - Returns - ======= - Y : Var - Type T. - Output data tensor from Lp pooling across the input tensor. Dimensions - will vary based on various kernel, stride, and pad sizes. - - Notes - ===== - Signature: ``ai.onnx@11::LpPool``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _LpPool( - _LpPool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - p=AttrInt64(p, name="p"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _LpPool.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def matmul( - A: Var, - B: Var, -) -> Var: - r""" - Matrix product that behaves like numpy.matmul: - https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html - - Parameters - ========== - A - Type T. - N-dimensional matrix A - B - Type T. - N-dimensional matrix B - - Returns - ======= - Y : Var - Type T. - Matrix multiply results from A \* B - - Notes - ===== - Signature: ``ai.onnx@13::MatMul``. 
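A NumPy equivalent of the ``lp_normalization`` semantics documented earlier above (not part of the patch; only ``p`` in ``{1, 2}`` is meaningful, matching the attribute's documented range)::

    import numpy as np

    def lp_normalization_reference(x, axis=-1, p=2):
        # Divide each slice by its Lp norm taken along `axis`.
        norm = np.sum(np.abs(x) ** p, axis=axis, keepdims=True) ** (1.0 / p)
        return x / norm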
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _MatMul( - _MatMul.Attributes(), - _MatMul.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .Y - ) - - -def matmul_integer( - A: Var, - B: Var, - a_zero_point: Optional[Var] = None, - b_zero_point: Optional[Var] = None, -) -> Var: - r""" - Matrix product that behaves like numpy.matmul: - https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. - The production MUST never overflow. The accumulation may overflow if and - only if in 32 bits. - - Parameters - ========== - A - Type T1. - N-dimensional matrix A - B - Type T2. - N-dimensional matrix B - a_zero_point - Type T1. - Zero point tensor for input 'A'. It's optional and default value is 0. - It could be a scalar or N-D tensor. Scalar refers to per tensor - quantization whereas N-D refers to per row quantization. If the input is - 2D of shape [M, K] then zero point tensor may be an M element vector - [zp_1, zp_2, ..., zp_M]. If the input is N-D tensor with shape [D1, D2, - M, K] then zero point tensor may have shape [D1, D2, M, 1]. - b_zero_point - Type T2. - Zero point tensor for input 'B'. It's optional and default value is 0. - It could be a scalar or a N-D tensor, Scalar refers to per tensor - quantization whereas N-D refers to per col quantization. If the input is - 2D of shape [K, N] then zero point tensor may be an N element vector - [zp_1, zp_2, ..., zp_N]. If the input is N-D tensor with shape [D1, D2, - K, N] then zero point tensor may have shape [D1, D2, 1, N]. - - Returns - ======= - Y : Var - Type T3. - Matrix multiply results from A \* B - - Notes - ===== - Signature: ``ai.onnx@10::MatMulInteger``. - - Type constraints: - - T1: `tensor(int8)`, `tensor(uint8)` - - T2: `tensor(int8)`, `tensor(uint8)` - - T3: `tensor(int32)` - """ - return ( - _MatMulInteger( - _MatMulInteger.Attributes(), - _MatMulInteger.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - a_zero_point=unwrap_vars(a_zero_point), - b_zero_point=unwrap_vars(b_zero_point), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - a_zero_point=get_value(a_zero_point), - b_zero_point=get_value(b_zero_point), - ) - .Y - ) - - -def max( - data_0: Sequence[Var], -) -> Var: - r""" - Element-wise max of each of the input tensors (with Numpy-style - broadcasting support). All inputs and outputs must have the same data - type. This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - data_0 - Type T. - List of tensors for max. - - Returns - ======= - max : Var - Type T. - Output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Max``. 
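The zero-point handling documented for ``matmul_integer`` above amounts to the following NumPy sketch (not part of the patch): zero points are subtracted before the product is accumulated in int32::

    import numpy as np

    def matmul_integer_reference(a, b, a_zero_point=0, b_zero_point=0):
        a32 = a.astype(np.int32) - np.asarray(a_zero_point, dtype=np.int32)
        b32 = b.astype(np.int32) - np.asarray(b_zero_point, dtype=np.int32)
        return a32 @ b32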
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Max( - _Max.Attributes(), - _Max.Inputs( - data_0=unwrap_vars(data_0), - ), - ) - .get_output_vars( - data_0=get_value(data_0), - ) - .max - ) - - -def max_pool( - X: Var, - *, - auto_pad: str = "NOTSET", - ceil_mode: int = 0, - dilations: Optional[Iterable[int]] = None, - kernel_shape: Iterable[int], - pads: Optional[Iterable[int]] = None, - storage_order: int = 0, - strides: Optional[Iterable[int]] = None, -) -> tuple[Var, Var]: - r""" - MaxPool consumes an input tensor X and applies max pooling across the - tensor according to kernel sizes, stride sizes, and pad lengths. max - pooling consisting of computing the max on all values of a subset of the - input tensor according to the kernel size and downsampling the data into - the output tensor Y for further processing. The output spatial shape is - calculated differently depending on whether explicit padding is used, - where pads is employed, or auto padding is used, where auto_pad is - utilized. With explicit padding - (https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d): - - :: - - output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) - - or - - :: - - output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) - - if ceil_mode is enabled. ``pad_shape[i]`` is the sum of pads along axis - ``i``. Sliding windows that would start in the right padded region are - ignored. - - ``auto_pad`` is a DEPRECATED attribute. If you are using them currently, - the output spatial shape will be following when ceil_mode is enabled: - - :: - - VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) - - or when ceil_mode is disabled - (https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D): - - :: - - VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]) + 1 - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor((input_spatial_shape[i] - 1) / strides_spatial_shape[i]) + 1 - - And pad shape will be following if ``SAME_UPPER`` or ``SAME_LOWER``: - - :: - - pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] - - The output of each pooling window is maximum number of elements exclude - pad. - - Parameters - ========== - X - Type T. - Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. For non - image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), - where N is the batch size. Optionally, if dimension denotation is in - effect, the operation expects the input data tensor to arrive with the - dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, - DATA_FEATURE ...]. - auto_pad - Attribute. 
- auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where - default value is NOTSET, which means explicit padding is used. - SAME_UPPER or SAME_LOWER mean pad the input so that - ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis - ``i``. The padding is split between the two sides equally or almost - equally (depending on whether it is even or odd). In case the padding is - an odd number, the extra padding is added at the end for SAME_UPPER and - at the beginning for SAME_LOWER. - ceil_mode - Attribute. - Whether to use ceil or floor (default) to compute the output shape. - dilations - Attribute. - Dilation value along each spatial axis of filter. If not present, the - dilation defaults to 1 along each spatial axis. - kernel_shape - Attribute. - The size of the kernel along each axis. - pads - Attribute. - Padding for the beginning and ending along each spatial axis, it can - take any value greater than or equal to 0. The value represent the - number of pixels added to the beginning and end part of the - corresponding axis. ``pads`` format should be as follow [x1_begin, - x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels - added at the beginning of axis ``i`` and xi_end, the number of pixels - added at the end of axis ``i``. This attribute cannot be used - simultaneously with auto_pad attribute. If not present, the padding - defaults to 0 along start and end of each spatial axis. - storage_order - Attribute. - The storage order of the tensor. 0 is row major, and 1 is column major. - This attribute is used only to convert an n-tuple index value into a - single integer value for producing the second output. - strides - Attribute. - Stride along each spatial axis. If not present, the stride defaults to 1 - along each spatial axis. - - Returns - ======= - Y : Var - Type T. - Output data tensor from average or max pooling across the input tensor. - Dimensions will vary based on various kernel, stride, and pad sizes. - Floor value of the dimension is used - Indices : Var - Type I. - Indices tensor from max pooling across the input tensor. The dimensions - of indices are the same as output tensor. The values in indices of are - the indices of the selected values during pooling. The indices are - computed as flatten 1-D tensor, and the indices do not consider padding. - So the values in indices are in [0, N x C x D1 x ... x Dn). - - Notes - ===== - Signature: ``ai.onnx@12::MaxPool``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int8)`, `tensor(uint8)` - - I: `tensor(int64)` - """ - return ( - _MaxPool( - _MaxPool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - storage_order=AttrInt64(storage_order, name="storage_order"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _MaxPool.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - ._unpack_to_any() - ) - - -def max_roi_pool( - X: Var, - rois: Var, - *, - pooled_shape: Iterable[int], - spatial_scale: float = 1.0, -) -> Var: - r""" - ROI max pool consumes an input tensor X and region of interests (RoIs) - to apply max pooling across each RoI, to produce output 4-D tensor of - shape (num_rois, channels, pooled_shape[0], pooled_shape[1]). - - Parameters - ========== - X - Type T. 
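A small helper (not from the patch) mirroring the explicit-padding output-shape formula quoted in the ``max_pool`` docstring above; ``pad`` stands for ``pad_shape[i]``, the summed padding along one axis::

    import math

    def max_pool_output_dim(input_dim, pad, kernel, stride=1, dilation=1, ceil_mode=False):
        numer = input_dim + pad - dilation * (kernel - 1) - 1
        round_ = math.ceil if ceil_mode else math.floor
        return round_(numer / stride + 1)

    # e.g. a 32-element axis, kernel 3, stride 2, total padding 2 -> 16
    assert max_pool_output_dim(32, pad=2, kernel=3, stride=2) == 16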
- Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. - rois - Type T. - RoIs (Regions of Interest) to pool over. Should be a 2-D tensor of shape - (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], ...]. - pooled_shape - Attribute. - ROI pool output shape (height, width). - spatial_scale - Attribute. - Multiplicative spatial scale factor to translate ROI coordinates from - their input scale to the scale used when pooling. - - Returns - ======= - Y : Var - Type T. - RoI pooled output 4-D tensor of shape (num_rois, channels, - pooled_shape[0], pooled_shape[1]). - - Notes - ===== - Signature: ``ai.onnx@1::MaxRoiPool``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _MaxRoiPool( - _MaxRoiPool.Attributes( - pooled_shape=AttrInt64s(pooled_shape, name="pooled_shape"), - spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), - ), - _MaxRoiPool.Inputs( - X=unwrap_vars(X), - rois=unwrap_vars(rois), - ), - ) - .get_output_vars( - X=get_value(X), - rois=get_value(rois), - ) - .Y - ) - - -def max_unpool( - X: Var, - I: Var, - output_shape: Optional[Var] = None, - *, - kernel_shape: Iterable[int], - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: - r""" - MaxUnpool essentially computes the partial inverse of the MaxPool op. - The input information to this op is typically the output information - from a MaxPool op. The first input tensor X is the tensor that needs to - be unpooled, which is typically the pooled tensor (first output) from - MaxPool. The second input tensor, I, contains the indices to the - (locally maximal) elements corresponding to the elements in the first - input tensor X. Input tensor I is typically the second output of the - MaxPool op. The third (optional) input is a tensor that specifies the - output size of the unpooling operation. - - MaxUnpool is intended to do 'partial' inverse of the MaxPool op. - 'Partial' because all the non-maximal values from the original input to - MaxPool are set to zero in the output of the MaxUnpool op. Pooling the - result of an unpooling operation should give back the original input to - the unpooling op. - - MaxUnpool can produce the same output size for several input sizes, - which makes unpooling op ambiguous. The third input argument, - output_size, is meant to disambiguate the op and produce output tensor - of known/predictable size. - - In addition to the inputs, MaxUnpool takes three attributes, namely - kernel_shape, strides, and pads, which define the exact unpooling op. - The attributes typically have the same values as the corresponding - pooling op that the unpooling op is trying to invert. - - Parameters - ========== - X - Type T1. - Input data tensor that has to be unpooled. This tensor is typically the - first output of the MaxPool op.Dimensions for image case are (N x C x H - x W), where N is the batch size, C is the number of channels, and H and - W are the height and the width of the data. For non-image case, the - dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the - batch size. Optionally, if dimension denotation is in effect, the - operation expects the input data tensor to arrive with the dimension - denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE - ...]. - I - Type T2. 
- Input data tensor containing the indices corresponding to elements in - the first input tensor X.This tensor is typically the second output of - the MaxPool op.Dimensions must be the same as input tensor X. The - indices are linear, i.e. computed considering the tensor as flattened - 1-D tensor, assuming row-major storage. Also, the linear indices should - not consider padding. So the values in indices are in the range [0, N x - C x D1 x ... x Dn). - output_shape - Type T2. - The shape of the output can be explicitly set which will cause pads - values to be auto generated. If 'output_shape' is specified, 'pads' - values are ignored. - kernel_shape - Attribute. - The size of the kernel along each axis. - pads - Attribute. - Padding for the beginning and ending along each spatial axis, it can - take any value greater than or equal to 0. The value represent the - number of pixels added to the beginning and end part of the - corresponding axis. ``pads`` format should be as follow [x1_begin, - x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels - added at the beginning of axis ``i`` and xi_end, the number of pixels - added at the end of axis ``i``. This attribute cannot be used - simultaneously with auto_pad attribute. If not present, the padding - defaults to 0 along start and end of each spatial axis. - strides - Attribute. - Stride along each spatial axis. If not present, the stride defaults to 1 - along each spatial axis. - - Returns - ======= - output : Var - Type T1. - Output data tensor that contains the result of the unpooling. - - Notes - ===== - Signature: ``ai.onnx@11::MaxUnpool``. - - Type constraints: - - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(int64)` - """ - return ( - _MaxUnpool( - _MaxUnpool.Attributes( - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _MaxUnpool.Inputs( - X=unwrap_vars(X), - I=unwrap_vars(I), - output_shape=unwrap_vars(output_shape), - ), - ) - .get_output_vars( - X=get_value(X), - I=get_value(I), - output_shape=get_value(output_shape), - ) - .output - ) - - -def mean( - data_0: Sequence[Var], -) -> Var: - r""" - Element-wise mean of each of the input tensors (with Numpy-style - broadcasting support). All inputs and outputs must have the same data - type. This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - data_0 - Type T. - List of tensors for mean. - - Returns - ======= - mean : Var - Type T. - Output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Mean``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Mean( - _Mean.Attributes(), - _Mean.Inputs( - data_0=unwrap_vars(data_0), - ), - ) - .get_output_vars( - data_0=get_value(data_0), - ) - .mean - ) - - -def mean_variance_normalization( - X: Var, - *, - axes: Iterable[int] = (0, 2, 3), -) -> Var: - r""" - A MeanVarianceNormalization Function: Perform mean variance - normalization on the input tensor X using formula: - ``(X-EX)/sqrt(E(X-EX)^2)`` - - Parameters - ========== - X - Type T. - Input tensor - axes - Attribute. - A list of integers, along which to reduce. The default is to calculate - along axes [0,2,3] for calculating mean and variance along each channel. - Two variables with the same C-coordinate are associated with the same - mean and variance. 
- - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@13::MeanVarianceNormalization``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _MeanVarianceNormalization( - _MeanVarianceNormalization.Attributes( - axes=AttrInt64s(axes, name="axes"), - ), - _MeanVarianceNormalization.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def mel_weight_matrix( - num_mel_bins: Var, - dft_length: Var, - sample_rate: Var, - lower_edge_hertz: Var, - upper_edge_hertz: Var, - *, - output_datatype: int = 1, -) -> Var: - r""" - Generate a MelWeightMatrix that can be used to re-weight a Tensor - containing a linearly sampled frequency spectra (from DFT or STFT) into - num_mel_bins frequency information based on the [lower_edge_hertz, - upper_edge_hertz] range on the mel scale. This function defines the mel - scale in terms of a frequency in hertz according to the following - formula: - - :: - - mel(f) = 2595 * log10(1 + f/700) - - In the returned matrix, all the triangles (filterbanks) have a peak - value of 1.0. - - The returned MelWeightMatrix can be used to right-multiply a spectrogram - S of shape [frames, num_spectrogram_bins] of linear scale spectrum - values (e.g. STFT magnitudes) to generate a "mel spectrogram" M of shape - [frames, num_mel_bins]. - - Parameters - ========== - num_mel_bins - Type T1. - The number of bands in the mel spectrum. - dft_length - Type T1. - The size of the original DFT. The size of the original DFT is used to - infer the size of the onesided DFT, which is understood to be - floor(dft_length/2) + 1, i.e. the spectrogram only contains the - nonredundant DFT bins. - sample_rate - Type T1. - Samples per second of the input signal used to create the spectrogram. - Used to figure out the frequencies corresponding to each spectrogram - bin, which dictates how they are mapped into the mel scale. - lower_edge_hertz - Type T2. - Lower bound on the frequencies to be included in the mel spectrum. This - corresponds to the lower edge of the lowest triangular band. - upper_edge_hertz - Type T2. - The desired top edge of the highest frequency band. - output_datatype - Attribute. - The data type of the output tensor. Strictly must be one of the values - from DataType enum in TensorProto whose values correspond to T3. The - default value is 1 = FLOAT. - - Returns - ======= - output : Var - Type T3. - The Mel Weight Matrix. The output has the shape: [floor(dft_length/2) + - 1][num_mel_bins]. - - Notes - ===== - Signature: ``ai.onnx@17::MelWeightMatrix``. 
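For reference (not part of the patch), the ``mean_variance_normalization`` formula ``(X - E[X]) / sqrt(E[(X - E[X])^2])`` over the default axes ``(0, 2, 3)`` of an NCHW tensor reads in NumPy as::

    import numpy as np

    def mvn_reference(x, axes=(0, 2, 3)):
        mean = x.mean(axis=axes, keepdims=True)
        variance = ((x - mean) ** 2).mean(axis=axes, keepdims=True)
        return (x - mean) / np.sqrt(variance)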
- - Type constraints: - - T1: `tensor(int32)`, `tensor(int64)` - - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T3: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _MelWeightMatrix( - _MelWeightMatrix.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - ), - _MelWeightMatrix.Inputs( - num_mel_bins=unwrap_vars(num_mel_bins), - dft_length=unwrap_vars(dft_length), - sample_rate=unwrap_vars(sample_rate), - lower_edge_hertz=unwrap_vars(lower_edge_hertz), - upper_edge_hertz=unwrap_vars(upper_edge_hertz), - ), - ) - .get_output_vars( - num_mel_bins=get_value(num_mel_bins), - dft_length=get_value(dft_length), - sample_rate=get_value(sample_rate), - lower_edge_hertz=get_value(lower_edge_hertz), - upper_edge_hertz=get_value(upper_edge_hertz), - ) - .output - ) - - -def min( - data_0: Sequence[Var], -) -> Var: - r""" - Element-wise min of each of the input tensors (with Numpy-style - broadcasting support). All inputs and outputs must have the same data - type. This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - data_0 - Type T. - List of tensors for min. - - Returns - ======= - min : Var - Type T. - Output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Min``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Min( - _Min.Attributes(), - _Min.Inputs( - data_0=unwrap_vars(data_0), - ), - ) - .get_output_vars( - data_0=get_value(data_0), - ) - .min - ) - - -def mod( - A: Var, - B: Var, - *, - fmod: int = 0, -) -> Var: - r""" - Performs element-wise binary modulus (with Numpy-style broadcasting - support). The sign of the remainder is the same as that of the Divisor. - - Mod operator can also behave like C fmod() or numpy.fmod. In this case, - the sign of the remainder however, will be the same as the Dividend (in - contrast to integer mod). To force a behavior like numpy.fmod() an - 'fmod' Attribute is provided. This attribute is set to 0 by default - causing the behavior to be like integer mod. Setting this attribute to 1 - causes the remainder to be calculated similar to that of numpy.fmod(). - - If the input type is floating point, then ``fmod`` attribute must be set - to 1. - - In case of dividend being zero, the results will be platform dependent. - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - Dividend tensor - B - Type T. - Divisor tensor - fmod - Attribute. - Whether the operator should behave like fmod (default=0 meaning it will - do integer mods); Set this to 1 to force fmod treatment - - Returns - ======= - C : Var - Type T. - Remainder tensor - - Notes - ===== - Signature: ``ai.onnx@13::Mod``. 
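The hertz-to-mel mapping used by ``mel_weight_matrix`` above, written out as a one-line helper (not part of the patch)::

    import numpy as np

    def hertz_to_mel(f):
        return 2595.0 * np.log10(1.0 + f / 700.0)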
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Mod( - _Mod.Attributes( - fmod=AttrInt64(fmod, name="fmod"), - ), - _Mod.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def mul( - A: Var, - B: Var, -) -> Var: - r""" - Performs element-wise binary multiplication (with Numpy-style - broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - (Opset 14 change): Extend supported types to include uint8, int8, - uint16, and int16. - - Parameters - ========== - A - Type T. - First operand. - B - Type T. - Second operand. - - Returns - ======= - C : Var - Type T. - Result, has same element type as two inputs - - Notes - ===== - Signature: ``ai.onnx@14::Mul``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Mul( - _Mul.Attributes(), - _Mul.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def multinomial( - input: Var, - *, - dtype: npt.DTypeLike = np.int32, - sample_size: int = 1, - seed: Optional[float] = None, -) -> Var: - r""" - Generate a tensor of samples from a multinomial distribution according - to the probabilities of each of the possible outcomes. - - Parameters - ========== - input - Type T1. - Input tensor with shape [batch_size, class_size], where class_size is - the number of all possible outcomes. Each value along the axis zero - represents the unnormalized log-probability of each corresponding - outcome in a batch. - dtype - Attribute. - (Optional) The data type for the elements of the output tensor, if not - specified, we will use int32. - sample_size - Attribute. - Number of times to sample. - seed - Attribute. - (Optional) Seed to the random generator, if not specified we will auto - generate one. - - Returns - ======= - output : Var - Type T2. - Output tensor with shape [batch_size, sample_size], where sample_size is - the number of times to sample. Each value along the axis zero represents - the outcome of the corresponding sample in a batch. - - Notes - ===== - Signature: ``ai.onnx@7::Multinomial``. - - Type constraints: - - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(int32)`, `tensor(int64)` - """ - return ( - _Multinomial( - _Multinomial.Attributes( - dtype=AttrDtype(dtype, name="dtype"), - sample_size=AttrInt64(sample_size, name="sample_size"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _Multinomial.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def neg( - X: Var, -) -> Var: - r""" - Neg takes one input data (Tensor) and produces one output data - (Tensor) where each element flipped sign, y = -x, is applied to the - tensor elementwise. - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@13::Neg``. 
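A NumPy illustration (not part of the patch) of the ``fmod`` attribute described for ``mod`` above: with ``fmod=0`` the remainder takes the divisor's sign, with ``fmod=1`` it takes the dividend's, matching ``np.mod`` and ``np.fmod`` respectively::

    import numpy as np

    a = np.array([-7, 7], dtype=np.int64)
    b = np.array([3, -3], dtype=np.int64)
    print(np.mod(a, b))   # [ 2 -2]  integer mod: sign follows the divisor
    print(np.fmod(a, b))  # [-1  1]  fmod: sign follows the dividend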
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)` - """ - return ( - _Neg( - _Neg.Attributes(), - _Neg.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def negative_log_likelihood_loss( - input: Var, - target: Var, - weight: Optional[Var] = None, - *, - ignore_index: Optional[int] = None, - reduction: str = "mean", -) -> Var: - r""" - A NegativeLogLikelihoodLoss operator computes (weighted) negative log - likelihood loss. Its "input" tensor has the shape of (N, C, d1, d2, ..., - dk) where k >= 0. The "input" tensor contains log-probabilities for - input[n, :, d_1, d_2,..., d_k] being in a class of [0, C). The - operator's "target" input tensor has the shape of (N, d1, d2, ..., dk). - It encodes class labels (one of C classes) or it may contain a special - value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x - dk samples. The loss value for input[n, :, d_1, d_2,...d_k] being - classified as class c = target[n][d_1][d_2]...[d_k] is computed as: - - :: - - loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k]. - - When an optional "weight" is provided, the sample loss is calculated as: - - :: - - loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c]. - - loss is zero for the case when target-value equals ignore_index. - - :: - - loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index - - If "reduction" attribute is set to "none", the operator's output will be - the above loss with shape (N, d1, d2, ..., dk). If "reduction" attribute - is set to "mean" (the default attribute value), the output loss is - (weight) averaged: - - :: - - mean(loss), if "weight" is not provided, - - or if weight is provided, - - :: - - sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples. - - If "reduction" attribute is set to "sum", the output is a scalar: - ``sum(loss)``. - - See also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss. - - Example 1: - - :: - - // negative log likelihood loss, "none" reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - - loss = np.zeros((N, d1)) - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] - - // print(loss) - // [[-3. -2.] - // [-0. -2.]] - - Example 2: - - :: - - // weighted negative log likelihood loss, sum reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - weight = [0.2, 0.3, 0.1] - loss = np.zeros((N, d1)) - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] * weight[c] - - loss = np.sum(loss) - // print(loss) - // -1.1 - - Example 3: - - :: - - // weighted negative log likelihood loss, mean reduction - N, C, d1 = 2, 3, 2 - input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]], - [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]] - target = [[2, 1], [0, 2]] - weight = [0.2, 0.3, 0.1] - loss = np.zeros((N, d1)) - weight_total = 0 - for n in range(N): - for d_1 in range(d1): - c = target[n][d_1] - loss[n][d_1] = -input[n][c][d_1] * weight[c] - weight_total = weight_total + weight[c] - - loss = np.sum(loss) / weight_total - // print(loss) - // -1.57 - - Parameters - ========== - input - Type T. - Input tensor of shape (N, C) or (N, C, d1, d2, ..., dk). 
- target - Type Tind. - Target tensor of shape (N) or (N, d1, d2, ..., dk). Target element value - shall be in range of [0, C). If ignore_index is specified, it may have a - value outside [0, C) and the target values should either be in the range - [0, C) or have the value ignore_index. - weight - Type T. - Optional rescaling weight tensor. If given, it has to be a tensor of - size C. Otherwise, it is treated as if having all ones. - ignore_index - Attribute. - Specifies a target value that is ignored and does not contribute to the - input gradient. It's an optional value. - reduction - Attribute. - Type of reduction to apply to loss: none, sum, mean (default). 'none': - the output is the loss for each sample. 'sum': the output will be - summed. 'mean': the sum of the output will be divided by the sum of - applied weights. - - Returns - ======= - loss : Var - Type T. - The negative log likelihood loss - - Notes - ===== - Signature: ``ai.onnx@13::NegativeLogLikelihoodLoss``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - - Tind: `tensor(int32)`, `tensor(int64)` - """ - return ( - _NegativeLogLikelihoodLoss( - _NegativeLogLikelihoodLoss.Attributes( - ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), - reduction=AttrString(reduction, name="reduction"), - ), - _NegativeLogLikelihoodLoss.Inputs( - input=unwrap_vars(input), - target=unwrap_vars(target), - weight=unwrap_vars(weight), - ), - ) - .get_output_vars( - input=get_value(input), - target=get_value(target), - weight=get_value(weight), - ) - .loss - ) - - -def non_max_suppression( - boxes: Var, - scores: Var, - max_output_boxes_per_class: Optional[Var] = None, - iou_threshold: Optional[Var] = None, - score_threshold: Optional[Var] = None, - *, - center_point_box: int = 0, -) -> Var: - r""" - Filter out boxes that have high intersection-over-union (IOU) overlap - with previously selected boxes. Bounding boxes with score less than - score_threshold are removed. Bounding box format is indicated by - attribute center_point_box. Note that this algorithm is agnostic to - where the origin is in the coordinate system and more generally is - invariant to orthogonal transformations and translations of the - coordinate system; thus translating or reflections of the coordinate - system result in the same boxes being selected by the algorithm. The - selected_indices output is a set of integers indexing into the input - collection of bounding boxes representing the selected boxes. The - bounding box coordinates corresponding to the selected indices can then - be obtained using the Gather or GatherND operation. - - Parameters - ========== - boxes - Type tensor(float). - An input tensor with shape [num_batches, spatial_dimension, 4]. The - single box data format is indicated by center_point_box. - scores - Type tensor(float). - An input tensor with shape [num_batches, num_classes, spatial_dimension] - max_output_boxes_per_class - Type tensor(int64). - Integer representing the maximum number of boxes to be selected per - batch per class. It is a scalar. Default to 0, which means no output. - iou_threshold - Type tensor(float). - Float representing the threshold for deciding whether boxes overlap too - much with respect to IOU. It is scalar. Value range [0, 1]. Default to - 0. - score_threshold - Type tensor(float). - Float representing the threshold for deciding when to remove boxes based - on score. It is a scalar. - center_point_box - Attribute. - Integer indicate the format of the box data. 
The default is 0. 0 - the - box data is supplied as [y1, x1, y2, x2] where (y1, x1) and (y2, x2) are - the coordinates of any diagonal pair of box corners and the coordinates - can be provided as normalized (i.e., lying in the interval [0, 1]) or - absolute. Mostly used for TF models. 1 - the box data is supplied as - [x_center, y_center, width, height]. Mostly used for Pytorch models. - - Returns - ======= - selected_indices : Var - Type tensor(int64). - selected indices from the boxes tensor. [num_selected_indices, 3], the - selected index format is [batch_index, class_index, box_index]. - - Notes - ===== - Signature: ``ai.onnx@11::NonMaxSuppression``. - - """ - return ( - _NonMaxSuppression( - _NonMaxSuppression.Attributes( - center_point_box=AttrInt64(center_point_box, name="center_point_box"), - ), - _NonMaxSuppression.Inputs( - boxes=unwrap_vars(boxes), - scores=unwrap_vars(scores), - max_output_boxes_per_class=unwrap_vars(max_output_boxes_per_class), - iou_threshold=unwrap_vars(iou_threshold), - score_threshold=unwrap_vars(score_threshold), - ), - ) - .get_output_vars( - boxes=get_value(boxes), - scores=get_value(scores), - max_output_boxes_per_class=get_value(max_output_boxes_per_class), - iou_threshold=get_value(iou_threshold), - score_threshold=get_value(score_threshold), - ) - .selected_indices - ) - - -def non_zero( - X: Var, -) -> Var: - r""" - Returns the indices of the elements that are non-zero (in row-major - order - by dimension). NonZero behaves similar to numpy.nonzero: - https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html, - but for scalar input, NonZero produces output shape (0, N) instead of - (1, N), which is different from Numpy's behavior. - - Parameters - ========== - X - Type T. - input - - Returns - ======= - Y : Var - Type tensor(int64). - output - - Notes - ===== - Signature: ``ai.onnx@13::NonZero``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _NonZero( - _NonZero.Attributes(), - _NonZero.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def not_( - X: Var, -) -> Var: - r""" - Returns the negation of the input tensor element-wise. - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@1::Not``. - - Type constraints: - - T: `tensor(bool)` - """ - return ( - _Not( - _Not.Attributes(), - _Not.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def one_hot( - indices: Var, - depth: Var, - values: Var, - *, - axis: int = -1, -) -> Var: - r""" - Produces a one-hot tensor based on inputs. The locations represented by - the index values in the 'indices' input tensor will have 'on_value' and - the other locations will have 'off_value' in the output tensor, where - 'on_value' and 'off_value' are specified as part of required input - argument 'values', which is a two-element tensor of format [off_value, - on_value]. The rank of the output tensor will be one greater than the - rank of the input tensor. The additional dimension is for one-hot - representation. The additional dimension will be inserted at the - position specified by 'axis'. 
If 'axis' is not specified then then - additional dimension will be inserted as the innermost dimension, i.e. - axis=-1. The size of the additional dimension is specified by required - scalar input 'depth'. The type of the output tensor is the same as the - type of the 'values' input. Any entries in the 'indices' input tensor - with values outside the range [-depth, depth-1] will result in one-hot - representation with all 'off_value' values in the output tensor. - - :: - - when axis = 0: - output[input[i, j, k], i, j, k] = 1 for all i, j, k and 0 otherwise. - - when axis = -1: - output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise. - - Parameters - ========== - indices - Type T1. - Input tensor containing indices. Any entries in the 'indices' input - tensor with values outside the range [-depth, depth-1] will result in - one-hot representation with all 'off_value' values in the output - tensor.In case 'indices' is of non-integer type, the values will be - casted to int64 before use. - depth - Type T2. - Scalar or Rank 1 tensor containing exactly one element, specifying the - number of classes in one-hot tensor. This is also the size of the - one-hot dimension (specified by 'axis' attribute) added on in the output - tensor. The values in the 'indices' input tensor are expected to be in - the range [-depth, depth-1]. In case 'depth' is of non-integer type, it - will be casted to int64 before use. - values - Type T3. - Rank 1 tensor containing exactly two elements, in the format [off_value, - on_value], where 'on_value' is the value used for filling locations - specified in 'indices' input tensor, and 'off_value' is the value used - for filling locations other than those specified in 'indices' input - tensor. - axis - Attribute. - (Optional) Axis along which one-hot representation in added. Default: - axis=-1. axis=-1 means that the additional dimension will be inserted as - the innermost/last dimension in the output tensor. Negative value means - counting dimensions from the back. Accepted range is [-r-1, r] where r = - rank(indices). - - Returns - ======= - output : Var - Type T3. - Tensor of rank one greater than input tensor 'indices', i.e. - rank(output) = rank(indices) + 1. The data type for the elements of the - output tensor is the same as the type of input 'values' is used. - - Notes - ===== - Signature: ``ai.onnx@11::OneHot``. 
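A similar sketch for the ``one_hot`` constructor above, under the same import-path assumption as the earlier sketch.

import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v17 as op  # assumed import path

indices = argument(Tensor(np.int64, ("N",)))  # class indices in [-depth, depth-1]
depth = argument(Tensor(np.int64, ()))        # scalar number of classes
values = argument(Tensor(np.float32, (2,)))   # [off_value, on_value]

# Adds the one-hot dimension as the innermost axis (axis=-1 is the default).
encoded = op.one_hot(indices, depth, values, axis=-1)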
- - Type constraints: - - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T3: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _OneHot( - _OneHot.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _OneHot.Inputs( - indices=unwrap_vars(indices), - depth=unwrap_vars(depth), - values=unwrap_vars(values), - ), - ) - .get_output_vars( - indices=get_value(indices), - depth=get_value(depth), - values=get_value(values), - ) - .output - ) - - -def optional( - input: Optional[Var] = None, - *, - type: Optional[Type] = None, -) -> Var: - r""" - Constructs an optional-type value containing either an empty optional of - a certain type specified by the attribute, or a non-empty value - containing the input element. - - Parameters - ========== - input - Type V. - The input element. - type - Attribute. - Type of the element in the optional output - - Returns - ======= - output : Var - Type O. - The optional output enclosing the input element. - - Notes - ===== - Signature: ``ai.onnx@15::Optional``. - - Type constraints: - - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` - """ - return ( - _Optional( - _Optional.Attributes( - type=AttrType.maybe(type, name="type"), - ), - _Optional.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def optional_get_element( - input: Var, -) -> Var: - r""" - Outputs the element in the optional-type input. 
It is an error if the - input value does not have an element and the behavior is undefined in - this case. - - Parameters - ========== - input - Type O. - The optional input. - - Returns - ======= - output : Var - Type V. - Output element in the optional input. - - Notes - ===== - Signature: ``ai.onnx@15::OptionalGetElement``. - - Type constraints: - - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` - - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _OptionalGetElement( - _OptionalGetElement.Attributes(), - _OptionalGetElement.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def optional_has_element( - input: Var, -) -> Var: - r""" - Returns true if the optional-type input contains an element. If it is an - empty optional-type, this op returns false. - - Parameters - ========== - input - Type O. - The optional input. - - Returns - ======= - output : Var - Type B. - A scalar boolean tensor. If true, it indicates that optional-type input - contains an element. Otherwise, it is empty. - - Notes - ===== - Signature: ``ai.onnx@15::OptionalHasElement``. 
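The optional-type constructors above compose naturally; a hedged sketch, with the same assumed import path as before.

import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v17 as op  # assumed import path

x = argument(Tensor(np.float32, (3,)))
opt = op.optional(x)                      # wrap a tensor into an optional value
present = op.optional_has_element(opt)    # scalar boolean tensor
unwrapped = op.optional_get_element(opt)  # recovers the enclosed element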
- - Type constraints: - - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` - - B: `tensor(bool)` - """ - return ( - _OptionalHasElement( - _OptionalHasElement.Attributes(), - _OptionalHasElement.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def or_( - A: Var, - B: Var, -) -> Var: - r""" - Returns the tensor resulted from performing the ``or`` logical operation - elementwise on the input tensors ``A`` and ``B`` (with Numpy-style - broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the logical operator. - B - Type T. - Second input operand for the logical operator. - - Returns - ======= - C : Var - Type T1. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@7::Or``. - - Type constraints: - - T: `tensor(bool)` - - T1: `tensor(bool)` - """ - return ( - _Or( - _Or.Attributes(), - _Or.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def prelu( - X: Var, - slope: Var, -) -> Var: - r""" - PRelu takes input data (Tensor) and slope tensor as input, and - produces one output data (Tensor) where the function - ``f(x) = slope * x for x < 0``, ``f(x) = x for x >= 0``., is applied to - the data tensor elementwise. This operator supports **unidirectional - broadcasting** (tensor slope should be unidirectional broadcastable to - input tensor X); for more details please check `the - doc `__. - - Parameters - ========== - X - Type T. - Input tensor - slope - Type T. - Slope tensor. The shape of slope can be smaller than first input X; if - so, its shape must be unidirectional broadcastable to X - - Returns - ======= - Y : Var - Type T. - Output tensor (same size as X) - - Notes - ===== - Signature: ``ai.onnx@16::PRelu``. 
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _PRelu( - _PRelu.Attributes(), - _PRelu.Inputs( - X=unwrap_vars(X), - slope=unwrap_vars(slope), - ), - ) - .get_output_vars( - X=get_value(X), - slope=get_value(slope), - ) - .Y - ) - - -def pad( - data: Var, - pads: Var, - constant_value: Optional[Var] = None, - *, - mode: str = "constant", -) -> Var: - r""" - Given a tensor containing the data to be padded (``data``), a tensor - containing the number of start and end pad values for axis (``pads``), - (optionally) a ``mode``, and (optionally) ``constant_value``, a padded - tensor (``output``) is generated. - - The three supported ``modes`` are (similar to corresponding modes - supported by ``numpy.pad``): - - 1) ``constant``\ (default) - pads with a given constant value as - specified by ``constant_value`` (which defaults to 0, empty string, - or False) - - 2) ``reflect`` - pads with the reflection of the vector mirrored on the - first and last values of the vector along each axis - - 3) ``edge`` - pads with the edge values of array - - Example 1 (``constant`` mode): Insert 0 pads to the beginning of the - second dimension. - - data = [ [1.0, 1.2], [2.3, 3.4], [4.5, 5.7], ] - - pads = [0, 2, 0, 0] - - mode = 'constant' - - constant_value = 0.0 - - output = [ [0.0, 0.0, 1.0, 1.2], [0.0, 0.0, 2.3, 3.4], [0.0, 0.0, 4.5, - 5.7], ] - - Example 2 (``reflect`` mode): data = [ [1.0, 1.2], [2.3, 3.4], [4.5, - 5.7], ] - - pads = [0, 2, 0, 0] - - mode = 'reflect' - - output = [ [1.0, 1.2, 1.0, 1.2], [2.3, 3.4, 2.3, 3.4], [4.5, 5.7, 4.5, - 5.7], ] - - Example 3 (``edge`` mode): data = [ [1.0, 1.2], [2.3, 3.4], [4.5, 5.7], - ] - - pads = [0, 2, 0, 0] - - mode = 'edge' - - output = [ [1.0, 1.0, 1.0, 1.2], [2.3, 2.3, 2.3, 3.4], [4.5, 4.5, 4.5, - 5.7], ] - - Parameters - ========== - data - Type T. - Input tensor. - pads - Type tensor(int64). - Tensor of integers indicating the number of padding elements to add or - remove (if negative) at the beginning and end of each axis. For 2D input - tensor, it is the number of pixels. ``pads`` should be a 1D tensor of - shape [2 \* input_rank]. ``pads`` format should be: [x1_begin, - x2_begin,...,x1_end, x2_end,...], where xi_begin is the number of pad - values added at the beginning of axis ``i`` and xi_end, the number of - pad values added at the end of axis ``i``. - constant_value - Type T. - (Optional) A scalar value to be used if the mode chosen is ``constant`` - (by default it is 0, empty string or False). - mode - Attribute. - Supported modes: ``constant``\ (default), ``reflect``, ``edge`` - - Returns - ======= - output : Var - Type T. - Tensor after padding. - - Notes - ===== - Signature: ``ai.onnx@13::Pad``. 
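A sketch of ``pad`` in ``reflect`` mode, again assuming the module import path used in the earlier sketches.

import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v17 as op  # assumed import path

data = argument(Tensor(np.float32, (3, 2)))
# 1-D tensor of shape [2 * rank]: [x1_begin, x2_begin, x1_end, x2_end].
pads = argument(Tensor(np.int64, (4,)))

padded = op.pad(data, pads, mode="reflect")  # constant_value is only used in "constant" mode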
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=unwrap_vars(data), - pads=unwrap_vars(pads), - constant_value=unwrap_vars(constant_value), - ), - ) - .get_output_vars( - data=get_value(data), - pads=get_value(pads), - constant_value=get_value(constant_value), - ) - .output - ) - - -def pow( - X: Var, - Y: Var, -) -> Var: - r""" - Pow takes input data (Tensor) and exponent Tensor, and produces one - output data (Tensor) where the function ``f(x) = x^exponent``, is - applied to the data tensor elementwise. This operator supports - **multidirectional (i.e., Numpy-style) broadcasting**; for more details - please check `the - doc `__. - - Parameters - ========== - X - Type T. - First operand, base of the exponent. - Y - Type T1. - Second operand, power of the exponent. - - Returns - ======= - Z : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@15::Pow``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)` - - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Pow( - _Pow.Attributes(), - _Pow.Inputs( - X=unwrap_vars(X), - Y=unwrap_vars(Y), - ), - ) - .get_output_vars( - X=get_value(X), - Y=get_value(Y), - ) - .Z - ) - - -def qlinear_conv( - x: Var, - x_scale: Var, - x_zero_point: Var, - w: Var, - w_scale: Var, - w_zero_point: Var, - y_scale: Var, - y_zero_point: Var, - B: Optional[Var] = None, - *, - auto_pad: str = "NOTSET", - dilations: Optional[Iterable[int]] = None, - group: int = 1, - kernel_shape: Optional[Iterable[int]] = None, - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: - r""" - The convolution operator consumes a quantized input tensor, its scale - and zero point, a quantized filter, its scale and zero point, and - output's scale and zero point, and computes the quantized output. Each - scale and zero-point pair must have same shape. It means they must be - either scalars (per tensor) or 1-D tensors (per output channel). Each - input or output and its related zero point must have same type. When - bias is present it must be quantized using scale = input scale \* weight - scale and zero point as 0. - - Parameters - ========== - x - Type T1. - Input data tensor from previous layer; has size (N x C x H x W), where N - is the batch size, C is the number of channels, and H and W are the - height and width. Note that this is for the 2D image. Otherwise the size - is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in - effect, the operation expects input data tensor to arrive with the - dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, - DATA_FEATURE ...]. - x_scale - Type tensor(float). - Scale tensor for input 'x'. It's a scalar, which means a - per-tensor/layer quantization. - x_zero_point - Type T1. - Zero point tensor for input 'x'. It's a scalar, which means a - per-tensor/layer quantization. - w - Type T2. 
- The weight tensor that will be used in the convolutions; has size (M x - C/group x kH x kW), where C is the number of channels, and kH and kW are - the height and width of the kernel, and M is the number of feature maps. - For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x - k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. - Optionally, if dimension denotation is in effect, the operation expects - the weight tensor to arrive with the dimension denotation of - [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL - ...]. X.shape[1] == (W.shape[1] \* group) == C (assuming zero based - indices for the shape array). Or in other words FILTER_IN_CHANNEL should - be equal to DATA_CHANNEL. - w_scale - Type tensor(float). - Scale tensor for input 'w'. It could be a scalar or a 1-D tensor, which - means a per-tensor/layer or per output channel quantization. If it's a - 1-D tensor, its number of elements should be equal to the number of - output channels (M). - w_zero_point - Type T2. - Zero point tensor for input 'w'. It could be a scalar or a 1-D tensor, - which means a per-tensor/layer or per output channel quantization. If - it's a 1-D tensor, its number of elements should be equal to the number - of output channels (M). - y_scale - Type tensor(float). - Scale tensor for output 'y'. It's a scalar, which means a - per-tensor/layer quantization. - y_zero_point - Type T3. - Zero point tensor for output 'y'. It's a scalar, which means a - per-tensor/layer quantization. - B - Type T4. - Optional 1D bias to be added to the convolution, has size of M. Bias - must be quantized using scale = x_scale \* w_scale and zero_point = 0 - auto_pad - Attribute. - auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where - default value is NOTSET, which means explicit padding is used. - SAME_UPPER or SAME_LOWER mean pad the input so that - ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis - ``i``. The padding is split between the two sides equally or almost - equally (depending on whether it is even or odd). In case the padding is - an odd number, the extra padding is added at the end for SAME_UPPER and - at the beginning for SAME_LOWER. - dilations - Attribute. - dilation value along each spatial axis of the filter. If not present, - the dilation defaults to 1 along each spatial axis. - group - Attribute. - number of groups input channels and output channels are divided into. - default is 1. - kernel_shape - Attribute. - The shape of the convolution kernel. If not present, should be inferred - from input 'w'. - pads - Attribute. - Padding for the beginning and ending along each spatial axis, it can - take any value greater than or equal to 0.The value represent the number - of pixels added to the beginning and end part of the corresponding - axis.\ ``pads`` format should be as follow [x1_begin, x2_begin...x1_end, - x2_end,...], where xi_begin the number ofpixels added at the beginning - of axis ``i`` and xi_end, the number of pixels added at the end of axis - ``i``.This attribute cannot be used simultaneously with auto_pad - attribute. If not present, the padding defaultsto 0 along start and end - of each spatial axis. - strides - Attribute. - Stride along each spatial axis. If not present, the stride defaults to 1 - along each spatial axis. - - Returns - ======= - y : Var - Type T3. - Output data tensor that contains the result of the convolution. 
The - output dimensions are functions of the kernel size, stride size, and pad - lengths. - - Notes - ===== - Signature: ``ai.onnx@10::QLinearConv``. - - Type constraints: - - T1: `tensor(int8)`, `tensor(uint8)` - - T2: `tensor(int8)`, `tensor(uint8)` - - T3: `tensor(int8)`, `tensor(uint8)` - - T4: `tensor(int32)` - """ - return ( - _QLinearConv( - _QLinearConv.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _QLinearConv.Inputs( - x=unwrap_vars(x), - x_scale=unwrap_vars(x_scale), - x_zero_point=unwrap_vars(x_zero_point), - w=unwrap_vars(w), - w_scale=unwrap_vars(w_scale), - w_zero_point=unwrap_vars(w_zero_point), - y_scale=unwrap_vars(y_scale), - y_zero_point=unwrap_vars(y_zero_point), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - x=get_value(x), - x_scale=get_value(x_scale), - x_zero_point=get_value(x_zero_point), - w=get_value(w), - w_scale=get_value(w_scale), - w_zero_point=get_value(w_zero_point), - y_scale=get_value(y_scale), - y_zero_point=get_value(y_zero_point), - B=get_value(B), - ) - .y - ) - - -def qlinear_matmul( - a: Var, - a_scale: Var, - a_zero_point: Var, - b: Var, - b_scale: Var, - b_zero_point: Var, - y_scale: Var, - y_zero_point: Var, -) -> Var: - r""" - Matrix product that behaves like numpy.matmul: - https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. - It consumes two quantized input tensors, their scales and zero points, - scale and zero point of output, and computes the quantized output. The - quantization formula is y = saturate((x / y_scale) + y_zero_point). For - (x / y_scale), it is rounding to nearest ties to even. Refer to - https://en.wikipedia.org/wiki/Rounding for details. Scale and zero point - must have same shape. They must be either scalar (per tensor) or N-D - tensor (per row for 'a' and per column for 'b'). Scalar refers to per - tensor quantization whereas N-D refers to per row or per column - quantization. If the input is 2D of shape [M, K] then zero point and - scale tensor may be an M element vector [v_1, v_2, ..., v_M] for per row - quantization and K element vector of shape [v_1, v_2, ..., v_K] for per - column quantization. If the input is N-D tensor with shape [D1, D2, M, - K] then zero point and scale tensor may have shape [D1, D2, M, 1] for - per row quantization and shape [D1, D2, 1, K] for per column - quantization. Production must never overflow, and accumulation may - overflow if and only if in 32 bits. - - Parameters - ========== - a - Type T1. - N-dimensional quantized matrix a - a_scale - Type tensor(float). - scale of quantized input a - a_zero_point - Type T1. - zero point of quantized input a - b - Type T2. - N-dimensional quantized matrix b - b_scale - Type tensor(float). - scale of quantized input b - b_zero_point - Type T2. - zero point of quantized input b - y_scale - Type tensor(float). - scale of quantized output y - y_zero_point - Type T3. - zero point of quantized output y - - Returns - ======= - y : Var - Type T3. - Quantized matrix multiply results from a \* b - - Notes - ===== - Signature: ``ai.onnx@10::QLinearMatMul``. 
- - Type constraints: - - T1: `tensor(int8)`, `tensor(uint8)` - - T2: `tensor(int8)`, `tensor(uint8)` - - T3: `tensor(int8)`, `tensor(uint8)` - """ - return ( - _QLinearMatMul( - _QLinearMatMul.Attributes(), - _QLinearMatMul.Inputs( - a=unwrap_vars(a), - a_scale=unwrap_vars(a_scale), - a_zero_point=unwrap_vars(a_zero_point), - b=unwrap_vars(b), - b_scale=unwrap_vars(b_scale), - b_zero_point=unwrap_vars(b_zero_point), - y_scale=unwrap_vars(y_scale), - y_zero_point=unwrap_vars(y_zero_point), - ), - ) - .get_output_vars( - a=get_value(a), - a_scale=get_value(a_scale), - a_zero_point=get_value(a_zero_point), - b=get_value(b), - b_scale=get_value(b_scale), - b_zero_point=get_value(b_zero_point), - y_scale=get_value(y_scale), - y_zero_point=get_value(y_zero_point), - ) - .y - ) - - -def quantize_linear( - x: Var, - y_scale: Var, - y_zero_point: Optional[Var] = None, - *, - axis: int = 1, -) -> Var: - r""" - The linear quantization operator. It consumes a high precision tensor, a - scale, and a zero point to compute the low precision / quantized tensor. - The scale factor and zero point must have same shape, and can be either - a scalar for per-tensor / per layer quantization, or a 1-D tensor for - per-axis quantization. The quantization formula is y = saturate ((x / - y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if - it's uint8, or [-128, 127] if it's int8. For (x / y_scale), it's - rounding to the nearest even. Refer to - https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and - 'y' must have same type. - - Parameters - ========== - x - Type T1. - N-D full precision Input tensor to be quantized. - y_scale - Type tensor(float). - Scale for doing quantization to get 'y'. It can be a scalar, which means - per-tensor/layer quantization, or a 1-D Tensor for per-axis - quantization. - y_zero_point - Type T2. - Zero point for doing quantization to get 'y'. Shape must match y_scale. - Default is uint8 with zero point of 0 if it's not specified. - axis - Attribute. - (Optional) The axis of the quantization dimension of the input tensor. - Ignored for per-tensor quantization. Negative value means counting - dimensions from the back. Accepted range is [-r, r-1] where r = - rank(input). - - Returns - ======= - y : Var - Type T2. - N-D quantized output tensor. It has same shape as input 'x'. - - Notes - ===== - Signature: ``ai.onnx@13::QuantizeLinear``. - - Type constraints: - - T1: `tensor(float)`, `tensor(int32)` - - T2: `tensor(int8)`, `tensor(uint8)` - """ - return ( - _QuantizeLinear( - _QuantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _QuantizeLinear.Inputs( - x=unwrap_vars(x), - y_scale=unwrap_vars(y_scale), - y_zero_point=unwrap_vars(y_zero_point), - ), - ) - .get_output_vars( - x=get_value(x), - y_scale=get_value(y_scale), - y_zero_point=get_value(y_zero_point), - ) - .y - ) - - -def rnn( - X: Var, - W: Var, - R: Var, - B: Optional[Var] = None, - sequence_lens: Optional[Var] = None, - initial_h: Optional[Var] = None, - *, - activation_alpha: Optional[Iterable[float]] = None, - activation_beta: Optional[Iterable[float]] = None, - activations: Iterable[str] = ("Tanh", "Tanh"), - clip: Optional[float] = None, - direction: str = "forward", - hidden_size: Optional[int] = None, - layout: int = 0, -) -> tuple[Var, Var]: - r""" - Computes an one-layer simple RNN. This operator is usually supported via - some custom implementation such as CuDNN. 
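For the quantization entries above, a minimal per-tensor ``quantize_linear`` sketch (same assumed import path).

import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v17 as op  # assumed import path

x = argument(Tensor(np.float32, ("N", 4)))
y_scale = argument(Tensor(np.float32, ()))     # per-tensor scale
y_zero_point = argument(Tensor(np.uint8, ()))  # output element type follows the zero point

y = op.quantize_linear(x, y_scale, y_zero_point)  # axis is ignored for per-tensor scales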
- - Notations: - - - ``X`` - input tensor - - ``i`` - input gate - - ``t`` - time step (t-1 means previous time step) - - ``Wi`` - W parameter weight matrix for input gate - - ``Ri`` - R recurrence weight matrix for input gate - - ``Wbi`` - W parameter bias vector for input gate - - ``Rbi`` - R parameter bias vector for input gate - - ``WBi`` - W parameter weight matrix for backward input gate - - ``RBi`` - R recurrence weight matrix for backward input gate - - ``WBbi`` - WR bias vectors for backward input gate - - ``RBbi`` - RR bias vectors for backward input gate - - ``H`` - Hidden state - - ``num_directions`` - 2 if direction == bidirectional else 1 - - Activation functions: - - - Relu(x) - max(0, x) - - Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x}) - - Sigmoid(x) - 1/(1 + e^{-x}) - - NOTE: Below are optional - - - Affine(x) - alpha*x + beta - - LeakyRelu(x) - x if x >= 0 else alpha \* x - - ThresholdedRelu(x) - x if x >= alpha else 0 - - ScaledTanh(x) - alpha\ *Tanh(beta*\ x) - - HardSigmoid(x) - min(max(alpha*x + beta, 0), 1) - - Elu(x) - x if x >= 0 else alpha*(e^x - 1) - - Softsign(x) - x/(1 + \|x\|) - - Softplus(x) - log(1 + e^x) - - Equations (Default: f=Tanh): - - - Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) This operator has - **optional** inputs/outputs. See `the - doc `__ for more - details about the representation of optional arguments. An empty - string may be used in the place of an actual argument's name to - indicate a missing argument. Trailing optional arguments (those not - followed by an argument that is present) may also be simply omitted. - - Parameters - ========== - X - Type T. - The input sequences packed (and potentially padded) into one 3-D tensor - with the shape of ``[seq_length, batch_size, input_size]``. - W - Type T. - The weight tensor for input gate. Concatenation of ``Wi`` and ``WBi`` - (if bidirectional). The tensor has shape - ``[num_directions, hidden_size, input_size]``. - R - Type T. - The recurrence weight tensor. Concatenation of ``Ri`` and ``RBi`` (if - bidirectional). The tensor has shape - ``[num_directions, hidden_size, hidden_size]``. - B - Type T. - The bias tensor for input gate. Concatenation of ``[Wbi, Rbi]`` and - ``[WBbi, RBbi]`` (if bidirectional). The tensor has shape - ``[num_directions, 2*hidden_size]``. Optional: If not specified - - assumed to be 0. - sequence_lens - Type T1. - Optional tensor specifying lengths of the sequences in a batch. If not - specified - assumed all sequences in the batch to have length - ``seq_length``. It has shape ``[batch_size]``. - initial_h - Type T. - Optional initial value of the hidden. If not specified - assumed to be - 0. It has shape ``[num_directions, batch_size, hidden_size]``. - activation_alpha - Attribute. - Optional scaling values used by some activation functions. The values - are consumed in the order of activation functions, for example (f, g, h) - in LSTM. Default values are the same as of corresponding ONNX - operators.For example with LeakyRelu, the default alpha is 0.01. - activation_beta - Attribute. - Optional scaling values used by some activation functions. The values - are consumed in the order of activation functions, for example (f, g, h) - in LSTM. Default values are the same as of corresponding ONNX operators. - activations - Attribute. - One (or two if bidirectional) activation function for input gate. The - activation function must be one of the activation functions specified - above. Optional: Default ``Tanh`` if not specified. - clip - Attribute. - Cell clip threshold. 
Clipping bounds the elements of a tensor in the - range of [-threshold, +threshold] and is applied to the input of - activations. No clip if not specified. - direction - Attribute. - Specify if the RNN is forward, reverse, or bidirectional. Must be one of - forward (default), reverse, or bidirectional. - hidden_size - Attribute. - Number of neurons in the hidden layer - layout - Attribute. - The shape format of inputs X, initial_h and outputs Y, Y_h. If 0, the - following shapes are expected: X.shape = [seq_length, batch_size, - input_size], Y.shape = [seq_length, num_directions, batch_size, - hidden_size], initial_h.shape = Y_h.shape = [num_directions, batch_size, - hidden_size]. If 1, the following shapes are expected: X.shape = - [batch_size, seq_length, input_size], Y.shape = [batch_size, seq_length, - num_directions, hidden_size], initial_h.shape = Y_h.shape = [batch_size, - num_directions, hidden_size]. - - Returns - ======= - Y : Var - Type T. - A tensor that concats all the intermediate output values of the hidden. - It has shape ``[seq_length, num_directions, batch_size, hidden_size]``. - Y_h : Var - Type T. - The last output value of the hidden. It has shape - ``[num_directions, batch_size, hidden_size]``. - - Notes - ===== - Signature: ``ai.onnx@14::RNN``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T1: `tensor(int32)` - """ - return ( - _RNN( - _RNN.Attributes( - activation_alpha=AttrFloat32s.maybe( - activation_alpha, name="activation_alpha" - ), - activation_beta=AttrFloat32s.maybe( - activation_beta, name="activation_beta" - ), - activations=AttrStrings(activations, name="activations"), - clip=AttrFloat32.maybe(clip, name="clip"), - direction=AttrString(direction, name="direction"), - hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), - layout=AttrInt64(layout, name="layout"), - ), - _RNN.Inputs( - X=unwrap_vars(X), - W=unwrap_vars(W), - R=unwrap_vars(R), - B=unwrap_vars(B), - sequence_lens=unwrap_vars(sequence_lens), - initial_h=unwrap_vars(initial_h), - ), - ) - .get_output_vars( - X=get_value(X), - W=get_value(W), - R=get_value(R), - B=get_value(B), - sequence_lens=get_value(sequence_lens), - initial_h=get_value(initial_h), - ) - ._unpack_to_any() - ) - - -def random_normal( - *, - dtype: npt.DTypeLike = np.float32, - mean: float = 0.0, - scale: float = 1.0, - seed: Optional[float] = None, - shape: Iterable[int], -) -> Var: - r""" - Generate a tensor with random values drawn from a normal distribution. - The shape of the tensor is specified by the ``shape`` argument and the - parameter of the normal distribution specified by ``mean`` and - ``scale``. - - The data type is specified by the 'dtype' argument. The 'dtype' argument - must be one of the data types specified in the 'DataType' enum field in - the TensorProto message. - - Parameters - ========== - dtype - Attribute. - The data type for the elements of the output tensor. Default is - TensorProto::FLOAT. - mean - Attribute. - The mean of the normal distribution. - scale - Attribute. - The standard deviation of the normal distribution. - seed - Attribute. - (Optional) Seed to the random generator, if not specified we will auto - generate one. - shape - Attribute. - The shape of the output tensor. - - Returns - ======= - output : Var - Type T. - Output tensor of random values drawn from normal distribution - - Notes - ===== - Signature: ``ai.onnx@1::RandomNormal``. 
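``rnn`` above is one of the constructors that returns a tuple of Vars rather than a single output; a hedged sketch of unpacking it, under the same import assumption.

import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v17 as op  # assumed import path

hidden, inp = 16, 8
X = argument(Tensor(np.float32, ("seq", "batch", inp)))
W = argument(Tensor(np.float32, (1, hidden, inp)))    # num_directions=1 for the default "forward"
R = argument(Tensor(np.float32, (1, hidden, hidden)))

Y, Y_h = op.rnn(X, W, R, hidden_size=hidden)  # two outputs, returned as a tuple of Vars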
- - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _RandomNormal( - _RandomNormal.Attributes( - dtype=AttrDtype(dtype, name="dtype"), - mean=AttrFloat32(mean, name="mean"), - scale=AttrFloat32(scale, name="scale"), - seed=AttrFloat32.maybe(seed, name="seed"), - shape=AttrInt64s(shape, name="shape"), - ), - _RandomNormal.Inputs(), - ) - .get_output_vars() - .output - ) - - -def random_normal_like( - input: Var, - *, - dtype: Optional[npt.DTypeLike] = None, - mean: float = 0.0, - scale: float = 1.0, - seed: Optional[float] = None, -) -> Var: - r""" - Generate a tensor with random values drawn from a normal distribution. - The shape of the output tensor is copied from the shape of the input - tensor, and the parameters of the normal distribution are specified by - ``mean`` and ``scale``. - - The data type is specified by the 'dtype' argument, or copied from the - input tensor if not provided. The 'dtype' argument must be one of the - data types specified in the 'DataType' enum field in the TensorProto - message, and be valid as an output type. - - Parameters - ========== - input - Type T1. - Input tensor to copy shape and optionally type information from. - dtype - Attribute. - (Optional) The data type for the elements of the output tensor, if not - specified, we will use the data type of the input tensor. - mean - Attribute. - The mean of the normal distribution. - scale - Attribute. - The standard deviation of the normal distribution. - seed - Attribute. - (Optional) Seed to the random generator, if not specified we will auto - generate one. - - Returns - ======= - output : Var - Type T2. - Output tensor of random values drawn from normal distribution - - Notes - ===== - Signature: ``ai.onnx@1::RandomNormalLike``. - - Type constraints: - - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _RandomNormalLike( - _RandomNormalLike.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - mean=AttrFloat32(mean, name="mean"), - scale=AttrFloat32(scale, name="scale"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _RandomNormalLike.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def random_uniform( - *, - dtype: npt.DTypeLike = np.float32, - high: float = 1.0, - low: float = 0.0, - seed: Optional[float] = None, - shape: Iterable[int], -) -> Var: - r""" - Generate a tensor with random values drawn from a uniform distribution. - The shape of the tensor is specified by the ``shape`` argument and the - range by ``low`` and ``high``. - - The data type is specified by the 'dtype' argument. The 'dtype' argument - must be one of the data types specified in the 'DataType' enum field in - the TensorProto message. - - Parameters - ========== - dtype - Attribute. - The data type for the elements of the output tensor. If not specified, - default is TensorProto::FLOAT. - high - Attribute. - Upper boundary of the output values. - low - Attribute. - Lower boundary of the output values. - seed - Attribute. - (Optional) Seed to the random generator, if not specified we will auto - generate one. - shape - Attribute. - The shape of the output tensor. - - Returns - ======= - output : Var - Type T. 
- Output tensor of random values drawn from uniform distribution - - Notes - ===== - Signature: ``ai.onnx@1::RandomUniform``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _RandomUniform( - _RandomUniform.Attributes( - dtype=AttrDtype(dtype, name="dtype"), - high=AttrFloat32(high, name="high"), - low=AttrFloat32(low, name="low"), - seed=AttrFloat32.maybe(seed, name="seed"), - shape=AttrInt64s(shape, name="shape"), - ), - _RandomUniform.Inputs(), - ) - .get_output_vars() - .output - ) - - -def random_uniform_like( - input: Var, - *, - dtype: Optional[npt.DTypeLike] = None, - high: float = 1.0, - low: float = 0.0, - seed: Optional[float] = None, -) -> Var: - r""" - Generate a tensor with random values drawn from a uniform distribution. - The shape of the output tensor is copied from the shape of the input - tensor, and the parameters of the uniform distribution are specified by - ``low`` and ``high``. - - The data type is specified by the 'dtype' argument, or copied from the - input tensor if not provided. The 'dtype' argument must be one of the - data types specified in the 'DataType' enum field in the TensorProto - message and be valid as an output type. - - Parameters - ========== - input - Type T1. - Input tensor to copy shape and optionally type information from. - dtype - Attribute. - (Optional) The data type for the elements of the output tensor, if not - specified, we will use the data type of the input tensor. - high - Attribute. - Upper boundary of the output values. - low - Attribute. - Lower boundary of the output values. - seed - Attribute. - (Optional) Seed to the random generator, if not specified we will auto - generate one. - - Returns - ======= - output : Var - Type T2. - Output tensor of random values drawn from uniform distribution - - Notes - ===== - Signature: ``ai.onnx@1::RandomUniformLike``. - - Type constraints: - - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _RandomUniformLike( - _RandomUniformLike.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - high=AttrFloat32(high, name="high"), - low=AttrFloat32(low, name="low"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _RandomUniformLike.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def range( - start: Var, - limit: Var, - delta: Var, -) -> Var: - r""" - Generate a tensor containing a sequence of numbers that begin at - ``start`` and extends by increments of ``delta`` up to ``limit`` - (exclusive). - - The number of elements in the output of range is computed as below: - - :: - - number_of_elements = max( ceil( (limit - start) / delta ) , 0 ) - - The pseudocode determining the contents of the output is shown below: - - :: - - for(int i=0; i Var: - r""" - Reciprocal takes one input data (Tensor) and produces one output data - (Tensor) where the reciprocal is, y = 1/x, is applied to the tensor - elementwise. - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@13::Reciprocal``. 
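``random_uniform`` above takes no input Vars; shape and dtype are attributes. A short sketch with the same assumed import path.

import numpy as np
import spox.opset.ai.onnx.v17 as op  # assumed import path

# Attribute-only constructor: no input Vars are passed.
noise = op.random_uniform(dtype=np.float32, low=-1.0, high=1.0, shape=(2, 3))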
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Reciprocal( - _Reciprocal.Attributes(), - _Reciprocal.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def reduce_l1( - data: Var, - *, - axes: Optional[Iterable[int]] = None, - keepdims: int = 1, -) -> Var: - r""" - Computes the L1 norm of the input tensor's elements along the provided - axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields 0. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Attribute. - A list of integers, along which to reduce. The default is to reduce over - all the dimensions of the input tensor. Accepted range is [-r, r-1] - where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::ReduceL1``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _ReduceL1( - _ReduceL1.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceL1.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) - - -def reduce_l2( - data: Var, - *, - axes: Optional[Iterable[int]] = None, - keepdims: int = 1, -) -> Var: - r""" - Computes the L2 norm of the input tensor's elements along the provided - axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields 0. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Attribute. - A list of integers, along which to reduce. The default is to reduce over - all the dimensions of the input tensor. Accepted range is [-r, r-1] - where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::ReduceL2``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _ReduceL2( - _ReduceL2.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceL2.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) - - -def reduce_log_sum( - data: Var, - *, - axes: Optional[Iterable[int]] = None, - keepdims: int = 1, -) -> Var: - r""" - Computes the log sum of the input tensor's elements along the provided - axes. 
The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields minus infinity (if - supported by the datatype) or undefined otherwise. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Attribute. - A list of integers, along which to reduce. The default is to reduce over - all the dimensions of the input tensor. Accepted range is [-r, r-1] - where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::ReduceLogSum``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _ReduceLogSum( - _ReduceLogSum.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceLogSum.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) - - -def reduce_log_sum_exp( - data: Var, - *, - axes: Optional[Iterable[int]] = None, - keepdims: int = 1, -) -> Var: - r""" - Computes the log sum exponent of the input tensor's elements along the - provided axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields minus infinity (if - supported by the datatype) or undefined otherwise. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Attribute. - A list of integers, along which to reduce. The default is to reduce over - all the dimensions of the input tensor. Accepted range is [-r, r-1] - where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::ReduceLogSumExp``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _ReduceLogSumExp( - _ReduceLogSumExp.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceLogSumExp.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) - - -def reduce_max( - data: Var, - *, - axes: Optional[Iterable[int]] = None, - keepdims: int = 1, -) -> Var: - r""" - Computes the max of the input tensor's elements along the provided axes. - The resulting tensor has the same rank as the input if ``keepdims`` - equals 1. If ``keepdims`` equals 0, then the resulting tensor has the - reduced dimension pruned. Input tensors of rank zero are valid. 
- Reduction over an empty set of values yields minus infinity (if - supported by the datatype) or the minimum value of the data type - otherwise. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Attribute. - A list of integers, along which to reduce. The default is to reduce over - all the dimensions of the input tensor. Accepted range is [-r, r-1] - where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::ReduceMax``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _ReduceMax( - _ReduceMax.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceMax.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) - - -def reduce_mean( - data: Var, - *, - axes: Optional[Iterable[int]] = None, - keepdims: int = 1, -) -> Var: - r""" - Computes the mean of the input tensor's elements along the provided - axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields undefined. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Attribute. - A list of integers, along which to reduce. The default is to reduce over - all the dimensions of the input tensor. Accepted range is [-r, r-1] - where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::ReduceMean``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _ReduceMean( - _ReduceMean.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceMean.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) - - -def reduce_min( - data: Var, - *, - axes: Optional[Iterable[int]] = None, - keepdims: int = 1, -) -> Var: - r""" - Computes the min of the input tensor's elements along the provided axes. - The resulting tensor has the same rank as the input if ``keepdims`` - equals 1. If ``keepdims`` equals 0, then the resulting tensor has the - reduced dimension pruned. Input tensors of rank zero are valid. - Reduction over an empty set of values yields plus infinity (if supported - by the datatype) or the maximum value of the data type otherwise. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Attribute. 
- A list of integers, along which to reduce. The default is to reduce over - all the dimensions of the input tensor. Accepted range is [-r, r-1] - where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::ReduceMin``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _ReduceMin( - _ReduceMin.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceMin.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) - - -def reduce_prod( - data: Var, - *, - axes: Optional[Iterable[int]] = None, - keepdims: int = 1, -) -> Var: - r""" - Computes the product of the input tensor's elements along the provided - axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields 1. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Attribute. - A list of integers, along which to reduce. The default is to reduce over - all the dimensions of the input tensor. Accepted range is [-r, r-1] - where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::ReduceProd``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _ReduceProd( - _ReduceProd.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceProd.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) - - -def reduce_sum( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: - r""" - Computes the sum of the input tensor's elements along the provided axes. - The resulting tensor has the same rank as the input if ``keepdims`` - equals 1. If ``keepdims`` equals 0, then the resulting tensor has the - reduced dimension pruned. Input tensors of rank zero are valid. - Reduction over an empty set of values yields 0. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. 
- Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::ReduceSum``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _ReduceSum( - _ReduceSum.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceSum.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def reduce_sum_square( - data: Var, - *, - axes: Optional[Iterable[int]] = None, - keepdims: int = 1, -) -> Var: - r""" - Computes the sum square of the input tensor's elements along the - provided axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields 0. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Attribute. - A list of integers, along which to reduce. The default is to reduce over - all the dimensions of the input tensor. Accepted range is [-r, r-1] - where r = rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::ReduceSumSquare``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - """ - return ( - _ReduceSumSquare( - _ReduceSumSquare.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceSumSquare.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .reduced - ) - - -def relu( - X: Var, -) -> Var: - r""" - Relu takes one input data (Tensor) and produces one output data - (Tensor) where the rectified linear function, y = max(0, x), is - applied to the tensor elementwise. - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@14::Relu``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)` - """ - return ( - _Relu( - _Relu.Attributes(), - _Relu.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def reshape( - data: Var, - shape: Var, - *, - allowzero: int = 0, -) -> Var: - r""" - Reshape the input tensor similar to numpy.reshape. First input is the - data tensor, second input is a shape tensor which specifies the output - shape. It outputs the reshaped tensor. At most one dimension of the new - shape can be -1. 
In this case, the value is inferred from the size of - the tensor and the remaining dimensions. A dimension could also be 0, in - which case the actual dimension value is unchanged (i.e. taken from the - input tensor). If 'allowzero' is set, and the new shape includes 0, the - dimension will be set explicitly to zero (i.e. not taken from input - tensor). Shape (second input) could be an empty shape, which means - converting to a scalar. The input tensor's shape and the output tensor's - shape are required to have the same number of elements. - - If the attribute 'allowzero' is set, it is invalid for the specified - shape to contain both a zero value and -1, as the value of the dimension - corresponding to -1 cannot be determined uniquely. - - Parameters - ========== - data - Type T. - An input tensor. - shape - Type tensor(int64). - Specified shape for output. - allowzero - Attribute. - (Optional) By default, when any value in the 'shape' input is equal to - zero the corresponding dimension value is copied from the input tensor - dynamically. allowzero=1 indicates that if any value in the 'shape' - input is set to zero, the zero value is honored, similar to NumPy. - - Returns - ======= - reshaped : Var - Type T. - Reshaped data. - - Notes - ===== - Signature: ``ai.onnx@14::Reshape``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Reshape( - _Reshape.Attributes( - allowzero=AttrInt64(allowzero, name="allowzero"), - ), - _Reshape.Inputs( - data=unwrap_vars(data), - shape=unwrap_vars(shape), - ), - ) - .get_output_vars( - data=get_value(data), - shape=get_value(shape), - ) - .reshaped - ) - - -def resize( - X: Var, - roi: Optional[Var] = None, - scales: Optional[Var] = None, - sizes: Optional[Var] = None, - *, - coordinate_transformation_mode: str = "half_pixel", - cubic_coeff_a: float = -0.75, - exclude_outside: int = 0, - extrapolation_value: float = 0.0, - mode: str = "nearest", - nearest_mode: str = "round_prefer_floor", -) -> Var: - r""" - Resize the input tensor. In general, it calculates every value in the - output tensor as a weighted average of neighborhood (a.k.a. sampling - locations) in the input tensor. Each dimension value of the output - tensor is: output_dimension = floor(input_dimension \* (roi_end - - roi_start) \* scale) if input "sizes" is not specified. - - Parameters - ========== - X - Type T1. - N-D tensor - roi - Type T2. - 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is - the rank of X. The RoIs' coordinates are normalized in the coordinate - system of the input image. It only takes effect when - coordinate_transformation_mode is "tf_crop_and_resize" - scales - Type tensor(float). - The scale array along each dimension. It takes value greater than 0. If - it's less than 1, it's sampling down, otherwise, it's upsampling. The - number of elements of 'scales' should be the same as the rank of input - 'X'. One of 'scales' and 'sizes' MUST be specified and it is an error if - both are specified. If 'sizes' is needed, the user can use an empty - string as the name of 'scales' in this operator's input list. - sizes - Type tensor(int64). - The size of the output tensor. The number of elements of 'sizes' should - be the same as the rank of input 'X'. 
Only one of 'scales' and 'sizes' - can be specified. - coordinate_transformation_mode - Attribute. - This attribute describes how to transform the coordinate in the resized - tensor to the coordinate in the original tensor. - - The coordinate of each dimension is transformed individually. Let's - describe a case using axis x as an example. Denote x_resized as the - coordinate of axis x in the resized tensor, x_original as the coordinate - of axis x in the original tensor, length_original as the length of the - original tensor in axis x, length_resized as the length of the resized - tensor in axis x, roi_x = (start_x, end_x) of the axis x in input "roi", - scale = length_resized / length_original, - - if coordinate_transformation_mode is "half_pixel", x_original = - (x_resized + 0.5) / scale - 0.5, - - if coordinate_transformation_mode is "pytorch_half_pixel", x_original = - length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0, - - if coordinate_transformation_mode is "align_corners", x_original = - x_resized \* (length_original - 1) / (length_resized - 1), - - if coordinate_transformation_mode is "asymmetric", x_original = - x_resized / scale, - - if coordinate_transformation_mode is "tf_crop_and_resize", x_original = - length_resized > 1 ? start_x \* (length_original - 1) + x_resized \* - (end_x - start_x) \* (length_original - 1) / (length_resized - 1) : 0.5 - \* (start_x + end_x) \* (length_original - 1). - cubic_coeff_a - Attribute. - The coefficient 'a' used in cubic interpolation. Two common choice are - -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out - Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the - details. This attribute is valid only if "mode" is "cubic". - exclude_outside - Attribute. - If set to 1, the weight of sampling locations outside the tensor will be - set to 0 and the weight will be renormalized so that their sum is 1.0. - The default value is 0. - extrapolation_value - Attribute. - When coordinate_transformation_mode is "tf_crop_and_resize" and - x_original is outside the range [0, length_original - 1], this value is - used as the corresponding output value. Default is 0.0f. - mode - Attribute. - Three interpolation modes: nearest (default), linear and cubic. The - "linear" mode includes linear interpolation for 1D tensor and N-linear - interpolation for N-D tensor (for example, bilinear interpolation for 2D - tensor). The "cubic" mode includes cubic interpolation for 1D tensor and - N-cubic interpolation for N-D tensor (for example, bicubic interpolation - for 2D tensor). - nearest_mode - Attribute. - Four modes: round_prefer_floor (default, as known as round half down), - round_prefer_ceil (as known as round half up), floor, ceil. Only used by - nearest interpolation. It indicates how to get "nearest" pixel in input - tensor from x_original, so this attribute is valid only if "mode" is - "nearest". - - Returns - ======= - Y : Var - Type T1. - N-D tensor after resizing - - Notes - ===== - Signature: ``ai.onnx@13::Resize``. 
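# Illustration only (not part of the patch): a minimal usage sketch of the
# `resize` constructor documented above, upsampling the two spatial axes via
# the `scales` input. The opset module path is assumed to be
# `spox.opset.ai.onnx.v17`; adjust it to wherever these constructors live.
import numpy as np
import spox
import spox.opset.ai.onnx.v17 as op

x = spox.argument(spox.Tensor(np.float32, (1, 3, 32, 32)))
# `scales` must have one entry per axis of `x`, e.g. [1, 1, 2, 2] at runtime;
# exactly one of `scales` and `sizes` may be passed.
scales = spox.argument(spox.Tensor(np.float32, (4,)))
y = op.resize(x, scales=scales, mode="linear")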
- - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Resize( - _Resize.Attributes( - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, - name="coordinate_transformation_mode", - ), - cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), - exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), - extrapolation_value=AttrFloat32( - extrapolation_value, name="extrapolation_value" - ), - mode=AttrString(mode, name="mode"), - nearest_mode=AttrString(nearest_mode, name="nearest_mode"), - ), - _Resize.Inputs( - X=unwrap_vars(X), - roi=unwrap_vars(roi), - scales=unwrap_vars(scales), - sizes=unwrap_vars(sizes), - ), - ) - .get_output_vars( - X=get_value(X), - roi=get_value(roi), - scales=get_value(scales), - sizes=get_value(sizes), - ) - .Y - ) - - -def reverse_sequence( - input: Var, - sequence_lens: Var, - *, - batch_axis: int = 1, - time_axis: int = 0, -) -> Var: - r""" - Reverse batch of sequences having different lengths specified by - ``sequence_lens``. - - For each slice i iterating on batch axis, the operator reverses the - first sequence_lens[i] elements on time axis, and copies elements whose - index's beyond sequence_lens[i] to the output. So the output slice i - contains reversed sequences on the first sequence_lens[i] elements, then - have original values copied for the other elements. - - Example 1: input = [[0.0, 4.0, 8.0, 12.0], [1.0, 5.0, 9.0, 13.0], [2.0, - 6.0, 10.0, 14.0], [3.0, 7.0, 11.0, 15.0]] sequence_lens = [4, 3, 2, 1] - time_axis = 0 batch_axis = 1 - - output = [[3.0, 6.0, 9.0, 12.0], [2.0, 5.0, 8.0, 13.0], [1.0, 4.0, 10.0, - 14.0], [0.0, 7.0, 11.0, 15.0]] - - Example 2: input = [[0.0, 1.0, 2.0, 3.0 ], [4.0, 5.0, 6.0, 7.0 ], [8.0, - 9.0, 10.0, 11.0], [12.0, 13.0, 14.0, 15.0]] sequence_lens = [1, 2, 3, 4] - time_axis = 1 batch_axis = 0 - - output = [[0.0, 1.0, 2.0, 3.0 ], [5.0, 4.0, 6.0, 7.0 ], [10.0, 9.0, 8.0, - 11.0], [15.0, 14.0, 13.0, 12.0]] - - Parameters - ========== - input - Type T. - Tensor of rank r >= 2. - sequence_lens - Type tensor(int64). - Tensor specifying lengths of the sequences in a batch. It has shape - ``[batch_size]``. - batch_axis - Attribute. - (Optional) Specify which axis is batch axis. Must be one of 1 (default), - or 0. - time_axis - Attribute. - (Optional) Specify which axis is time axis. Must be one of 0 (default), - or 1. - - Returns - ======= - Y : Var - Type T. - Tensor with same shape of input. - - Notes - ===== - Signature: ``ai.onnx@10::ReverseSequence``. 
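# Illustration only (not part of the patch): a usage sketch matching
# Example 2 above -- batch on axis 0, time on axis 1, so the first
# `sequence_lens[i]` steps of each row are reversed. The opset module path
# below is an assumption.
import numpy as np
import spox
import spox.opset.ai.onnx.v17 as op

x = spox.argument(spox.Tensor(np.float32, (4, 4)))
lens = spox.argument(spox.Tensor(np.int64, (4,)))
y = op.reverse_sequence(x, lens, batch_axis=0, time_axis=1)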
- - Type constraints: - - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _ReverseSequence( - _ReverseSequence.Attributes( - batch_axis=AttrInt64(batch_axis, name="batch_axis"), - time_axis=AttrInt64(time_axis, name="time_axis"), - ), - _ReverseSequence.Inputs( - input=unwrap_vars(input), - sequence_lens=unwrap_vars(sequence_lens), - ), - ) - .get_output_vars( - input=get_value(input), - sequence_lens=get_value(sequence_lens), - ) - .Y - ) - - -def roi_align( - X: Var, - rois: Var, - batch_indices: Var, - *, - coordinate_transformation_mode: str = "half_pixel", - mode: str = "avg", - output_height: int = 1, - output_width: int = 1, - sampling_ratio: int = 0, - spatial_scale: float = 1.0, -) -> Var: - r""" - Region of Interest (RoI) align operation described in the `Mask R-CNN - paper `__. RoiAlign consumes an input - tensor X and region of interests (rois) to apply pooling across each - RoI; it produces a 4-D tensor of shape (num_rois, C, output_height, - output_width). - - RoiAlign is proposed to avoid the misalignment by removing quantizations - while converting from original image into feature map and from feature - map into RoI feature; in each ROI bin, the value of the sampled - locations are computed directly through bilinear interpolation. - - Parameters - ========== - X - Type T1. - Input data tensor from the previous operator; 4-D feature map of shape - (N, C, H, W), where N is the batch size, C is the number of channels, - and H and W are the height and the width of the data. - rois - Type T1. - RoIs (Regions of Interest) to pool over; rois is 2-D input of shape - (num_rois, 4) given as [[x1, y1, x2, y2], ...]. The RoIs' coordinates - are in the coordinate system of the input image. Each coordinate set has - a 1:1 correspondence with the 'batch_indices' input. - batch_indices - Type T2. - 1-D tensor of shape (num_rois,) with each element denoting the index of - the corresponding image in the batch. - coordinate_transformation_mode - Attribute. - Allowed values are 'half_pixel' and 'output_half_pixel'. Use the value - 'half_pixel' to pixel shift the input coordinates by -0.5 (the - recommended behavior). Use the value 'output_half_pixel' to omit the - pixel shift for the input (use this for a backward-compatible behavior). - mode - Attribute. - The pooling method. Two modes are supported: 'avg' and 'max'. Default is - 'avg'. - output_height - Attribute. - default 1; Pooled output Y's height. - output_width - Attribute. - default 1; Pooled output Y's width. - sampling_ratio - Attribute. - Number of sampling points in the interpolation grid used to compute the - output value of each pooled output bin. If > 0, then exactly - sampling_ratio x sampling_ratio grid points are used. If == 0, then an - adaptive number of grid points are used (computed as ceil(roi_width / - output_width), and likewise for height). Default is 0. - spatial_scale - Attribute. - Multiplicative spatial scale factor to translate ROI coordinates from - their input spatial scale to the scale used when pooling, i.e., spatial - scale of the input feature map X relative to the input image. E.g.; - default is 1.0f. - - Returns - ======= - Y : Var - Type T1. - RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, - output_width). 
The r-th batch element Y[r-1] is a pooled feature map - corresponding to the r-th RoI X[r-1]. - - Notes - ===== - Signature: ``ai.onnx@16::RoiAlign``. - - Type constraints: - - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(int64)` - """ - return ( - _RoiAlign( - _RoiAlign.Attributes( - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, - name="coordinate_transformation_mode", - ), - mode=AttrString(mode, name="mode"), - output_height=AttrInt64(output_height, name="output_height"), - output_width=AttrInt64(output_width, name="output_width"), - sampling_ratio=AttrInt64(sampling_ratio, name="sampling_ratio"), - spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), - ), - _RoiAlign.Inputs( - X=unwrap_vars(X), - rois=unwrap_vars(rois), - batch_indices=unwrap_vars(batch_indices), - ), - ) - .get_output_vars( - X=get_value(X), - rois=get_value(rois), - batch_indices=get_value(batch_indices), - ) - .Y - ) - - -def round( - X: Var, -) -> Var: - r""" - Round takes one input Tensor and rounds the values, element-wise, - meaning it finds the nearest integer for each value. In case of halves, - the rule is to round them to the nearest even integer. If input x is - integral, +0, -0, NaN, or infinite, x itself is returned. The output - tensor has the same shape and type as the input. - - Examples: - - :: - - round([0.9]) = [1.0] - round([2.5]) = [2.0] - round([2.3]) = [2.0] - round([1.5]) = [2.0] - round([-4.5]) = [-4.0] - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@11::Round``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Round( - _Round.Attributes(), - _Round.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def stft( - signal: Var, - frame_step: Var, - window: Optional[Var] = None, - frame_length: Optional[Var] = None, - *, - onesided: int = 1, -) -> Var: - r""" - Computes the Short-time Fourier Transform of the signal. - - Parameters - ========== - signal - Type T1. - Input tensor representing a real or complex valued signal. For real - input, the following shape is expected: [batch_size][signal_length][1]. - For complex input, the following shape is expected: - [batch_size][signal_length][2], where [batch_size][signal_length][0] - represents the real component and [batch_size][signal_length][1] - represents the imaginary component of the signal. - frame_step - Type T2. - The number of samples to step between successive DFTs. - window - Type T1. - A tensor representing the window that will be slid over the signal.The - window must have rank 1 with shape: [window_shape]. It's an optional - value. - frame_length - Type T2. - A scalar representing the size of the DFT. It's an optional value. - onesided - Attribute. - If onesided is 1, only values for w in [0, 1, 2, ..., floor(n_fft/2) + - 1] are returned because the real-to-complex Fourier transform satisfies - the conjugate symmetry, i.e., X[m, w] = X[m,w]=X[m,n_fft-w]\*. Note if - the input or window tensors are complex, then onesided output is not - possible. Enabling onesided with real inputs performs a Real-valued fast - Fourier transform (RFFT).When invoked with real or complex valued input, - the default value is 1. Values can be 0 or 1. - - Returns - ======= - output : Var - Type T1. 
- The Short-time Fourier Transform of the signals.If onesided is 1, the - output has the shape: [batch_size][frames][dft_unique_bins][2], where - dft_unique_bins is frame_length // 2 + 1 (the unique components of the - DFT) If onesided is 0, the output has the shape: - [batch_size][frames][frame_length][2], where frame_length is the length - of the DFT. - - Notes - ===== - Signature: ``ai.onnx@17::STFT``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(int32)`, `tensor(int64)` - """ - return ( - _STFT( - _STFT.Attributes( - onesided=AttrInt64(onesided, name="onesided"), - ), - _STFT.Inputs( - signal=unwrap_vars(signal), - frame_step=unwrap_vars(frame_step), - window=unwrap_vars(window), - frame_length=unwrap_vars(frame_length), - ), - ) - .get_output_vars( - signal=get_value(signal), - frame_step=get_value(frame_step), - window=get_value(window), - frame_length=get_value(frame_length), - ) - .output - ) - - -def scan( - initial_state_and_scan_inputs: Sequence[Var], - *, - body: Callable[..., Iterable[Var]], - num_scan_inputs: int, - scan_input_axes: Optional[Iterable[int]] = None, - scan_input_directions: Optional[Iterable[int]] = None, - scan_output_axes: Optional[Iterable[int]] = None, - scan_output_directions: Optional[Iterable[int]] = None, -) -> Sequence[Var]: - r""" - Scan can be used to iterate over one or more scan_input tensors, - constructing zero or more scan_output tensors. It combines ideas from - general recurrences, functional programming constructs such as scan, - fold, map, and zip, and is intended to enable generalizations of - RNN-like constructs for sequence-to-sequence processing. Other tensors - (referred to as state_variables here) can be used to carry a state when - iterating from one element to another (similar to hidden-state in RNNs, - also referred to as loop-carried dependences in the context of loops). - Many common usages involve a single scan_input tensor (where - functionality similar to scan, fold and map can be obtained). When more - than one scan_input is used, a behavior similar to zip is obtained. - - The attribute body must be a graph, specifying the computation to be - performed in every iteration. It takes as input the current values of - the state_variables and the current iterated element of the scan_inputs. - It must return the (updated) values of the state_variables and zero or - more scan_output_element tensors. The values of the scan_output_element - tensors are concatenated over all the iterations to produce the - scan_output values of the scan construct (similar to the concatenated - intermediate hidden-state values of RNN-like constructs). All the output - tensors (state_variables as well as scan_output_element tensors) are - required to have the same shape in each iteration of the loop (a - restriction imposed to enable efficient memory allocation). - - Note that the iterated element passed to the body subgraph does not have - a sequence axis. It will have a rank one less than the rank of the - corresponding scan_input. - - The scan operation returns the final values of the state_variables as - well as the scan_outputs. - - The optional attribute scan_input_directions specifies the direction - (forward or backward) for each scan input. If this attribute is omitted, - all sequences are scanned in the forward direction. 
A bidirectional scan - may be performed by specifying the same tensor input twice in the - scan_inputs, once with a forward direction, and once with a backward - direction. - - The scan_output of the operation is produced by concatenating the - scan_output_element values produced by the body in each iteration. The - optional attribute scan_output_directions specifies the direction in - which scan_output is constructed (by appending or prepending the - scan_output_element to scan_output in each iteration) for each - scan_output. If this attribute is omitted, the scan_output_element is - appended to the scan_output in each iteration. - - The optional attribute scan_input_axes specifies the axis to be scanned - for each scan_input. If omitted, every scan_input will be scanned in - axis 0. For example, if axis 0 is the batch axis and axis 1 is the time - axis (to be scanned), specify an axis value of 1. Note that scanning a - non-zero axis may be less efficient than scanning axis zero. - - The optional attribute scan_output_axes specifies the axis along which - the scan_outputs are accumulated for each scan_output. For example, if - axis 1 is the time axis (to be scanned) for both inputs and outputs, - specify a scan_input axis and scan_output axis value of 1. - - Note that because of the ONNX restriction that only the last parameter - of an operator can be variadic, the initial-states and scan-inputs are - listed together as one input parameter. Similarly, the final-states and - scan-outputs are listed together as one output parameter. The attribute - num_scan_inputs indicates the number M of scan-inputs. - - The behavior of - - :: - - Scan < - num_scan_inputs = m, - body = loop-body, - scan_input_axes = [axis_1, ..., axis_m] - > (init_1, ..., init_n, scan_1, ..., scan_m) - - is equivalent to the following pseudo-code: - - :: - - // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i - // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. - sequence_length = scan_1.shape[axis_1]; - - // initialize state-variables - st_1 = init_1; ... st_n = init_n; - // initialize scan-output variables: [] denotes an empty tensor - scan_out_1 = []; ...; scan_out_k = []; - // identify number of iterations: - - // execute loop - for (int t = 0; t < sequence_length; ++t) { - // generate the scan-input elements: the notation T[t] indicates the sub-tensor - // of rank one less than T obtained by indexing T at position t along axis k. - si_1 = scan_1[t]; - ... ; - si_m = scan_m[t]; - // execute loop-body - st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m) - // accumulate the scan-output elements - scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k); - } - - return st_1, ..., st_n, scan_out_1, ..., scan_out_k; - - *Sample usage: Encoding RNN using a Scan* - - The following example shows how a simple RNN over an input tensor %X, - with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi - and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. - Note that the loop-body is a nested graph, and it directly computes %Wi, - %Ri, %Wbi, and %Rbi (typically constants or initializers in the body - graph). If these values are computed in the outer graph, they need to be - passed in as extra state_variables. - - :: - - graph rnn-encoding { - %H_0 = ... - %X = ... 
- %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X) - return %Y, %Y_h - } - - graph rnn-cell-1 ( - %H_tminus1[FLOAT, tensor] - %X_t[FLOAT, tensor] - ) { - %Wi = ... - %Ri = ... - %Wbi = ... - %Rbi = ... - %t1 = X_t * (Wi^T) - %t2 = H_tminus1*(Ri^T) - %t3 = Add(%t1, %t2) - %t4 = Add(%t3, %Wbi) - %t5 = Add(%t4, %Rbi) - %Ht = Tanh(%t5) - %Accumulate = Identity(%Ht) - return %Ht, %Accumulate - } - - Parameters - ========== - initial_state_and_scan_inputs - Type V. - Initial values of the loop's N state variables followed by M scan_inputs - body - Attribute. - The graph run each iteration. It has N+M inputs: (loop state - variables..., scan_input_elts...). It has N+K outputs: (loop state - variables..., scan_output_elts...). Each scan_output is created by - concatenating the value of the specified scan_output_elt value at the - end of each iteration of the loop. It is an error if the dimensions of - these values change across loop iterations. - num_scan_inputs - Attribute. - An attribute specifying the number of scan_inputs M. - scan_input_axes - Attribute. - An optional list of M flags. The i-th element of the list specifies the - axis to be scanned (the sequence axis) for the i-th scan_input. If - omitted, 0 will be used as the scan axis for every scan_input. Negative - value for an axis means counting dimensions from the back. Accepted - range is [-r, r-1] where r = rank(input). - scan_input_directions - Attribute. - An optional list of M flags. The i-th element of the list specifies the - direction to be scanned for the i-th scan_input tensor: 0 indicates - forward direction and 1 indicates reverse direction. If omitted, all - scan_input tensors will be scanned in the forward direction. - scan_output_axes - Attribute. - An optional list of K flags. The i-th element of the list specifies the - axis for the i-th scan_output. The scan outputs are accumulated along - the specified axis. If omitted, 0 will be used as the scan axis for - every scan_output. Negative value for an axis means counting dimensions - from the back. Accepted range is [-r, r-1]. - scan_output_directions - Attribute. - An optional list of K flags, one for each scan_output. The i-th element - of the list specifies whether the i-th scan_output should be constructed - by appending or prepending a new value in each iteration: 0 indicates - appending and 1 indicates prepending. If omitted, all scan_output - tensors will be produced by appending a value in each iteration. - - Returns - ======= - final_state_and_scan_outputs : Sequence[Var] - Type V. - Final values of the loop's N state variables followed by K scan_outputs - - Notes - ===== - Signature: ``ai.onnx@16::Scan``. 
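# Illustration only (not part of the patch): a running sum written with the
# `scan` constructor documented above. The Python callback stands in for the
# `body` graph: it receives the current state followed by one scanned element
# and returns the updated state followed by one scan-output element. The
# opset module path is assumed to be `spox.opset.ai.onnx.v17`.
import numpy as np
import spox
import spox.opset.ai.onnx.v17 as op

acc0 = spox.argument(spox.Tensor(np.float32, ()))    # loop-carried state
xs = spox.argument(spox.Tensor(np.float32, ("T",)))  # scanned along axis 0

def running_sum(acc: spox.Var, x_t: spox.Var):
    new_acc = op.add(acc, x_t)
    return [new_acc, new_acc]  # (updated state, scan-output element)

# Returns the final state followed by the concatenated scan outputs.
final_acc, partial_sums = op.scan([acc0, xs], body=running_sum, num_scan_inputs=1)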
- - Type constraints: - - V: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - _body_subgraph: Graph = subgraph( - [ - Tensor( - var.unwrap_tensor().dtype, - (lambda x: x[1:] if x is not None else None)(var.unwrap_tensor().shape), - ) - for var in initial_state_and_scan_inputs[:num_scan_inputs] - ] - + [ - Tensor(var.unwrap_tensor().dtype) - for var in initial_state_and_scan_inputs[num_scan_inputs:] - ], - body, - ) - return ( - _Scan( - _Scan.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), - scan_input_axes=AttrInt64s.maybe( - scan_input_axes, name="scan_input_axes" - ), - scan_input_directions=AttrInt64s.maybe( - scan_input_directions, name="scan_input_directions" - ), - scan_output_axes=AttrInt64s.maybe( - scan_output_axes, name="scan_output_axes" - ), - scan_output_directions=AttrInt64s.maybe( - scan_output_directions, name="scan_output_directions" - ), - ), - _Scan.Inputs( - initial_state_and_scan_inputs=unwrap_vars( - initial_state_and_scan_inputs - ), - ), - out_variadic=len(_body_subgraph.requested_results), - ) - .get_output_vars( - initial_state_and_scan_inputs=get_value(initial_state_and_scan_inputs), - ) - .final_state_and_scan_outputs - ) - - -def scatter_elements( - data: Var, - indices: Var, - updates: Var, - *, - axis: int = 0, - reduction: str = "none", -) -> Var: - r""" - ScatterElements takes three inputs ``data``, ``updates``, and - ``indices`` of the same rank r >= 1 and an optional attribute axis that - identifies an axis of ``data`` (by default, the outer-most axis, that is - axis 0). The output of the operation is produced by creating a copy of - the input ``data``, and then updating its value to values specified by - ``updates`` at specific index positions specified by ``indices``. Its - output shape is the same as the shape of ``data``. For each entry in - ``updates``, the target index in ``data`` is obtained by combining the - corresponding entry in ``indices`` with the index of the entry itself: - the index-value for dimension = axis is obtained from the value of the - corresponding entry in ``indices`` and the index-value for dimension != - axis is obtained from the index of the entry itself. ``reduction`` - allows specification of an optional reduction operation, which is - applied to all values in ``updates`` tensor into ``output`` at the - specified ``indices``. In cases where ``reduction`` is set to "none", - indices should not have duplicate entries: that is, if idx1 != idx2, - then indices[idx1] != indices[idx2]. 
For instance, in a 2-D tensor case, - the update corresponding to the [i][j] entry is performed as below: - - :: - - output[indices[i][j]][j] = updates[i][j] if axis = 0, - output[i][indices[i][j]] = updates[i][j] if axis = 1, - - When ``reduction`` is set to "add", the update corresponding to the - [i][j] entry is performed as below: - - :: - - output[indices[i][j]][j] += updates[i][j] if axis = 0, - output[i][indices[i][j]] += updates[i][j] if axis = 1, - - When ``reduction`` is set to "mul", the update corresponding to the - [i][j] entry is performed as below: - - :: - - output[indices[i][j]][j] *= updates[i][j] if axis = 0, - output[i][indices[i][j]] *= updates[i][j] if axis = 1, - - This operator is the inverse of GatherElements. It is similar to Torch's - Scatter operation. Example 1: - - :: - - data = [ - [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0], - ] - indices = [ - [1, 0, 2], - [0, 2, 1], - ] - updates = [ - [1.0, 1.1, 1.2], - [2.0, 2.1, 2.2], - ] - output = [ - [2.0, 1.1, 0.0] - [1.0, 0.0, 2.2] - [0.0, 2.1, 1.2] - ] - - Example 2: - - :: - - data = [[1.0, 2.0, 3.0, 4.0, 5.0]] - indices = [[1, 3]] - updates = [[1.1, 2.1]] - axis = 1 - output = [[1.0, 1.1, 3.0, 2.1, 5.0]] - - Parameters - ========== - data - Type T. - Tensor of rank r >= 1. - indices - Type Tind. - Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index - values are expected to be within bounds [-s, s-1] along axis of size s. - It is an error if any of the index values are out of bounds. - updates - Type T. - Tensor of rank r >=1 (same rank and shape as indices) - axis - Attribute. - Which axis to scatter on. Negative value means counting dimensions from - the back. Accepted range is [-r, r-1] where r = rank(data). - reduction - Attribute. - Type of reduction to apply: none (default), add, mul. 'none': no - reduction applied. 'add': reduction using the addition operation. 'mul': - reduction using the multiplication operation. - - Returns - ======= - output : Var - Type T. - Tensor of rank r >= 1 (same rank as input). - - Notes - ===== - Signature: ``ai.onnx@16::ScatterElements``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - Tind: `tensor(int32)`, `tensor(int64)` - """ - return ( - _ScatterElements( - _ScatterElements.Attributes( - axis=AttrInt64(axis, name="axis"), - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterElements.Inputs( - data=unwrap_vars(data), - indices=unwrap_vars(indices), - updates=unwrap_vars(updates), - ), - ) - .get_output_vars( - data=get_value(data), - indices=get_value(indices), - updates=get_value(updates), - ) - .output - ) - - -def scatter_nd( - data: Var, - indices: Var, - updates: Var, - *, - reduction: str = "none", -) -> Var: - r""" - ScatterND takes three inputs ``data`` tensor of rank r >= 1, ``indices`` - tensor of rank q >= 1, and ``updates`` tensor of rank q + r - - indices.shape[-1] - 1. The output of the operation is produced by - creating a copy of the input ``data``, and then updating its value to - values specified by ``updates`` at specific index positions specified by - ``indices``. Its output shape is the same as the shape of ``data``. - - ``indices`` is an integer tensor. Let k denote indices.shape[-1], the - last dimension in the shape of ``indices``. 
``indices`` is treated as a - (q-1)-dimensional tensor of k-tuples, where each k-tuple is a - partial-index into ``data``. Hence, k can be a value at most the rank of - ``data``. When k equals rank(data), each update entry specifies an - update to a single element of the tensor. When k is less than rank(data) - each update entry specifies an update to a slice of the tensor. Index - values are allowed to be negative, as per the usual convention for - counting backwards from the end, but are expected in the valid range. - - ``updates`` is treated as a (q-1)-dimensional tensor of - replacement-slice-values. Thus, the first (q-1) dimensions of - updates.shape must match the first (q-1) dimensions of indices.shape. - The remaining dimensions of ``updates`` correspond to the dimensions of - the replacement-slice-values. Each replacement-slice-value is a (r-k) - dimensional tensor, corresponding to the trailing (r-k) dimensions of - ``data``. Thus, the shape of ``updates`` must equal indices.shape[0:q-1] - ++ data.shape[k:r-1], where ++ denotes the concatenation of shapes. - - The ``output`` is calculated via the following equation: output = - np.copy(data) update_indices = indices.shape[:-1] for idx in - np.ndindex(update_indices): output[indices[idx]] = updates[idx] The - order of iteration in the above loop is not specified. In particular, - indices should not have duplicate entries: that is, if idx1 != idx2, - then indices[idx1] != indices[idx2]. This ensures that the output value - does not depend on the iteration order. - - ``reduction`` allows specification of an optional reduction operation, - which is applied to all values in ``updates`` tensor into ``output`` at - the specified ``indices``. In cases where ``reduction`` is set to - "none", indices should not have duplicate entries: that is, if idx1 != - idx2, then indices[idx1] != indices[idx2]. This ensures that the output - value does not depend on the iteration order. When ``reduction`` is set - to "add", ``output`` is calculated as follows: output = np.copy(data) - update_indices = indices.shape[:-1] for idx in - np.ndindex(update_indices): output[indices[idx]] += updates[idx] When - ``reduction`` is set to "mul", ``output`` is calculated as follows: - output = np.copy(data) update_indices = indices.shape[:-1] for idx in - np.ndindex(update_indices): output[indices[idx]] \*= updates[idx] This - operator is the inverse of GatherND. Example 1: - - :: - - data = [1, 2, 3, 4, 5, 6, 7, 8] - indices = [[4], [3], [1], [7]] - updates = [9, 10, 11, 12] - output = [1, 11, 3, 10, 9, 6, 7, 12] - - Example 2: - - :: - - data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], - [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], - [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], - [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] - indices = [[0], [2]] - updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]] - output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], - [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], - [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] - - Parameters - ========== - data - Type T. - Tensor of rank r >= 1. - indices - Type tensor(int64). - Tensor of rank q >= 1. - updates - Type T. - Tensor of rank q + r - indices_shape[-1] - 1. - reduction - Attribute. - Type of reduction to apply: none (default), add, mul. 
'none': no - reduction applied. 'add': reduction using the addition operation. 'mul': - reduction using the multiplication operation. - - Returns - ======= - output : Var - Type T. - Tensor of rank r >= 1. - - Notes - ===== - Signature: ``ai.onnx@16::ScatterND``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _ScatterND( - _ScatterND.Attributes( - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterND.Inputs( - data=unwrap_vars(data), - indices=unwrap_vars(indices), - updates=unwrap_vars(updates), - ), - ) - .get_output_vars( - data=get_value(data), - indices=get_value(indices), - updates=get_value(updates), - ) - .output - ) - - -def selu( - X: Var, - *, - alpha: float = 1.6732631921768188, - gamma: float = 1.0507010221481323, -) -> Var: - r""" - Selu takes one input data (Tensor) and produces one output data - (Tensor) where the scaled exponential linear unit function, - ``y = gamma * (alpha * e^x - alpha) for x <= 0``, - ``y = gamma * x for x > 0``, is applied to the tensor elementwise. - - Parameters - ========== - X - Type T. - Input tensor - alpha - Attribute. - Coefficient of SELU default to 1.67326319217681884765625 (i.e., float32 - approximation of 1.6732632423543772848170429916717). - gamma - Attribute. - Coefficient of SELU default to 1.05070102214813232421875 (i.e., float32 - approximation of 1.0507009873554804934193349852946). - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@6::Selu``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Selu( - _Selu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - gamma=AttrFloat32(gamma, name="gamma"), - ), - _Selu.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def sequence_at( - input_sequence: Var, - position: Var, -) -> Var: - r""" - Outputs a tensor copy from the tensor at 'position' in 'input_sequence'. - Accepted range for 'position' is in ``[-n, n - 1]``, where ``n`` is the - number of tensors in 'input_sequence'. Negative value means counting - positions from the back. - - Parameters - ========== - input_sequence - Type S. - Input sequence. - position - Type I. - Position of the tensor in the sequence. Negative value means counting - positions from the back. Accepted range in ``[-n, n - 1]``, where ``n`` - is the number of tensors in 'input_sequence'. It is an error if any of - the index values are out of bounds. It must be a scalar(tensor of empty - shape). - - Returns - ======= - tensor : Var - Type T. - Output tensor at the specified position in the input sequence. - - Notes - ===== - Signature: ``ai.onnx@11::SequenceAt``. 
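# Illustration only (not part of the patch): constructing a tensor sequence
# and reading one element back out with `sequence_at`. The position is a
# scalar int64 tensor and may be negative to index from the back. The opset
# module path below is an assumption.
import numpy as np
import spox
import spox.opset.ai.onnx.v17 as op

a = spox.argument(spox.Tensor(np.float32, (2, 2)))
b = spox.argument(spox.Tensor(np.float32, (2, 2)))
seq = op.sequence_construct([a, b])
pos = spox.argument(spox.Tensor(np.int64, ()))
elem = op.sequence_at(seq, pos)  # tensor copy at `pos`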
- - Type constraints: - - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - - I: `tensor(int32)`, `tensor(int64)` - - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _SequenceAt( - _SequenceAt.Attributes(), - _SequenceAt.Inputs( - input_sequence=unwrap_vars(input_sequence), - position=unwrap_vars(position), - ), - ) - .get_output_vars( - input_sequence=get_value(input_sequence), - position=get_value(position), - ) - .tensor - ) - - -def sequence_construct( - inputs: Sequence[Var], -) -> Var: - r""" - Construct a tensor sequence containing 'inputs' tensors. All tensors in - 'inputs' must have the same data type. - - Parameters - ========== - inputs - Type T. - Tensors. - - Returns - ======= - output_sequence : Var - Type S. - Sequence enclosing the input tensors. - - Notes - ===== - Signature: ``ai.onnx@11::SequenceConstruct``. - - Type constraints: - - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - """ - return ( - _SequenceConstruct( - _SequenceConstruct.Attributes(), - _SequenceConstruct.Inputs( - inputs=unwrap_vars(inputs), - ), - ) - .get_output_vars( - inputs=get_value(inputs), - ) - .output_sequence - ) - - -def sequence_empty( - *, - dtype: Optional[npt.DTypeLike] = None, -) -> Var: - r""" - Construct an empty tensor sequence, with given data type. - - Parameters - ========== - dtype - Attribute. - (Optional) The data type of the tensors in the output sequence. The - default type is 'float'. - - Returns - ======= - output : Var - Type S. - Empty sequence. - - Notes - ===== - Signature: ``ai.onnx@11::SequenceEmpty``. - - Type constraints: - - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - """ - return ( - _SequenceEmpty( - _SequenceEmpty.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - ), - _SequenceEmpty.Inputs(), - ) - .get_output_vars() - .output - ) - - -def sequence_erase( - input_sequence: Var, - position: Optional[Var] = None, -) -> Var: - r""" - Outputs a tensor sequence that removes the tensor at 'position' from - 'input_sequence'. Accepted range for 'position' is in ``[-n, n - 1]``, - where ``n`` is the number of tensors in 'input_sequence'. 
Negative value - means counting positions from the back. 'position' is optional, by - default it erases the last tensor from 'input_sequence'. - - Parameters - ========== - input_sequence - Type S. - Input sequence. - position - Type I. - Position of the tensor in the sequence. Negative value means counting - positions from the back. Accepted range in ``[-n, n - 1]``, where ``n`` - is the number of tensors in 'input_sequence'. It is an error if any of - the index values are out of bounds. It must be a scalar(tensor of empty - shape). - - Returns - ======= - output_sequence : Var - Type S. - Output sequence that has the tensor at the specified position removed. - - Notes - ===== - Signature: ``ai.onnx@11::SequenceErase``. - - Type constraints: - - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - - I: `tensor(int32)`, `tensor(int64)` - """ - return ( - _SequenceErase( - _SequenceErase.Attributes(), - _SequenceErase.Inputs( - input_sequence=unwrap_vars(input_sequence), - position=unwrap_vars(position), - ), - ) - .get_output_vars( - input_sequence=get_value(input_sequence), - position=get_value(position), - ) - .output_sequence - ) - - -def sequence_insert( - input_sequence: Var, - tensor: Var, - position: Optional[Var] = None, -) -> Var: - r""" - Outputs a tensor sequence that inserts 'tensor' into 'input_sequence' at - 'position'. 'tensor' must have the same data type as 'input_sequence'. - Accepted range for 'position' is in ``[-n, n]``, where ``n`` is the - number of tensors in 'input_sequence'. Negative value means counting - positions from the back. 'position' is optional, by default it inserts - 'tensor' to the back of 'input_sequence'. - - Parameters - ========== - input_sequence - Type S. - Input sequence. - tensor - Type T. - Input tensor to be inserted into the input sequence. - position - Type I. - Position in the sequence where the new tensor is inserted. It is - optional and default is to insert to the back of the sequence. Negative - value means counting positions from the back. Accepted range in - ``[-n, n]``, where ``n`` is the number of tensors in 'input_sequence'. - It is an error if any of the index values are out of bounds. It must be - a scalar(tensor of empty shape). - - Returns - ======= - output_sequence : Var - Type S. - Output sequence that contains the inserted tensor at given position. - - Notes - ===== - Signature: ``ai.onnx@11::SequenceInsert``. 
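# Illustration only (not part of the patch): growing and shrinking a tensor
# sequence. Omitting `position` appends to, or erases from, the back of the
# sequence. The opset module path below is an assumption.
import numpy as np
import spox
import spox.opset.ai.onnx.v17 as op

seq = op.sequence_empty(dtype=np.float32)
t = spox.argument(spox.Tensor(np.float32, (3,)))
seq = op.sequence_insert(seq, t)   # append `t` at the back
n = op.sequence_length(seq)        # scalar int64 length of the sequence
seq = op.sequence_erase(seq)       # drop the last tensor again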
- - Type constraints: - - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - I: `tensor(int32)`, `tensor(int64)` - """ - return ( - _SequenceInsert( - _SequenceInsert.Attributes(), - _SequenceInsert.Inputs( - input_sequence=unwrap_vars(input_sequence), - tensor=unwrap_vars(tensor), - position=unwrap_vars(position), - ), - ) - .get_output_vars( - input_sequence=get_value(input_sequence), - tensor=get_value(tensor), - position=get_value(position), - ) - .output_sequence - ) - - -def sequence_length( - input_sequence: Var, -) -> Var: - r""" - Produces a scalar(tensor of empty shape) containing the number of - tensors in 'input_sequence'. - - Parameters - ========== - input_sequence - Type S. - Input sequence. - - Returns - ======= - length : Var - Type I. - Length of input sequence. It must be a scalar(tensor of empty shape). - - Notes - ===== - Signature: ``ai.onnx@11::SequenceLength``. - - Type constraints: - - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - - I: `tensor(int64)` - """ - return ( - _SequenceLength( - _SequenceLength.Attributes(), - _SequenceLength.Inputs( - input_sequence=unwrap_vars(input_sequence), - ), - ) - .get_output_vars( - input_sequence=get_value(input_sequence), - ) - .length - ) - - -def sequence_map( - input_sequence: Var, - additional_inputs: Sequence[Var] = (), - *, - body: Callable[..., Iterable[Var]], -) -> Sequence[Var]: - r""" - Applies a sub-graph to each sample in the input sequence(s). - - Inputs can be either tensors or sequences, with the exception of the - first input which must be a sequence. The length of the first input - sequence will determine the number of samples in the outputs. Any other - sequence inputs should have the same number of samples. The number of - inputs and outputs, should match the one of the subgraph. - - For each i-th element in the output, a sample will be extracted from the - input sequence(s) at the i-th position and the sub-graph will be applied - to it. The outputs will contain the outputs of the sub-graph for each - sample, in the same order as in the input. - - This operator assumes that processing each sample is independent and - could executed in parallel or in any order. Users cannot expect any - specific ordering in which each subgraph is computed. - - Parameters - ========== - input_sequence - Type S. - Input sequence. - additional_inputs - Type V. - Additional inputs to the graph - body - Attribute. - The graph to be run for each sample in the sequence(s). It should have - as many inputs and outputs as inputs and outputs to the SequenceMap - function. - - Returns - ======= - out_sequence : Sequence[Var] - Type S. 
- Output sequence(s) - - Notes - ===== - Signature: ``ai.onnx@17::SequenceMap``. - - Type constraints: - - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - _body_subgraph: Graph = subgraph( - [typing_cast(SpoxSequence, input_sequence.unwrap_type()).elem_type] - + [ - typing_cast(SpoxSequence, var.unwrap_type()).elem_type - for var in additional_inputs - ], - body, - ) - return ( - _SequenceMap( - _SequenceMap.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _SequenceMap.Inputs( - input_sequence=unwrap_vars(input_sequence), - additional_inputs=unwrap_vars(additional_inputs), - ), - out_variadic=len(_body_subgraph.requested_results), - ) - .get_output_vars( - input_sequence=get_value(input_sequence), - additional_inputs=get_value(additional_inputs), - ) - .out_sequence - ) - - -def shape( - data: Var, - *, - end: Optional[int] = None, - start: int = 0, -) -> Var: - r""" - Takes a tensor as input and outputs an 1D int64 tensor containing the - shape of the input tensor. Optional attributes start and end can be used - to compute a slice of the input tensor's shape. If start axis is - omitted, the slice starts from axis 0. The end axis, if specified, is - exclusive (and the returned value will not include the size of that - axis). If the end axis is omitted, the axes upto the last one will be - included. Negative axes indicate counting back from the last axis. Note - that axes will be clamped to the range [0, r-1], where r is the rank of - the input tensor if they are out-of-range (after adding r in the case of - negative axis). Thus, specifying any end value > r is equivalent to - specifying an end value of r, and specifying any start value < -r is - equivalent to specifying a start value of 0. - - Examples: - - :: - - Input tensor with shape: [2, 3, 4] - No attributes specified. - Output: [2, 3, 4] - - :: - - Input tensor with shape: [2, 3, 4] - start: -1 - Output: [4] - - :: - - Input tensor with shape: [2, 3, 4] - end: -1 - Output: [2, 3] - - :: - - Input tensor with shape: [2, 3, 4] - start: 1 - end: 2 - Output: [3] - - Parameters - ========== - data - Type T. - An input tensor. - end - Attribute. - (Optional) Ending axis for slicing the shape. Negative value means - counting dimensions from the back. If omitted, sizes of all axes upto - (including) the last one will be included. - start - Attribute. - (Optional) Starting axis for slicing the shape. Default value is - 0.Negative value means counting dimensions from the back. - - Returns - ======= - shape : Var - Type T1. - Shape of the input tensor - - Notes - ===== - Signature: ``ai.onnx@15::Shape``. 
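A quick sketch of the ``start``/``end`` slicing behaviour of ``shape`` described above (same import assumption as the earlier sketch; the input shape is illustrative)::

    import numpy as np
    import spox.opset.ai.onnx.v17 as op
    from spox import Tensor, argument

    x = argument(Tensor(np.float32, (2, 3, 4)))
    full = op.shape(x)             # [2, 3, 4]
    last = op.shape(x, start=-1)   # [4]
    head = op.shape(x, end=-1)     # [2, 3]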
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(int64)` - """ - return ( - _Shape( - _Shape.Attributes( - end=AttrInt64.maybe(end, name="end"), - start=AttrInt64(start, name="start"), - ), - _Shape.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .shape - ) - - -def shrink( - input: Var, - *, - bias: float = 0.0, - lambd: float = 0.5, -) -> Var: - r""" - Shrink takes one input data (Tensor) and produces one Tensor output, - having same datatype and shape with input. It has two attributes, lambd - and bias. The formula of this operator is: If x < -lambd, y = x + bias; - If x > lambd, y = x - bias; Otherwise, y = 0. - - Parameters - ========== - input - Type T. - The input data as Tensor. - bias - Attribute. - The bias value added to output. Default is 0. - lambd - Attribute. - The lambd value for the Shrink formulation. Default is 0.5. - - Returns - ======= - output : Var - Type T. - The output. - - Notes - ===== - Signature: ``ai.onnx@9::Shrink``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Shrink( - _Shrink.Attributes( - bias=AttrFloat32(bias, name="bias"), - lambd=AttrFloat32(lambd, name="lambd"), - ), - _Shrink.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def sigmoid( - X: Var, -) -> Var: - r""" - Sigmoid takes one input data (Tensor) and produces one output data - (Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is - applied to the tensor elementwise. - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@13::Sigmoid``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Sigmoid( - _Sigmoid.Attributes(), - _Sigmoid.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def sign( - input: Var, -) -> Var: - r""" - Calculate the sign of the given input tensor element-wise. If input > 0, - output 1. if input < 0, output -1. if input == 0, output 0. - - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The sign of the input tensor computed element-wise. It has the same - shape and type of the input. - - Notes - ===== - Signature: ``ai.onnx@13::Sign``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Sign( - _Sign.Attributes(), - _Sign.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def sin( - input: Var, -) -> Var: - r""" - Calculates the sine of the given input tensor, element-wise. - - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. 
- The sine of the input tensor computed element-wise - - Notes - ===== - Signature: ``ai.onnx@7::Sin``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Sin( - _Sin.Attributes(), - _Sin.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def sinh( - input: Var, -) -> Var: - r""" - Calculates the hyperbolic sine of the given input tensor element-wise. - - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The hyperbolic sine values of the input tensor computed element-wise - - Notes - ===== - Signature: ``ai.onnx@9::Sinh``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Sinh( - _Sinh.Attributes(), - _Sinh.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def size( - data: Var, -) -> Var: - r""" - Takes a tensor as input and outputs a int64 scalar that equals to the - total number of elements of the input tensor. - - Parameters - ========== - data - Type T. - An input tensor. - - Returns - ======= - size : Var - Type T1. - Total number of elements of the input tensor - - Notes - ===== - Signature: ``ai.onnx@13::Size``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(int64)` - """ - return ( - _Size( - _Size.Attributes(), - _Size.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .size - ) - - -def slice( - data: Var, - starts: Var, - ends: Var, - axes: Optional[Var] = None, - steps: Optional[Var] = None, -) -> Var: - r""" - Produces a slice of the input tensor along multiple axes. Similar to - numpy: - https://numpy.org/doc/stable/user/basics.indexing.html?highlight=slice#slicing-and-striding - - Slice uses the ``starts``, ``ends``, ``axes`` and ``steps`` inputs to - select a sub-tensor of its input ``data`` tensor. - - An effective ``starts[i]``, ``ends[i]``, and ``steps[i]`` must be - computed for each ``i`` in ``[0, ... r-1]`` where ``r = rank(input)`` as - follows: - - If ``axes`` are omitted, they are set to ``[0, ..., r-1]``. If ``steps`` - are omitted, they are set to ``[1, ..., 1]`` of length ``len(starts)`` - - The effective values are initialized as ``start[i] = 0``, - ``ends[i] = dims[i]`` where ``dims`` are the dimensions of ``input`` and - ``steps[i] = 1``. - - All negative elements of ``axes`` are made non-negative by adding ``r`` - to them, where ``r =rank(input)``. - - All negative values in ``starts[i]`` and ``ends[i]`` have - ``dims[axes[i]]`` added to them, where ``dims`` are the dimensions of - ``input``. Then ``start[axes[i]]`` is the adjusted ``starts[i]`` is - clamped into the range ``[0, dims[axes[i]]]`` for positive stepping and - ``[0, dims[axes[i]]-1]`` for negative stepping. - - The clamping for the adjusted ``ends[i]`` depends on the sign of - ``steps[i]`` and must accommodate copying 0 through ``dims[axes[i]]`` - elements, so for positive stepping ``ends[axes[i]]`` is clamped to - ``[0, dims[axes[i]]]``, while for negative stepping it is clamped to - ``[-1, dims[axes[i]]-1]``. - - Finally, ``steps[axes[i]] = steps[i]``. 
- - For slicing to the end of a dimension with unknown size, it is - recommended to pass in ``INT_MAX`` when slicing forward and 'INT_MIN' - when slicing backward. - - Example 1: - - :: - - data = [ - [1, 2, 3, 4], - [5, 6, 7, 8], - ] - axes = [0, 1] - starts = [1, 0] - ends = [2, 3] - steps = [1, 2] - result = [ - [5, 7], - ] - - Example 2: - - :: - - data = [ - [1, 2, 3, 4], - [5, 6, 7, 8], - ] - starts = [0, 1] - ends = [-1, 1000] - result = [ - [2, 3, 4], - ] - - Parameters - ========== - data - Type T. - Tensor of data to extract slices from. - starts - Type Tind. - 1-D tensor of starting indices of corresponding axis in ``axes`` - ends - Type Tind. - 1-D tensor of ending indices (exclusive) of corresponding axis in - ``axes`` - axes - Type Tind. - 1-D tensor of axes that ``starts`` and ``ends`` apply to. Negative value - means counting dimensions from the back. Accepted range is [-r, r-1] - where r = rank(data). Behavior is undefined if an axis is repeated. - steps - Type Tind. - 1-D tensor of slice step of corresponding axis in ``axes``. Negative - value means slicing backward. 'steps' cannot be 0. Defaults to 1s. - - Returns - ======= - output : Var - Type T. - Sliced data tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Slice``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - Tind: `tensor(int32)`, `tensor(int64)` - """ - return ( - _Slice( - _Slice.Attributes(), - _Slice.Inputs( - data=unwrap_vars(data), - starts=unwrap_vars(starts), - ends=unwrap_vars(ends), - axes=unwrap_vars(axes), - steps=unwrap_vars(steps), - ), - ) - .get_output_vars( - data=get_value(data), - starts=get_value(starts), - ends=get_value(ends), - axes=get_value(axes), - steps=get_value(steps), - ) - .output - ) - - -def softmax( - input: Var, - *, - axis: int = -1, -) -> Var: - r""" - The operator computes the normalized exponential values for the given - input: - - Softmax(input, axis) = Exp(input) / ReduceSum(Exp(input), axis=axis, - keepdims=1) - - The "axis" attribute indicates the dimension along which Softmax will be - performed. The output tensor has the same shape and contains the Softmax - values of the corresponding input. - - Parameters - ========== - input - Type T. - The input tensor of rank >= axis. - axis - Attribute. - Describes the dimension Softmax will be performed on. Negative value - means counting dimensions from the back. Accepted range is [-r, r-1] - where r = rank(input). - - Returns - ======= - output : Var - Type T. - The output values with the same shape as the input tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Softmax``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Softmax( - _Softmax.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Softmax.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def softmax_cross_entropy_loss( - scores: Var, - labels: Var, - weights: Optional[Var] = None, - *, - ignore_index: Optional[int] = None, - reduction: str = "mean", -) -> tuple[Var, Var]: - r""" - Loss function that measures the softmax cross entropy between 'scores' - and 'labels'. 
This operator first computes a loss tensor whose shape is - identical to the labels input. If the input is 2-D with shape (N, C), - the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N). If - the input is N-D tensor with shape (N, C, D1, D2, ..., Dk), the loss - tensor L may have (N, D1, D2, ..., Dk) as its shape and - L[i,][j_1][j_2]...[j_k] denotes a scalar element in L. After L is - available, this operator can optionally do a reduction operator. - - - shape(scores): (N, C) where C is the number of classes, or (N, C, D1, - D2,..., Dk), with K >= 1 in case of K-dimensional loss. - - shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, - D1, D2,..., Dk), with K >= 1 in case of K-dimensional loss. - - The loss for one sample, l_i, can calculated as follows: - - :: - - l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes. - - or - - :: - - l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided. - - loss is zero for the case when label-value equals ignore_index. - - :: - - l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index - - where: - - :: - - p = Softmax(scores) - y = Log(p) - c = labels[i][d1][d2]...[dk] - - Finally, L is optionally reduced: - - - If reduction = 'none', the output is L with shape (N, D1, D2, ..., - Dk). - - If reduction = 'sum', the output is scalar: Sum(L). - - If reduction = 'mean', the output is scalar: ReduceMean(L), or if - weight is provided: ``ReduceSum(L) / ReduceSum(W)``, where tensor W - is of shape ``(N, D1, D2, ..., Dk)`` and - ``W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]``. - - Parameters - ========== - scores - Type T. - The predicted outputs with shape [batch_size, class_size], or - [batch_size, class_size, D1, D2 , ..., Dk], where K is the number of - dimensions. - labels - Type Tind. - The ground truth output tensor, with shape [batch_size], or [batch_size, - D1, D2, ..., Dk], where K is the number of dimensions. Labels element - value shall be in range of [0, C). If ignore_index is specified, it may - have a value outside [0, C) and the label values should either be in the - range [0, C) or have the value ignore_index. - weights - Type T. - A manual rescaling weight given to each class. If given, it has to be a - 1D Tensor assigning weight to each of the classes. Otherwise, it is - treated as if having all ones. - ignore_index - Attribute. - Specifies a target value that is ignored and does not contribute to the - input gradient. It's an optional value. - reduction - Attribute. - Type of reduction to apply to loss: none, sum, mean(default). 'none': no - reduction will be applied, 'sum': the output will be summed. 'mean': the - sum of the output will be divided by the number of elements in the - output. - - Returns - ======= - output : Var - Type T. - Weighted loss float Tensor. If reduction is 'none', this has the shape - of [batch_size], or [batch_size, D1, D2, ..., Dk] in case of - K-dimensional loss. Otherwise, it is a scalar. - log_prob : Var - Type T. - Log probability tensor. If the output of softmax is prob, its value is - log(prob). - - Notes - ===== - Signature: ``ai.onnx@13::SoftmaxCrossEntropyLoss``. 
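``softmax_cross_entropy_loss`` is one of the constructors in this file that unpacks into a tuple of ``Var`` objects rather than a single output. A hedged sketch; the class count and the symbolic batch dimension are made up::

    import numpy as np
    import spox.opset.ai.onnx.v17 as op
    from spox import Tensor, argument

    scores = argument(Tensor(np.float32, ("N", 10)))  # logits over 10 classes
    labels = argument(Tensor(np.int64, ("N",)))       # class ids in [0, 10)

    loss, log_prob = op.softmax_cross_entropy_loss(scores, labels, reduction="mean")
    # loss is a scalar because reduction="mean"; log_prob matches the shape of scores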
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - Tind: `tensor(int32)`, `tensor(int64)` - """ - return ( - _SoftmaxCrossEntropyLoss( - _SoftmaxCrossEntropyLoss.Attributes( - ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), - reduction=AttrString(reduction, name="reduction"), - ), - _SoftmaxCrossEntropyLoss.Inputs( - scores=unwrap_vars(scores), - labels=unwrap_vars(labels), - weights=unwrap_vars(weights), - ), - ) - .get_output_vars( - scores=get_value(scores), - labels=get_value(labels), - weights=get_value(weights), - ) - ._unpack_to_any() - ) - - -def softplus( - X: Var, -) -> Var: - r""" - Softplus takes one input data (Tensor) and produces one output data - (Tensor) where the softplus function, y = ln(exp(x) + 1), is applied - to the tensor elementwise. - - Parameters - ========== - X - Type T. - 1D input tensor - - Returns - ======= - Y : Var - Type T. - 1D input tensor - - Notes - ===== - Signature: ``ai.onnx@1::Softplus``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Softplus( - _Softplus.Attributes(), - _Softplus.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def softsign( - input: Var, -) -> Var: - r""" - Calculates the softsign (x/(1+|x\|)) of the given input tensor - element-wise. - - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The softsign (x/(1+|x\|)) values of the input tensor computed - element-wise - - Notes - ===== - Signature: ``ai.onnx@1::Softsign``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Softsign( - _Softsign.Attributes(), - _Softsign.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def space_to_depth( - input: Var, - *, - blocksize: int, -) -> Var: - r""" - SpaceToDepth rearranges blocks of spatial data into depth. More - specifically, this op outputs a copy of the input tensor where values - from the height and width dimensions are moved to the depth dimension. - - Parameters - ========== - input - Type T. - Input tensor of [N,C,H,W], where N is the batch axis, C is the channel - or depth, H is the height and W is the width. - blocksize - Attribute. - Blocks of [blocksize, blocksize] are moved. - - Returns - ======= - output : Var - Type T. - Output tensor of [N, C \* blocksize \* blocksize, H/blocksize, - W/blocksize]. - - Notes - ===== - Signature: ``ai.onnx@13::SpaceToDepth``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _SpaceToDepth( - _SpaceToDepth.Attributes( - blocksize=AttrInt64(blocksize, name="blocksize"), - ), - _SpaceToDepth.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def split( - input: Var, - split: Optional[Var] = None, - *, - outputs_count: int, - axis: int = 0, -) -> Sequence[Var]: - r""" - Split a tensor into a list of tensors, along the specified 'axis'. - Lengths of the parts can be specified using input 'split'. Otherwise, - the tensor is split to equal sized parts. - - Parameters - ========== - input - Type T. 
- The tensor to split - split - Type tensor(int64). - Optional length of each output. Values should be >= 0.Sum of the values - must be equal to the dim value at 'axis' specified. - axis - Attribute. - Which axis to split on. A negative value means counting dimensions from - the back. Accepted range is [-rank, rank-1] where r = rank(input). - outputs_count - Specifies the number of variadic outputs of this operator. - Non-standard parameter created by the opset generator, as inference (a solution) it was not implemented or is impossible. - - Returns - ======= - outputs : Sequence[Var] - Type T. - One or more outputs forming list of tensors after splitting - - Notes - ===== - Signature: ``ai.onnx@13::Split``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Split( - _Split.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Split.Inputs( - input=unwrap_vars(input), - split=unwrap_vars(split), - ), - out_variadic=outputs_count, - ) - .get_output_vars( - input=get_value(input), - split=get_value(split), - ) - .outputs - ) - - -def split_to_sequence( - input: Var, - split: Optional[Var] = None, - *, - axis: int = 0, - keepdims: int = 1, -) -> Var: - r""" - Split a tensor into a sequence of tensors, along the specified 'axis'. - Lengths of the parts can be specified using the optional argument - 'split'. If the argument - ``split' is not specified, a default scalar value of 1 is used as the value of``\ split'. - 'split' must contain only positive numbers. 'split' is either a scalar - (tensor of empty shape), or a 1-D tensor. If 'split' is a scalar, then - 'input' will be split into chunks all of size 'split' if possible. The - last chunk alone may be smaller than 'split' if the 'input' size along - the given axis 'axis' is not divisible by 'split'. If 'split' is a - 1-dimensional tensor, the input tensor is split into 'size(split)' - chunks, with lengths of the parts on 'axis' specified in 'split'. In - this scenario, the sum of entries in 'split' must be equal to the - dimension size of input tensor on 'axis'. - - Parameters - ========== - input - Type T. - The tensor to split - split - Type I. - Length of each output. It can be either a scalar(tensor of empty shape), - or a 1-D tensor. All values must be >= 0. - axis - Attribute. - Which axis to split on. A negative value means counting dimensions from - the back. Accepted range is [-rank, rank-1]. - keepdims - Attribute. - Keep the split dimension or not. Default 1, which means we keep split - dimension. If input 'split' is specified, this attribute is ignored. - - Returns - ======= - output_sequence : Var - Type S. - One or more outputs forming a sequence of tensors after splitting - - Notes - ===== - Signature: ``ai.onnx@11::SplitToSequence``. 
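Because the number of variadic outputs cannot be inferred, the generated ``split`` wrapper asks the caller for ``outputs_count`` explicitly. A sketch of how that reads at the call site (same assumptions as the earlier sketches)::

    import numpy as np
    import spox.opset.ai.onnx.v17 as op
    from spox import Tensor, argument

    x = argument(Tensor(np.float32, (6, 2)))
    a, b, c = op.split(x, axis=0, outputs_count=3)  # three equal (2, 2) parts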
- - Type constraints: - - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - I: `tensor(int32)`, `tensor(int64)` - - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - """ - return ( - _SplitToSequence( - _SplitToSequence.Attributes( - axis=AttrInt64(axis, name="axis"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _SplitToSequence.Inputs( - input=unwrap_vars(input), - split=unwrap_vars(split), - ), - ) - .get_output_vars( - input=get_value(input), - split=get_value(split), - ) - .output_sequence - ) - - -def sqrt( - X: Var, -) -> Var: - r""" - Square root takes one input data (Tensor) and produces one output - data (Tensor) where the square root is, y = x^0.5, is applied to the - tensor elementwise. If x is negative, then it will return NaN. - - Parameters - ========== - X - Type T. - Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@13::Sqrt``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Sqrt( - _Sqrt.Attributes(), - _Sqrt.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def squeeze( - data: Var, - axes: Optional[Var] = None, -) -> Var: - r""" - Remove single-dimensional entries from the shape of a tensor. Takes an - input ``axes`` with a list of axes to squeeze. If ``axes`` is not - provided, all the single dimensions will be removed from the shape. If - an axis is selected with shape entry not equal to one, an error is - raised. - - Parameters - ========== - data - Type T. - Tensors with at least max(dims) dimensions. - axes - Type tensor(int64). - List of integers indicating the dimensions to squeeze. Negative value - means counting dimensions from the back. Accepted range is [-r, r-1] - where r = rank(data). - - Returns - ======= - squeezed : Var - Type T. - Reshaped tensor with same data as input. - - Notes - ===== - Signature: ``ai.onnx@13::Squeeze``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Squeeze( - _Squeeze.Attributes(), - _Squeeze.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .squeezed - ) - - -def string_normalizer( - X: Var, - *, - case_change_action: str = "NONE", - is_case_sensitive: int = 0, - locale: Optional[str] = None, - stopwords: Optional[Iterable[str]] = None, -) -> Var: - r""" - StringNormalization performs string operations for basic cleaning. This - operator has only one input (denoted by X) and only one output (denoted - by Y). This operator first examines the elements in the X, and removes - elements specified in "stopwords" attribute. 
After removing stop words, - the intermediate result can be further lowercased, uppercased, or just - returned depending the "case_change_action" attribute. This operator - only accepts [C]- and [1, C]-tensor. If all elements in X are dropped, - the output will be the empty value of string tensor with shape [1] if - input shape is [C] and shape [1, 1] if input shape is [1, C]. - - Parameters - ========== - X - Type tensor(string). - UTF-8 strings to normalize - case_change_action - Attribute. - string enum that cases output to be lowercased/uppercases/unchanged. - Valid values are "LOWER", "UPPER", "NONE". Default is "NONE" - is_case_sensitive - Attribute. - Boolean. Whether the identification of stop words in X is - case-sensitive. Default is false - locale - Attribute. - Environment dependent string that denotes the locale according to which - output strings needs to be upper/lowercased.Default en_US or platform - specific equivalent as decided by the implementation. - stopwords - Attribute. - List of stop words. If not set, no word would be removed from X. - - Returns - ======= - Y : Var - Type tensor(string). - UTF-8 Normalized strings - - Notes - ===== - Signature: ``ai.onnx@10::StringNormalizer``. - - """ - return ( - _StringNormalizer( - _StringNormalizer.Attributes( - case_change_action=AttrString( - case_change_action, name="case_change_action" - ), - is_case_sensitive=AttrInt64( - is_case_sensitive, name="is_case_sensitive" - ), - locale=AttrString.maybe(locale, name="locale"), - stopwords=AttrStrings.maybe(stopwords, name="stopwords"), - ), - _StringNormalizer.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def sub( - A: Var, - B: Var, -) -> Var: - r""" - Performs element-wise binary subtraction (with Numpy-style broadcasting - support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - (Opset 14 change): Extend supported types to include uint8, int8, - uint16, and int16. - - Parameters - ========== - A - Type T. - First operand. - B - Type T. - Second operand. - - Returns - ======= - C : Var - Type T. - Result, has same element type as two inputs - - Notes - ===== - Signature: ``ai.onnx@14::Sub``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Sub( - _Sub.Attributes(), - _Sub.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def sum( - data_0: Sequence[Var], -) -> Var: - r""" - Element-wise sum of each of the input tensors (with Numpy-style - broadcasting support). All inputs and outputs must have the same data - type. This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - data_0 - Type T. - List of tensors for sum. - - Returns - ======= - sum : Var - Type T. - Output tensor. - - Notes - ===== - Signature: ``ai.onnx@13::Sum``. 
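A small sketch contrasting the binary ``sub`` with the variadic ``sum``, which takes a Python sequence of ``Var`` objects (import assumption as before; shapes illustrative)::

    import numpy as np
    import spox.opset.ai.onnx.v17 as op
    from spox import Tensor, argument

    a = argument(Tensor(np.float32, (3, 4)))
    b = argument(Tensor(np.float32, (4,)))     # broadcast against (3, 4)
    c = argument(Tensor(np.float32, (3, 4)))

    d = op.sub(a, b)           # element-wise, Numpy-style broadcasting
    total = op.sum([a, c, d])  # variadic input passed as a list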
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Sum( - _Sum.Attributes(), - _Sum.Inputs( - data_0=unwrap_vars(data_0), - ), - ) - .get_output_vars( - data_0=get_value(data_0), - ) - .sum - ) - - -def tan( - input: Var, -) -> Var: - r""" - Calculates the tangent of the given input tensor, element-wise. - - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The tangent of the input tensor computed element-wise - - Notes - ===== - Signature: ``ai.onnx@7::Tan``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Tan( - _Tan.Attributes(), - _Tan.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def tanh( - input: Var, -) -> Var: - r""" - Calculates the hyperbolic tangent of the given input tensor - element-wise. - - Parameters - ========== - input - Type T. - Input tensor - - Returns - ======= - output : Var - Type T. - The hyperbolic tangent values of the input tensor computed element-wise - - Notes - ===== - Signature: ``ai.onnx@13::Tanh``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Tanh( - _Tanh.Attributes(), - _Tanh.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def tf_idf_vectorizer( - X: Var, - *, - max_gram_length: int, - max_skip_count: int, - min_gram_length: int, - mode: str, - ngram_counts: Iterable[int], - ngram_indexes: Iterable[int], - pool_int64s: Optional[Iterable[int]] = None, - pool_strings: Optional[Iterable[str]] = None, - weights: Optional[Iterable[float]] = None, -) -> Var: - r""" - This transform extracts n-grams from the input sequence and save them as - a vector. Input can be either a 1-D or 2-D tensor. For 1-D input, output - is the n-gram representation of that input. For 2-D input, the output is - also a 2-D tensor whose i-th row is the n-gram representation of the - i-th input row. More specifically, if input shape is [C], the - corresponding output shape would be [max(ngram_indexes) + 1]. If input - shape is [N, C], this operator produces a [N, max(ngram_indexes) + - 1]-tensor. - - In contrast to standard n-gram extraction, here, the indexes of - extracting an n-gram from the original sequence are not necessarily - consecutive numbers. The discontinuity between indexes are controlled by - the number of skips. If the number of skips is 2, we should skip two - tokens when scanning through the original sequence. Let's consider an - example. Assume that input sequence is [94, 17, 36, 12, 28] and the - number of skips is 2. The associated 2-grams are [94, 12] and [17, 28] - respectively indexed by [0, 3] and [1, 4]. If the number of skips - becomes 0, the 2-grams generated are [94, 17], [17, 36], [36, 12], [12, - 28] indexed by [0, 1], [1, 2], [2, 3], [3, 4], respectively. - - The output vector (denoted by Y) stores the count of each n-gram; - Y[ngram_indexes[i]] indicates the times that the i-th n-gram is found. - The attribute ngram_indexes is used to determine the mapping between - index i and the corresponding n-gram's output coordinate. If pool_int64s - is [94, 17, 17, 36], ngram_indexes is [1, 0], ngram_counts=[0, 0], then - the Y[0] (first element in Y) and Y[1] (second element in Y) are the - counts of [17, 36] and [94, 17], respectively. 
An n-gram which cannot be - found in pool_strings/pool_int64s should be ignored and has no effect on - the output. Note that we may consider all skips up to S when generating - the n-grams. - - The examples used above are true if mode is "TF". If mode is "IDF", all - the counts larger than 1 would be truncated to 1 and the i-th element in - weights would be used to scale (by multiplication) the count of the i-th - n-gram in pool. If mode is "TFIDF", this operator first computes the - counts of all n-grams and then scale them by the associated values in - the weights attribute. - - Only one of pool_strings and pool_int64s can be set. If pool_int64s is - set, the input should be an integer tensor. If pool_strings is set, the - input must be a string tensor. - - Parameters - ========== - X - Type T. - Input for n-gram extraction - max_gram_length - Attribute. - Maximum n-gram length. If this value is 3, 3-grams will be used to - generate the output. - max_skip_count - Attribute. - Maximum number of items (integers/strings) to be skipped when - constructing an n-gram from X. If max_skip_count=1, min_gram_length=2, - max_gram_length=3, this operator may generate 2-grams with skip_count=0 - and skip_count=1, and 3-grams with skip_count=0 and skip_count=1 - min_gram_length - Attribute. - Minimum n-gram length. If this value is 2 and max_gram_length is 3, - output may contain counts of 2-grams and 3-grams. - mode - Attribute. - The weighting criteria. It can be one of "TF" (term frequency), "IDF" - (inverse document frequency), and "TFIDF" (the combination of TF and - IDF) - ngram_counts - Attribute. - The starting indexes of 1-grams, 2-grams, and so on in pool. It is - useful when determining the boundary between two consecutive collections - of n-grams. For example, if ngram_counts is [0, 17, 36], the first index - (zero-based) of 1-gram/2-gram/3-gram in pool are 0/17/36. This format is - essentially identical to CSR (or CSC) sparse matrix format, and we - choose to use this due to its popularity. - ngram_indexes - Attribute. - list of int64s (type: AttributeProto::INTS). This list is parallel to - the specified 'pool\_\*' attribute. The i-th element in ngram_indexes - indicate the coordinate of the i-th n-gram in the output tensor. - pool_int64s - Attribute. - List of int64 n-grams learned from the training set. Either this or - pool_strings attributes must be present but not both. It's an 1-D tensor - starting with the collections of all 1-grams and ending with the - collections of n-grams. The i-th element in pool stores the n-gram that - should be mapped to coordinate ngram_indexes[i] in the output vector. - pool_strings - Attribute. - List of strings n-grams learned from the training set. Either this or - pool_int64s attributes must be present but not both. It's an 1-D tensor - starting with the collections of all 1-grams and ending with the - collections of n-grams. The i-th element in pool stores the n-gram that - should be mapped to coordinate ngram_indexes[i] in the output vector. - weights - Attribute. - list of floats. This attribute stores the weight of each n-gram in pool. - The i-th element in weights is the weight of the i-th n-gram in pool. - Its length equals to the size of ngram_indexes. By default, weights is - an all-one tensor.This attribute is used when mode is "IDF" or "TFIDF" - to scale the associated word counts. - - Returns - ======= - Y : Var - Type T1. - Ngram results - - Notes - ===== - Signature: ``ai.onnx@9::TfIdfVectorizer``. 
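The attribute layout of ``tf_idf_vectorizer`` is easiest to see with a deliberately tiny configuration. The vocabulary below is invented and restricted to 1-grams, so ``ngram_counts`` has a single entry::

    import numpy as np
    import spox.opset.ai.onnx.v17 as op
    from spox import Tensor, argument

    tokens = argument(Tensor(np.int64, ("C",)))  # 1-D sequence of token ids
    counts = op.tf_idf_vectorizer(
        tokens,
        mode="TF",                # raw term-frequency counts
        min_gram_length=1,
        max_gram_length=1,        # 1-grams only
        max_skip_count=0,
        ngram_counts=[0],         # 1-grams start at pool index 0
        ngram_indexes=[0, 1, 2],  # i-th pool entry -> output coordinate i
        pool_int64s=[2, 3, 5],    # the learned 1-gram vocabulary
    )
    # counts has shape [max(ngram_indexes) + 1] == [3]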
- - Type constraints: - - T: `tensor(int32)`, `tensor(int64)`, `tensor(string)` - - T1: `tensor(float)` - """ - return ( - _TfIdfVectorizer( - _TfIdfVectorizer.Attributes( - max_gram_length=AttrInt64(max_gram_length, name="max_gram_length"), - max_skip_count=AttrInt64(max_skip_count, name="max_skip_count"), - min_gram_length=AttrInt64(min_gram_length, name="min_gram_length"), - mode=AttrString(mode, name="mode"), - ngram_counts=AttrInt64s(ngram_counts, name="ngram_counts"), - ngram_indexes=AttrInt64s(ngram_indexes, name="ngram_indexes"), - pool_int64s=AttrInt64s.maybe(pool_int64s, name="pool_int64s"), - pool_strings=AttrStrings.maybe(pool_strings, name="pool_strings"), - weights=AttrFloat32s.maybe(weights, name="weights"), - ), - _TfIdfVectorizer.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def thresholded_relu( - X: Var, - *, - alpha: float = 1.0, -) -> Var: - r""" - ThresholdedRelu takes one input data (Tensor) and produces one output - data (Tensor) where the rectified linear function, y = x for x > - alpha, y = 0 otherwise, is applied to the tensor elementwise. - - Parameters - ========== - X - Type T. - Input tensor - alpha - Attribute. - Threshold value - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@10::ThresholdedRelu``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _ThresholdedRelu( - _ThresholdedRelu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _ThresholdedRelu.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) +This operator returns the unique values or sliced unique subtensors of +the input tensor and three optional outputs. The first output tensor 'Y' +contains all unique values or subtensors of the input. The second +optional output tensor 'indices' contains indices of 'Y' elements' first +occurrence in 'X'. The third optional output tensor 'inverse_indices' +contains, for elements of 'X', its corresponding indices in 'Y'. The +fourth optional output tensor 'counts' contains the count of each +element of 'Y' in the input. +Outputs are either sorted in ascending order or optionally in the order +of the first occurrence of the values in the input. -def tile( - input: Var, - repeats: Var, -) -> Var: - r""" - Constructs a tensor by tiling a given tensor. This is the same as - function ``tile`` in Numpy, but no broadcast. For example A = [[1, 2], - [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]] - - Parameters - ========== - input - Type T. - Input tensor of any shape. - repeats - Type T1. - 1D int64 tensor of the same length as input's dimension number, includes - numbers of repeated copies along input's dimensions. - - Returns - ======= - output : Var - Type T. - Output tensor of the same dimensions and type as tensor input. - output_dim[i] = input_dim[i] \* repeats[i] - - Notes - ===== - Signature: ``ai.onnx@13::Tile``. 
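A sketch of ``tile`` with the repeat counts supplied as a runtime input rather than a constant (variable names illustrative)::

    import numpy as np
    import spox.opset.ai.onnx.v17 as op
    from spox import Tensor, argument

    x = argument(Tensor(np.int32, (2, 2)))
    repeats = argument(Tensor(np.int64, (2,)))  # one repeat count per input dimension
    tiled = op.tile(x, repeats)                 # output_dim[i] = input_dim[i] * repeats[i]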
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(int64)` - """ - return ( - _Tile( - _Tile.Attributes(), - _Tile.Inputs( - input=unwrap_vars(input), - repeats=unwrap_vars(repeats), - ), - ) - .get_output_vars( - input=get_value(input), - repeats=get_value(repeats), - ) - .output - ) - - -def top_k( - X: Var, - K: Var, - *, - axis: int = -1, - largest: int = 1, - sorted: int = 1, -) -> tuple[Var, Var]: - r""" - Retrieve the top-K largest or smallest elements along a specified axis. - Given an input tensor of shape [a_0, a_1, ..., a\_{n-1}] and integer - argument k, return two outputs: - - - Value tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1}, - ... a\_{n-1}] which contains the values of the top k elements along - the specified axis - - - Index tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1}, - ... a\_{n-1}] which contains the indices of the top k elements - (original indices from the input tensor). - - - If "largest" is 1 (the default value) then the k largest elements are - returned. - - - If "sorted" is 1 (the default value) then the resulting k elements - will be sorted. - - - If "sorted" is 0, order of returned 'Values' and 'Indices' are - undefined. - - Given two equivalent values, this operator uses the indices along the - axis as a tiebreaker. That is, the element with the lower index will - appear first. - - Parameters - ========== - X - Type T. - Tensor of shape [a_0, a_1, ..., a\_{n-1}] - K - Type tensor(int64). - A 1-D tensor containing a single positive value corresponding to the - number of top elements to retrieve - axis - Attribute. - Dimension on which to do the sort. Negative value means counting - dimensions from the back. Accepted range is [-r, r-1] where r = - rank(input). - largest - Attribute. - Whether to return the top-K largest or smallest elements. - sorted - Attribute. - Whether to return the elements in sorted order. - - Returns - ======= - Values : Var - Type T. - Tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1}, ... - a\_{n-1}] containing top K values from the input tensor - Indices : Var - Type I. - Tensor of shape [a_0, a_1, ..., a\_{axis-1}, k, a\_{axis+1}, ... - a\_{n-1}] containing the corresponding input tensor indices for the top - K values. - - Notes - ===== - Signature: ``ai.onnx@11::TopK``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - I: `tensor(int64)` - """ - return ( - _TopK( - _TopK.Attributes( - axis=AttrInt64(axis, name="axis"), - largest=AttrInt64(largest, name="largest"), - sorted=AttrInt64(sorted, name="sorted"), - ), - _TopK.Inputs( - X=unwrap_vars(X), - K=unwrap_vars(K), - ), - ) - .get_output_vars( - X=get_value(X), - K=get_value(K), - ) - ._unpack_to_any() - ) - - -def transpose( - data: Var, - *, - perm: Optional[Iterable[int]] = None, -) -> Var: - r""" - Transpose the input tensor similar to numpy.transpose. For example, when - perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output - shape will be (2, 1, 3). - - Parameters - ========== - data - Type T. - An input tensor. - perm - Attribute. - A list of integers. 
By default, reverse the dimensions, otherwise - permute the axes according to the values given. - - Returns - ======= - transposed : Var - Type T. - Transposed output. - - Notes - ===== - Signature: ``ai.onnx@13::Transpose``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Transpose( - _Transpose.Attributes( - perm=AttrInt64s.maybe(perm, name="perm"), - ), - _Transpose.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .transposed - ) - - -def trilu( - input: Var, - k: Optional[Var] = None, - *, - upper: int = 1, -) -> Var: - r""" - Given a 2-D matrix or batches of 2-D matrices, returns the upper or - lower triangular part of the tensor(s). The attribute "upper" determines - whether the upper or lower part is retained. If set to true, the upper - triangular matrix is retained. Lower triangular matrix is retained - otherwise. Default value for the "upper" attribute is true. Trilu takes - one input tensor of shape [\*, N, M], where \* is zero or more batch - dimensions. The upper triangular part consists of the elements on and - above the given diagonal (k). The lower triangular part consists of - elements on and below the diagonal. All other elements in the matrix are - set to zero. If k = 0, the triangular part on and above/below the main - diagonal is retained. If upper is set to true, a positive k retains the - upper triangular matrix excluding the main diagonal and (k-1) diagonals - above it. A negative k value retains the main diagonal and \|k\| - diagonals below it. If upper is set to false, a positive k retains the - lower triangular matrix including the main diagonal and k diagonals - above it. A negative k value excludes the main diagonal and (\|k\|-1) - diagonals below it. - - Parameters - ========== - input - Type T. - Input tensor of rank 2 or higher. - k - Type tensor(int64). - A 0-D tensor containing a single value corresponding to the number - diagonals above or below the main diagonal to exclude or include. - Default value is 0 if it's not specified. - upper - Attribute. - Boolean. Indicates whether upper or lower part of matrix is retained. - Default is true. - - Returns - ======= - output : Var - Type T. - Output tensor of the same type and shape as the input tensor. - - Notes - ===== - Signature: ``ai.onnx@14::Trilu``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Trilu( - _Trilu.Attributes( - upper=AttrInt64(upper, name="upper"), - ), - _Trilu.Inputs( - input=unwrap_vars(input), - k=unwrap_vars(k), - ), - ) - .get_output_vars( - input=get_value(input), - k=get_value(k), - ) - .output - ) - - -def unique( - X: Var, - *, - axis: Optional[int] = None, - sorted: int = 1, -) -> tuple[Var, Var, Var, Var]: - r""" - Find the unique elements of a tensor. When an optional attribute 'axis' - is provided, unique subtensors sliced along the 'axis' are returned. - Otherwise the input tensor is flattened and unique values of the - flattened tensor are returned. 
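``top_k``, documented a little earlier in this hunk, also unpacks into multiple ``Var`` objects. A sketch with a runtime ``K`` (the symbolic batch dimension is assumed)::

    import numpy as np
    import spox.opset.ai.onnx.v17 as op
    from spox import Tensor, argument

    x = argument(Tensor(np.float32, ("N", 100)))
    k = argument(Tensor(np.int64, (1,)))  # number of elements to keep
    values, indices = op.top_k(x, k, axis=-1, largest=1, sorted=1)
    # along the last axis, both outputs have k entries per row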
- - This operator returns the unique values or sliced unique subtensors of - the input tensor and three optional outputs. The first output tensor 'Y' - contains all unique values or subtensors of the input. The second - optional output tensor 'indices' contains indices of 'Y' elements' first - occurrence in 'X'. The third optional output tensor 'inverse_indices' - contains, for elements of 'X', its corresponding indices in 'Y'. The - fourth optional output tensor 'counts' contains the count of each - element of 'Y' in the input. - - Outputs are either sorted in ascending order or optionally in the order - of the first occurrence of the values in the input. - - https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html - - Example 1: - - :: - - input_X = [2, 1, 1, 3, 4, 3] - attribute_sorted = 0 - attribute_axis = None - output_Y = [2, 1, 3, 4] - output_indices = [0, 1, 3, 4] - output_inverse_indices = [0, 1, 1, 2, 3, 2] - output_counts = [1, 2, 2, 1] - - Example 2: - - :: +https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html - input_X = [[1, 3], [2, 3]] - attribute_sorted = 1 - attribute_axis = None - output_Y = [1, 2, 3] - output_indices = [0, 2, 1] - output_inverse_indices = [0, 2, 1, 2] - output_counts = [1, 1, 2] +Example 1: - Example 3: +:: - :: + input_X = [2, 1, 1, 3, 4, 3] + attribute_sorted = 0 + attribute_axis = None + output_Y = [2, 1, 3, 4] + output_indices = [0, 1, 3, 4] + output_inverse_indices = [0, 1, 1, 2, 3, 2] + output_counts = [1, 2, 2, 1] - input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]] - attribute_sorted = 1 - attribute_axis = 0 - output_Y = [[1, 0, 0], [2, 3, 4]] - output_indices = [0, 2] - output_inverse_indices = [0, 0, 1] - output_counts = [2, 1] +Example 2: - Example 4: +:: + + input_X = [[1, 3], [2, 3]] + attribute_sorted = 1 + attribute_axis = None + output_Y = [1, 2, 3] + output_indices = [0, 2, 1] + output_inverse_indices = [0, 2, 1, 2] + output_counts = [1, 1, 2] + +Example 3: + +:: + + input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]] + attribute_sorted = 1 + attribute_axis = 0 + output_Y = [[1, 0, 0], [2, 3, 4]] + output_indices = [0, 2] + output_inverse_indices = [0, 0, 1] + output_counts = [2, 1] + +Example 4: + +:: + + input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]], + [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]] + attribute_sorted = 1 + attribute_axis = 1 + +intermediate data are presented below for better understanding: there +are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)): + +:: + + A: [[1, 1], [1, 1]], + [[0, 1], [0, 1]], + [[2, 1], [2, 1]], + [[0, 1], [0, 1]]. - :: +there are 3 unique subtensors: - input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]], - [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]] - attribute_sorted = 1 - attribute_axis = 1 +:: - intermediate data are presented below for better understanding: there - are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)): + [[1, 1], [1, 1]], + [[0, 1], [0, 1]], + [[2, 1], [2, 1]]. - :: +sorted unique subtensors: - A: [[1, 1], [1, 1]], - [[0, 1], [0, 1]], - [[2, 1], [2, 1]], - [[0, 1], [0, 1]]. +:: - there are 3 unique subtensors: + B: [[0, 1], [0, 1]], + [[1, 1], [1, 1]], + [[2, 1], [2, 1]]. - :: +output_Y is constructed from B: - [[1, 1], [1, 1]], - [[0, 1], [0, 1]], - [[2, 1], [2, 1]]. +:: - sorted unique subtensors: + [[[0. 1.], [1. 1.], [2. 1.]], + [[0. 1.], [1. 1.], [2. 
1.]]] - :: +output_indices is to map from B to A: + +:: + + [1, 0, 2] + +output_inverse_indices is to map from A to B: + +:: + + [1, 0, 2, 0] + +output_counts: + +:: - B: [[0, 1], [0, 1]], - [[1, 1], [1, 1]], - [[2, 1], [2, 1]]. + [2, 1, 1] + +Parameters +========== +X + Type T. + A N-D input tensor that is to be processed. +axis + Attribute. + (Optional) The dimension to apply unique. If not specified, the unique + elements of the flattened input are returned. Negative value means + counting dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). +sorted + Attribute. + (Optional) Whether to sort the unique elements in ascending order before + returning as output. Must be one of 0, or 1 (default). - output_Y is constructed from B: - - :: - - [[[0. 1.], [1. 1.], [2. 1.]], - [[0. 1.], [1. 1.], [2. 1.]]] - - output_indices is to map from B to A: - - :: - - [1, 0, 2] - - output_inverse_indices is to map from A to B: - - :: - - [1, 0, 2, 0] - - output_counts: - - :: - - [2, 1, 1] - - Parameters - ========== - X - Type T. - A N-D input tensor that is to be processed. - axis - Attribute. - (Optional) The dimension to apply unique. If not specified, the unique - elements of the flattened input are returned. Negative value means - counting dimensions from the back. Accepted range is [-r, r-1] where r = - rank(input). - sorted - Attribute. - (Optional) Whether to sort the unique elements in ascending order before - returning as output. Must be one of 0, or 1 (default). - - Returns - ======= - Y : Var - Type T. - A tensor of the same type as 'X' containing all the unique values or - subtensors sliced along a provided 'axis' in 'X', either sorted or - maintained in the same order they occur in input 'X' - indices : Var - Type tensor(int64). - A 1-D INT64 tensor containing indices of 'Y' elements' first occurrence - in 'X'. When 'axis' is provided, it contains indices to subtensors in - input 'X' on the 'axis'. When 'axis' is not provided, it contains - indices to values in the flattened input tensor. - inverse_indices : Var - Type tensor(int64). - A 1-D INT64 tensor containing, for elements of 'X', its corresponding - indices in 'Y'. When 'axis' is provided, it contains indices to - subtensors in output 'Y' on the 'axis'. When 'axis' is not provided, it - contains indices to values in output 'Y'. - counts : Var - Type tensor(int64). - A 1-D INT64 tensor containing the count of each element of 'Y' in input - 'X' - - Notes - ===== - Signature: ``ai.onnx@11::Unique``. - - Type constraints: - - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Unique( - _Unique.Attributes( - axis=AttrInt64.maybe(axis, name="axis"), - sorted=AttrInt64(sorted, name="sorted"), - ), - _Unique.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - ._unpack_to_any() - ) - - -def unsqueeze( - data: Var, - axes: Var, -) -> Var: - r""" - Insert single-dimensional entries to the shape of an input tensor - (``data``). Takes one required input ``axes`` - which contains a list of - dimension indices and this operator will insert a dimension of value - ``1`` into the corresponding index of the output tensor (``expanded``). 
- - For example, given an input tensor (``data``) of shape [3, 4, 5], then - Unsqueeze(data, axes=[0, 4]) outputs a tensor (``expanded``) containing - same data as ``data`` but with shape [1, 3, 4, 5, 1]. - - The input ``axes`` should not contain any duplicate entries. It is an - error if it contains duplicates. The rank of the output tensor - (``output_rank``) is the rank of the input tensor (``data``) plus the - number of values in ``axes``. Each value in ``axes`` should be within - the (inclusive) range [-output_rank , output_rank - 1]. The order of - values in ``axes`` does not matter and can come in any order. - - Parameters - ========== - data - Type T. - Original tensor - axes - Type tensor(int64). - List of integers indicating the dimensions to be inserted. Negative - value means counting dimensions from the back. Accepted range is [-r, - r-1] where r = rank(expanded). - - Returns - ======= - expanded : Var - Type T. - Reshaped tensor with same data as input. - - Notes - ===== - Signature: ``ai.onnx@13::Unsqueeze``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Unsqueeze( - _Unsqueeze.Attributes(), - _Unsqueeze.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .expanded - ) - - -def where( - condition: Var, - X: Var, - Y: Var, -) -> Var: - r""" - Return elements, either from X or Y, depending on condition. Where - behaves like - `numpy.where `__ - with three parameters. - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - condition - Type B. - When True (nonzero), yield X, otherwise yield Y - X - Type T. - values selected at indices where condition is True - Y - Type T. - values selected at indices where condition is False - - Returns - ======= - output : Var - Type T. - Tensor of shape equal to the broadcasted shape of condition, X, and Y. - - Notes - ===== - Signature: ``ai.onnx@16::Where``. - - Type constraints: - - B: `tensor(bool)` - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - return ( - _Where( - _Where.Attributes(), - _Where.Inputs( - condition=unwrap_vars(condition), - X=unwrap_vars(X), - Y=unwrap_vars(Y), - ), - ) - .get_output_vars( - condition=get_value(condition), - X=get_value(X), - Y=get_value(Y), - ) - .output - ) - - -def xor( - A: Var, - B: Var, -) -> Var: - r""" - Returns the tensor resulted from performing the ``xor`` logical - operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the logical operator. - B - Type T. - Second input operand for the logical operator. - - Returns - ======= - C : Var - Type T1. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@7::Xor``. 
- - Type constraints: - - T: `tensor(bool)` - - T1: `tensor(bool)` - """ - return ( - _Xor( - _Xor.Attributes(), - _Xor.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) +Returns +======= +Y : Var + Type T. + A tensor of the same type as 'X' containing all the unique values or + subtensors sliced along a provided 'axis' in 'X', either sorted or + maintained in the same order they occur in input 'X' +indices : Var + Type tensor(int64). + A 1-D INT64 tensor containing indices of 'Y' elements' first occurrence + in 'X'. When 'axis' is provided, it contains indices to subtensors in + input 'X' on the 'axis'. When 'axis' is not provided, it contains + indices to values in the flattened input tensor. +inverse_indices : Var + Type tensor(int64). + A 1-D INT64 tensor containing, for elements of 'X', its corresponding + indices in 'Y'. When 'axis' is provided, it contains indices to + subtensors in output 'Y' on the 'axis'. When 'axis' is not provided, it + contains indices to values in output 'Y'. +counts : Var + Type tensor(int64). + A 1-D INT64 tensor containing the count of each element of 'Y' in input + 'X' + +Notes +===== +Signature: ``ai.onnx@11::Unique``. + +Type constraints: + - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Unique( + _Unique.Attributes( + axis=AttrInt64.maybe(axis, name="axis"), + sorted=AttrInt64(sorted, name="sorted"), + ), _Unique.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), )._unpack_to_any() + + +def unsqueeze(data: Var, axes: Var, ) -> Var: + r""" +Insert single-dimensional entries to the shape of an input tensor +(``data``). Takes one required input ``axes`` - which contains a list of +dimension indices and this operator will insert a dimension of value +``1`` into the corresponding index of the output tensor (``expanded``). + +For example, given an input tensor (``data``) of shape [3, 4, 5], then +Unsqueeze(data, axes=[0, 4]) outputs a tensor (``expanded``) containing +same data as ``data`` but with shape [1, 3, 4, 5, 1]. + +The input ``axes`` should not contain any duplicate entries. It is an +error if it contains duplicates. The rank of the output tensor +(``output_rank``) is the rank of the input tensor (``data``) plus the +number of values in ``axes``. Each value in ``axes`` should be within +the (inclusive) range [-output_rank , output_rank - 1]. The order of +values in ``axes`` does not matter and can come in any order. + +Parameters +========== +data + Type T. + Original tensor +axes + Type tensor(int64). + List of integers indicating the dimensions to be inserted. Negative + value means counting dimensions from the back. Accepted range is [-r, + r-1] where r = rank(expanded). + +Returns +======= +expanded : Var + Type T. + Reshaped tensor with same data as input. + +Notes +===== +Signature: ``ai.onnx@13::Unsqueeze``. 
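For reference, a minimal sketch of how the ``unique`` constructor documented above is typically called through spox's public API. The ``argument``/``Tensor`` entry points and the ``op`` module alias are assumptions following spox's usual usage and are not part of this diff; the four unpacked results correspond to Y, indices, inverse_indices and counts as described in the docstring.

::

    import numpy as np

    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op

    # 1-D input of symbolic length "N"
    x = argument(Tensor(np.float32, ("N",)))

    # `unique` unpacks its four outputs into separate Vars
    y, indices, inverse_indices, counts = op.unique(x, sorted=1)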
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Unsqueeze( + _Unsqueeze.Attributes( + ), _Unsqueeze.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).expanded + + +def where(condition: Var, X: Var, Y: Var, ) -> Var: + r""" +Return elements, either from X or Y, depending on condition. Where +behaves like +`numpy.where `__ +with three parameters. + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +condition + Type B. + When True (nonzero), yield X, otherwise yield Y +X + Type T. + values selected at indices where condition is True +Y + Type T. + values selected at indices where condition is False + +Returns +======= +output : Var + Type T. + Tensor of shape equal to the broadcasted shape of condition, X, and Y. + +Notes +===== +Signature: ``ai.onnx@16::Where``. + +Type constraints: + - B: `tensor(bool)` + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _Where( + _Where.Attributes( + ), _Where.Inputs( + condition=unwrap_vars(condition), X=unwrap_vars(X), Y=unwrap_vars(Y), ), ).get_output_vars( + condition=get_value(condition), X=get_value(X), Y=get_value(Y), ).output + + +def xor(A: Var, B: Var, ) -> Var: + r""" +Returns the tensor resulted from performing the ``xor`` logical +operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the logical operator. +B + Type T. + Second input operand for the logical operator. + +Returns +======= +C : Var + Type T1. + Result tensor. + +Notes +===== +Signature: ``ai.onnx@7::Xor``. 
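As a companion to the ``unsqueeze``, ``where`` and ``xor`` signatures reformatted above, a short usage sketch; it again assumes the usual spox entry points and uses the ``const`` helper defined in this module, so it is an illustration rather than part of the generated code.

::

    import numpy as np

    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op

    cond = argument(Tensor(np.bool_, ("N",)))
    a = argument(Tensor(np.float32, ("N",)))
    b = argument(Tensor(np.float32, ("N",)))

    picked = op.where(cond, a, b)        # element-wise select over the broadcast shape
    flags = op.xor(cond, op.not_(cond))  # boolean Var
    col = op.unsqueeze(a, op.const(np.array([1], dtype=np.int64)))  # (N,) -> (N, 1)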
+ +Type constraints: + - T: `tensor(bool)` + - T1: `tensor(bool)` + """ + return _Xor( + _Xor.Attributes( + ), _Xor.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: @@ -17076,4 +14373,4 @@ def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: "Xor": xor, } -__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] + ["const"] +__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] + ["const"] diff --git a/src/spox/opset/ai/onnx/v18.py b/src/spox/opset/ai/onnx/v18.py index f8de6f3..318c81f 100644 --- a/src/spox/opset/ai/onnx/v18.py +++ b/src/spox/opset/ai/onnx/v18.py @@ -1,348 +1,200 @@ -# Copyright (c) QuantCo 2023-2024 -# SPDX-License-Identifier: BSD-3-Clause - # ruff: noqa: E741 -- Allow ambiguous variable name -from collections.abc import Iterable, Sequence +import typing +import warnings from dataclasses import dataclass +from collections.abc import Iterable, Sequence from typing import ( + Any, + Callable, Optional, + Union, ) +from typing import cast as typing_cast import numpy as np import numpy.typing as npt +from spox._var import Var, VarInfo, result_type, unwrap_vars, get_value +from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._attributes import ( + AttrDtype, AttrFloat32, + AttrFloat32s, + AttrGraph, AttrInt64, AttrInt64s, AttrString, + AttrStrings, + AttrTensor, + AttrType, ) -from spox._fields import BaseAttributes, BaseInputs, BaseOutputs +from spox._graph import Graph, subgraph +from spox._internal_op import intro from spox._node import OpType -from spox._standard import StandardNode -from spox._var import Var, VarInfo, get_value, unwrap_vars -from spox.opset.ai.onnx.v17 import ( - _DFT, - _GRU, - _LRN, - _LSTM, - _RNN, - _STFT, - _Abs, - _Acos, - _Acosh, - _Add, - _And, - _ArgMax, - _ArgMin, - _Asin, - _Asinh, - _Atan, - _Atanh, - _AveragePool, - _BatchNormalization, - _Bernoulli, - _BitShift, - _BlackmanWindow, - _Cast, - _CastLike, - _Ceil, - _Celu, - _Clip, - _Compress, - _Concat, - _ConcatFromSequence, - _Constant, - _ConstantOfShape, - _Conv, - _ConvInteger, - _ConvTranspose, - _Cos, - _Cosh, - _CumSum, - _DepthToSpace, - _DequantizeLinear, - _Det, - _Div, - _Dropout, - _DynamicQuantizeLinear, - _Einsum, - _Elu, - _Equal, - _Erf, - _Exp, - _Expand, - _EyeLike, - _Flatten, - _Floor, - _Gather, - _GatherElements, - _GatherND, - _Gemm, - _GlobalAveragePool, - _GlobalLpPool, - _GlobalMaxPool, - _Greater, - _GreaterOrEqual, - _GridSample, - _HammingWindow, - _HannWindow, - _Hardmax, - _HardSigmoid, - _HardSwish, - _Identity, - _If, - _InstanceNormalization, - _IsInf, - _IsNaN, - _LayerNormalization, - _LeakyRelu, - _Less, - _LessOrEqual, - _Log, - _LogSoftmax, - _Loop, - _LpNormalization, - _MatMul, - _MatMulInteger, - _Max, - _MaxPool, - _MaxRoiPool, - _MaxUnpool, - _Mean, - _MeanVarianceNormalization, - _MelWeightMatrix, - _Min, - _Mod, - _Mul, - _Multinomial, - _Neg, - _NegativeLogLikelihoodLoss, - _NonMaxSuppression, - _NonZero, - _Not, - _OneHot, - _Optional, - _Or, - _Pow, - _PRelu, - _QLinearConv, - _QLinearMatMul, - _QuantizeLinear, - _RandomNormal, - _RandomNormalLike, - _RandomUniform, - _RandomUniformLike, - _Range, - _Reciprocal, - _ReduceSum, - _Relu, - _Reshape, - _ReverseSequence, - _RoiAlign, - _Round, - _Scan, - _Selu, - _SequenceAt, - _SequenceConstruct, - _SequenceEmpty, - _SequenceErase, - _SequenceInsert, - _SequenceLength, - _SequenceMap, - _Shape, - _Shrink, - _Sigmoid, - _Sign, - 
_Sin, - _Sinh, - _Size, - _Slice, - _Softmax, - _SoftmaxCrossEntropyLoss, - _Softplus, - _Softsign, - _SpaceToDepth, - _SplitToSequence, - _Sqrt, - _Squeeze, - _StringNormalizer, - _Sub, - _Sum, - _Tan, - _Tanh, - _TfIdfVectorizer, - _ThresholdedRelu, - _Tile, - _TopK, - _Transpose, - _Trilu, - _Unique, - _Unsqueeze, - _Where, - _Xor, - abs, - acos, - acosh, - add, - and_, - arg_max, - arg_min, - asin, - asinh, - atan, - atanh, - average_pool, - batch_normalization, - bernoulli, - bit_shift, - blackman_window, - cast, - cast_like, - ceil, - celu, - clip, - compress, - concat, - concat_from_sequence, - constant, - constant_of_shape, - conv, - conv_integer, - conv_transpose, - cos, - cosh, - cumsum, - depth_to_space, - dequantize_linear, - det, - dft, - div, - dropout, - dynamic_quantize_linear, - einsum, - elu, - equal, - erf, - exp, - expand, - eye_like, - flatten, - floor, - gather, - gather_elements, - gather_nd, - gemm, - global_average_pool, - global_lp_pool, - global_max_pool, - greater, - greater_or_equal, - grid_sample, - gru, - hamming_window, - hann_window, - hard_sigmoid, - hard_swish, - hardmax, - identity, - if_, - instance_normalization, - isinf, - isnan, - layer_normalization, - leaky_relu, - less, - less_or_equal, - log, - log_softmax, - loop, - lp_normalization, - lrn, - lstm, - matmul, - matmul_integer, - max, - max_pool, - max_roi_pool, - max_unpool, - mean, - mean_variance_normalization, - mel_weight_matrix, - min, - mod, - mul, - multinomial, - neg, - negative_log_likelihood_loss, - non_max_suppression, - non_zero, - not_, - one_hot, - optional, - or_, - pow, - prelu, - qlinear_conv, - qlinear_matmul, - quantize_linear, - random_normal, - random_normal_like, - random_uniform, - random_uniform_like, - range, - reciprocal, - reduce_sum, - relu, - reshape, - reverse_sequence, - rnn, - roi_align, - round, - scan, - selu, - sequence_at, - sequence_construct, - sequence_empty, - sequence_erase, - sequence_insert, - sequence_length, - sequence_map, - shape, - shrink, - sigmoid, - sign, - sin, - sinh, - size, - slice, - softmax, - softmax_cross_entropy_loss, - softplus, - softsign, - space_to_depth, - split_to_sequence, - sqrt, - squeeze, - stft, - string_normalizer, - sub, - sum, - tan, - tanh, - tf_idf_vectorizer, - thresholded_relu, - tile, - top_k, - transpose, - trilu, - unique, - unsqueeze, - where, - xor, -) - - +from spox._standard import InferenceError, StandardNode +from spox._type_system import Tensor, Type, Sequence as SpoxSequence +from spox._value_prop import PropValueType + + +from spox.opset.ai.onnx.v17 import _Abs, abs +from spox.opset.ai.onnx.v17 import _Acos, acos +from spox.opset.ai.onnx.v17 import _Acosh, acosh +from spox.opset.ai.onnx.v17 import _Add, add +from spox.opset.ai.onnx.v17 import _And, and_ +from spox.opset.ai.onnx.v17 import _ArgMax, arg_max +from spox.opset.ai.onnx.v17 import _ArgMin, arg_min +from spox.opset.ai.onnx.v17 import _Asin, asin +from spox.opset.ai.onnx.v17 import _Asinh, asinh +from spox.opset.ai.onnx.v17 import _Atan, atan +from spox.opset.ai.onnx.v17 import _Atanh, atanh +from spox.opset.ai.onnx.v17 import _AveragePool, average_pool +from spox.opset.ai.onnx.v17 import _BatchNormalization, batch_normalization +from spox.opset.ai.onnx.v17 import _Bernoulli, bernoulli +from spox.opset.ai.onnx.v17 import _BitShift, bit_shift +from spox.opset.ai.onnx.v17 import _BlackmanWindow, blackman_window +from spox.opset.ai.onnx.v17 import _Cast, cast +from spox.opset.ai.onnx.v17 import _CastLike, cast_like +from spox.opset.ai.onnx.v17 import 
_Ceil, ceil +from spox.opset.ai.onnx.v17 import _Celu, celu +from spox.opset.ai.onnx.v17 import _Clip, clip +from spox.opset.ai.onnx.v17 import _Compress, compress +from spox.opset.ai.onnx.v17 import _Concat, concat +from spox.opset.ai.onnx.v17 import _ConcatFromSequence, concat_from_sequence +from spox.opset.ai.onnx.v17 import _Constant, constant +from spox.opset.ai.onnx.v17 import _ConstantOfShape, constant_of_shape +from spox.opset.ai.onnx.v17 import _Conv, conv +from spox.opset.ai.onnx.v17 import _ConvInteger, conv_integer +from spox.opset.ai.onnx.v17 import _ConvTranspose, conv_transpose +from spox.opset.ai.onnx.v17 import _Cos, cos +from spox.opset.ai.onnx.v17 import _Cosh, cosh +from spox.opset.ai.onnx.v17 import _CumSum, cumsum +from spox.opset.ai.onnx.v17 import _DFT, dft +from spox.opset.ai.onnx.v17 import _DepthToSpace, depth_to_space +from spox.opset.ai.onnx.v17 import _DequantizeLinear, dequantize_linear +from spox.opset.ai.onnx.v17 import _Det, det +from spox.opset.ai.onnx.v17 import _Div, div +from spox.opset.ai.onnx.v17 import _Dropout, dropout +from spox.opset.ai.onnx.v17 import _DynamicQuantizeLinear, dynamic_quantize_linear +from spox.opset.ai.onnx.v17 import _Einsum, einsum +from spox.opset.ai.onnx.v17 import _Elu, elu +from spox.opset.ai.onnx.v17 import _Equal, equal +from spox.opset.ai.onnx.v17 import _Erf, erf +from spox.opset.ai.onnx.v17 import _Exp, exp +from spox.opset.ai.onnx.v17 import _Expand, expand +from spox.opset.ai.onnx.v17 import _EyeLike, eye_like +from spox.opset.ai.onnx.v17 import _Flatten, flatten +from spox.opset.ai.onnx.v17 import _Floor, floor +from spox.opset.ai.onnx.v17 import _GRU, gru +from spox.opset.ai.onnx.v17 import _Gather, gather +from spox.opset.ai.onnx.v17 import _GatherElements, gather_elements +from spox.opset.ai.onnx.v17 import _GatherND, gather_nd +from spox.opset.ai.onnx.v17 import _Gemm, gemm +from spox.opset.ai.onnx.v17 import _GlobalAveragePool, global_average_pool +from spox.opset.ai.onnx.v17 import _GlobalLpPool, global_lp_pool +from spox.opset.ai.onnx.v17 import _GlobalMaxPool, global_max_pool +from spox.opset.ai.onnx.v17 import _Greater, greater +from spox.opset.ai.onnx.v17 import _GreaterOrEqual, greater_or_equal +from spox.opset.ai.onnx.v17 import _GridSample, grid_sample +from spox.opset.ai.onnx.v17 import _HammingWindow, hamming_window +from spox.opset.ai.onnx.v17 import _HannWindow, hann_window +from spox.opset.ai.onnx.v17 import _HardSigmoid, hard_sigmoid +from spox.opset.ai.onnx.v17 import _HardSwish, hard_swish +from spox.opset.ai.onnx.v17 import _Hardmax, hardmax +from spox.opset.ai.onnx.v17 import _Identity, identity +from spox.opset.ai.onnx.v17 import _If, if_ +from spox.opset.ai.onnx.v17 import _InstanceNormalization, instance_normalization +from spox.opset.ai.onnx.v17 import _IsInf, isinf +from spox.opset.ai.onnx.v17 import _IsNaN, isnan +from spox.opset.ai.onnx.v17 import _LRN, lrn +from spox.opset.ai.onnx.v17 import _LSTM, lstm +from spox.opset.ai.onnx.v17 import _LayerNormalization, layer_normalization +from spox.opset.ai.onnx.v17 import _LeakyRelu, leaky_relu +from spox.opset.ai.onnx.v17 import _Less, less +from spox.opset.ai.onnx.v17 import _LessOrEqual, less_or_equal +from spox.opset.ai.onnx.v17 import _Log, log +from spox.opset.ai.onnx.v17 import _LogSoftmax, log_softmax +from spox.opset.ai.onnx.v17 import _Loop, loop +from spox.opset.ai.onnx.v17 import _LpNormalization, lp_normalization +from spox.opset.ai.onnx.v17 import _MatMul, matmul +from spox.opset.ai.onnx.v17 import _MatMulInteger, matmul_integer 
+from spox.opset.ai.onnx.v17 import _Max, max +from spox.opset.ai.onnx.v17 import _MaxPool, max_pool +from spox.opset.ai.onnx.v17 import _MaxRoiPool, max_roi_pool +from spox.opset.ai.onnx.v17 import _MaxUnpool, max_unpool +from spox.opset.ai.onnx.v17 import _Mean, mean +from spox.opset.ai.onnx.v17 import _MeanVarianceNormalization, mean_variance_normalization +from spox.opset.ai.onnx.v17 import _MelWeightMatrix, mel_weight_matrix +from spox.opset.ai.onnx.v17 import _Min, min +from spox.opset.ai.onnx.v17 import _Mod, mod +from spox.opset.ai.onnx.v17 import _Mul, mul +from spox.opset.ai.onnx.v17 import _Multinomial, multinomial +from spox.opset.ai.onnx.v17 import _Neg, neg +from spox.opset.ai.onnx.v17 import _NegativeLogLikelihoodLoss, negative_log_likelihood_loss +from spox.opset.ai.onnx.v17 import _NonMaxSuppression, non_max_suppression +from spox.opset.ai.onnx.v17 import _NonZero, non_zero +from spox.opset.ai.onnx.v17 import _Not, not_ +from spox.opset.ai.onnx.v17 import _OneHot, one_hot +from spox.opset.ai.onnx.v17 import _Optional, optional +from spox.opset.ai.onnx.v17 import _Or, or_ +from spox.opset.ai.onnx.v17 import _PRelu, prelu +from spox.opset.ai.onnx.v17 import _Pow, pow +from spox.opset.ai.onnx.v17 import _QLinearConv, qlinear_conv +from spox.opset.ai.onnx.v17 import _QLinearMatMul, qlinear_matmul +from spox.opset.ai.onnx.v17 import _QuantizeLinear, quantize_linear +from spox.opset.ai.onnx.v17 import _RNN, rnn +from spox.opset.ai.onnx.v17 import _RandomNormal, random_normal +from spox.opset.ai.onnx.v17 import _RandomNormalLike, random_normal_like +from spox.opset.ai.onnx.v17 import _RandomUniform, random_uniform +from spox.opset.ai.onnx.v17 import _RandomUniformLike, random_uniform_like +from spox.opset.ai.onnx.v17 import _Range, range +from spox.opset.ai.onnx.v17 import _Reciprocal, reciprocal +from spox.opset.ai.onnx.v17 import _ReduceSum, reduce_sum +from spox.opset.ai.onnx.v17 import _Relu, relu +from spox.opset.ai.onnx.v17 import _Reshape, reshape +from spox.opset.ai.onnx.v17 import _ReverseSequence, reverse_sequence +from spox.opset.ai.onnx.v17 import _RoiAlign, roi_align +from spox.opset.ai.onnx.v17 import _Round, round +from spox.opset.ai.onnx.v17 import _STFT, stft +from spox.opset.ai.onnx.v17 import _Scan, scan +from spox.opset.ai.onnx.v17 import _Selu, selu +from spox.opset.ai.onnx.v17 import _SequenceAt, sequence_at +from spox.opset.ai.onnx.v17 import _SequenceConstruct, sequence_construct +from spox.opset.ai.onnx.v17 import _SequenceEmpty, sequence_empty +from spox.opset.ai.onnx.v17 import _SequenceErase, sequence_erase +from spox.opset.ai.onnx.v17 import _SequenceInsert, sequence_insert +from spox.opset.ai.onnx.v17 import _SequenceLength, sequence_length +from spox.opset.ai.onnx.v17 import _SequenceMap, sequence_map +from spox.opset.ai.onnx.v17 import _Shape, shape +from spox.opset.ai.onnx.v17 import _Shrink, shrink +from spox.opset.ai.onnx.v17 import _Sigmoid, sigmoid +from spox.opset.ai.onnx.v17 import _Sign, sign +from spox.opset.ai.onnx.v17 import _Sin, sin +from spox.opset.ai.onnx.v17 import _Sinh, sinh +from spox.opset.ai.onnx.v17 import _Size, size +from spox.opset.ai.onnx.v17 import _Slice, slice +from spox.opset.ai.onnx.v17 import _Softmax, softmax +from spox.opset.ai.onnx.v17 import _SoftmaxCrossEntropyLoss, softmax_cross_entropy_loss +from spox.opset.ai.onnx.v17 import _Softplus, softplus +from spox.opset.ai.onnx.v17 import _Softsign, softsign +from spox.opset.ai.onnx.v17 import _SpaceToDepth, space_to_depth +from spox.opset.ai.onnx.v17 import 
_SplitToSequence, split_to_sequence +from spox.opset.ai.onnx.v17 import _Sqrt, sqrt +from spox.opset.ai.onnx.v17 import _Squeeze, squeeze +from spox.opset.ai.onnx.v17 import _StringNormalizer, string_normalizer +from spox.opset.ai.onnx.v17 import _Sub, sub +from spox.opset.ai.onnx.v17 import _Sum, sum +from spox.opset.ai.onnx.v17 import _Tan, tan +from spox.opset.ai.onnx.v17 import _Tanh, tanh +from spox.opset.ai.onnx.v17 import _TfIdfVectorizer, tf_idf_vectorizer +from spox.opset.ai.onnx.v17 import _ThresholdedRelu, thresholded_relu +from spox.opset.ai.onnx.v17 import _Tile, tile +from spox.opset.ai.onnx.v17 import _TopK, top_k +from spox.opset.ai.onnx.v17 import _Transpose, transpose +from spox.opset.ai.onnx.v17 import _Trilu, trilu +from spox.opset.ai.onnx.v17 import _Unique, unique +from spox.opset.ai.onnx.v17 import _Unsqueeze, unsqueeze +from spox.opset.ai.onnx.v17 import _Where, where +from spox.opset.ai.onnx.v17 import _Xor, xor class _BitwiseAnd(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -363,7 +215,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _BitwiseNot(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -383,7 +234,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _BitwiseOr(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -404,7 +254,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _BitwiseXor(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -425,7 +274,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _CenterCropPad(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -446,7 +294,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Col2Im(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -470,7 +317,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GroupNormalization(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -493,7 +339,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _LpPool(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -519,7 +364,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Mish(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -539,7 +383,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _OptionalGetElement(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -559,7 +402,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _OptionalHasElement(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -579,7 +421,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Pad(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -602,7 +443,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceL1(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -624,7 +464,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceL2(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -646,7 +485,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceLogSum(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -668,7 +506,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceLogSumExp(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -690,7 +527,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceMax(StandardNode): 
@dataclass class Attributes(BaseAttributes): @@ -712,7 +548,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceMean(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -734,7 +569,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceMin(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -756,7 +590,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceProd(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -778,7 +611,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceSumSquare(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -800,7 +632,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Resize(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -831,7 +662,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ScatterElements(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -854,7 +684,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ScatterND(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -876,7 +705,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Split(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -898,2102 +726,1684 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - -def bitwise_and( - A: Var, - B: Var, -) -> Var: +def bitwise_and(A: Var, B: Var, ) -> Var: r""" - Returns the tensor resulting from performing the bitwise ``and`` - operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the bitwise operator. - B - Type T. - Second input operand for the bitwise operator. - - Returns - ======= - C : Var - Type T. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@18::BitwiseAnd``. - - Type constraints: - - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Returns the tensor resulting from performing the bitwise ``and`` +operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the bitwise operator. +B + Type T. + Second input operand for the bitwise operator. + +Returns +======= +C : Var + Type T. + Result tensor. + +Notes +===== +Signature: ``ai.onnx@18::BitwiseAnd``. + +Type constraints: + - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _BitwiseAnd( - _BitwiseAnd.Attributes(), - _BitwiseAnd.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def bitwise_not( - X: Var, -) -> Var: + return _BitwiseAnd( + _BitwiseAnd.Attributes( + ), _BitwiseAnd.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def bitwise_not(X: Var, ) -> Var: r""" - Returns the bitwise not of the input tensor element-wise. - - Parameters - ========== - X - Type T. 
- Input tensor - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@18::BitwiseNot``. - - Type constraints: - - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Returns the bitwise not of the input tensor element-wise. + +Parameters +========== +X + Type T. + Input tensor + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@18::BitwiseNot``. + +Type constraints: + - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _BitwiseNot( - _BitwiseNot.Attributes(), - _BitwiseNot.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def bitwise_or( - A: Var, - B: Var, -) -> Var: + return _BitwiseNot( + _BitwiseNot.Attributes( + ), _BitwiseNot.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def bitwise_or(A: Var, B: Var, ) -> Var: r""" - Returns the tensor resulting from performing the bitwise ``or`` - operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the bitwise operator. - B - Type T. - Second input operand for the bitwise operator. - - Returns - ======= - C : Var - Type T. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@18::BitwiseOr``. - - Type constraints: - - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Returns the tensor resulting from performing the bitwise ``or`` +operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the bitwise operator. +B + Type T. + Second input operand for the bitwise operator. + +Returns +======= +C : Var + Type T. + Result tensor. + +Notes +===== +Signature: ``ai.onnx@18::BitwiseOr``. + +Type constraints: + - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _BitwiseOr( - _BitwiseOr.Attributes(), - _BitwiseOr.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def bitwise_xor( - A: Var, - B: Var, -) -> Var: + return _BitwiseOr( + _BitwiseOr.Attributes( + ), _BitwiseOr.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def bitwise_xor(A: Var, B: Var, ) -> Var: r""" - Returns the tensor resulting from performing the bitwise ``xor`` - operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the bitwise operator. - B - Type T. - Second input operand for the bitwise operator. - - Returns - ======= - C : Var - Type T. - Result tensor. 
- - Notes - ===== - Signature: ``ai.onnx@18::BitwiseXor``. - - Type constraints: - - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Returns the tensor resulting from performing the bitwise ``xor`` +operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the bitwise operator. +B + Type T. + Second input operand for the bitwise operator. + +Returns +======= +C : Var + Type T. + Result tensor. + +Notes +===== +Signature: ``ai.onnx@18::BitwiseXor``. + +Type constraints: + - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _BitwiseXor( - _BitwiseXor.Attributes(), - _BitwiseXor.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) - - -def center_crop_pad( - input_data: Var, - shape: Var, - *, - axes: Optional[Iterable[int]] = None, -) -> Var: + return _BitwiseXor( + _BitwiseXor.Attributes( + ), _BitwiseXor.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C + + +def center_crop_pad(input_data: Var, shape: Var, *, axes: Optional[Iterable[int]] = None, ) -> Var: r""" - Center crop or pad an input to given dimensions. - - The crop/pad dimensions can be specified for a subset of the ``axes``. - Non-specified dimensions will not be cropped or padded. - - If the input dimensions are bigger than the crop shape, a centered - cropping window is extracted from the input. If the input dimensions are - smaller than the crop shape, the input is padded on each side equally, - so that the input is centered in the output. - - Parameters - ========== - input_data - Type T. - Input to extract the centered crop from. - shape - Type Tind. - 1-D tensor representing the cropping window dimensions. - axes - Attribute. - If provided, it specifies a subset of axes that 'shape' refer to. If not - provided, all axes are assumed [0, 1, ..., r-1], where r = rank(data). - Negative value means counting dimensions from the back. Accepted range - is [-r, r-1], where r = rank(data). Behavior is undefined if an axis is - repeated. - - Returns - ======= - output_data : Var - Type T. - Output data. - - Notes - ===== - Signature: ``ai.onnx@18::CenterCropPad``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - Tind: `tensor(int32)`, `tensor(int64)` +Center crop or pad an input to given dimensions. + +The crop/pad dimensions can be specified for a subset of the ``axes``. +Non-specified dimensions will not be cropped or padded. + +If the input dimensions are bigger than the crop shape, a centered +cropping window is extracted from the input. If the input dimensions are +smaller than the crop shape, the input is padded on each side equally, +so that the input is centered in the output. + +Parameters +========== +input_data + Type T. + Input to extract the centered crop from. +shape + Type Tind. 
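A brief sketch of the four new opset-18 bitwise constructors (``bitwise_and``, ``bitwise_not``, ``bitwise_or``, ``bitwise_xor``) whose wrappers are reformatted in this hunk; the ``op18`` alias and the ``argument``/``Tensor`` entry points are assumptions.

::

    import numpy as np

    from spox import Tensor, argument
    import spox.opset.ai.onnx.v18 as op18

    a = argument(Tensor(np.int32, ("N",)))
    b = argument(Tensor(np.int32, ("N",)))

    c = op18.bitwise_and(a, b)
    d = op18.bitwise_or(a, op18.bitwise_not(b))
    e = op18.bitwise_xor(c, d)   # results keep the int32 element type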
+ 1-D tensor representing the cropping window dimensions. +axes + Attribute. + If provided, it specifies a subset of axes that 'shape' refer to. If not + provided, all axes are assumed [0, 1, ..., r-1], where r = rank(data). + Negative value means counting dimensions from the back. Accepted range + is [-r, r-1], where r = rank(data). Behavior is undefined if an axis is + repeated. + +Returns +======= +output_data : Var + Type T. + Output data. + +Notes +===== +Signature: ``ai.onnx@18::CenterCropPad``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - Tind: `tensor(int32)`, `tensor(int64)` """ - return ( - _CenterCropPad( - _CenterCropPad.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - ), - _CenterCropPad.Inputs( - input_data=unwrap_vars(input_data), - shape=unwrap_vars(shape), - ), - ) - .get_output_vars( - input_data=get_value(input_data), - shape=get_value(shape), - ) - .output_data - ) - - -def col2_im( - input: Var, - image_shape: Var, - block_shape: Var, - *, - dilations: Optional[Iterable[int]] = None, - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: + return _CenterCropPad( + _CenterCropPad.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + ), _CenterCropPad.Inputs( + input_data=unwrap_vars(input_data), shape=unwrap_vars(shape), ), ).get_output_vars( + input_data=get_value(input_data), shape=get_value(shape), ).output_data + + +def col2_im(input: Var, image_shape: Var, block_shape: Var, *, dilations: Optional[Iterable[int]] = None, pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: r""" - The operator rearranges column blocks back into a multidimensional image - - Col2Im behaves similarly to PyTorch's fold - https://pytorch.org/docs/stable/generated/torch.nn.Fold.html, but it - only supports *batched* multi-dimensional image tensors. Another - implementation in Python with N-dimension support can be found at - https://github.com/f-dangel/unfoldNd/. - - NOTE: Although specifying image_shape looks redundant because it could - be calculated from convolution formulas, it is required as input for - more advanced scenarios as explained at PyTorch's implementation - (https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Col2Im.cpp#L10) - - Parameters - ========== - input - Type T. - Input data tensor to be rearranged from column blocks back into an - image. This is a 3-dimensional tensor containing [N, C \* - n-ary-product(block_shape), L], where N is batch dimension, C is image - channel dimension and L is number of blocks.The blocks are enumerated in - increasing lexicographic-order of their indices.For example, with an - image-size 10\ *20 and block-size 9*\ 18, there would be 2*3 blocks, - enumerated in the order block(0, 0), block(0, 1), block(0, 2), block(1, - 0), block(1, 1), block(1, 2). - image_shape - Type tensor(int64). - The shape of the spatial dimensions of the image after rearranging the - column blocks.This is a 1-dimensional tensor with size of at least 2, - containing the value [H_img, W_img] for a 2-D image or [dim_i1, dim_i2, - ..., dim_iN] for a N-D image. - block_shape - Type tensor(int64). 
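A minimal sketch of ``center_crop_pad`` as documented above, resizing the two trailing spatial axes to a fixed target. The ``const`` helper from the v17 module earlier in this diff builds the 1-D ``shape`` input; the module aliases are assumptions.

::

    import numpy as np

    from spox import Tensor, argument
    import spox.opset.ai.onnx.v17 as op    # provides the `const` helper shown earlier
    import spox.opset.ai.onnx.v18 as op18

    x = argument(Tensor(np.float32, (1, 3, 20, 30)))
    target = op.const(np.array([10, 8], dtype=np.int64))  # new sizes for axes 2 and 3
    y = op18.center_crop_pad(x, target, axes=[2, 3])      # -> (1, 3, 10, 8)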
- The shape of the block to apply on the input.This is a 1-dimensional - tensor of size of at least 2, containing the value [H_block, W_block] - for a 2-D image or [dim_b1, dim_b2, ..., dim_bN] for a N-D block.This is - the block-shape before dilation is applied to it. - dilations - Attribute. - 1-dimensional tensor with dilation value along each spatial axis of the - image. If not present, the dilation defaults to 1 along each spatial - axis of the image. - pads - Attribute. - 1-dimensional tensor with padding value for the beginning and ending - along each spatial axis, it can take any value greater than or equal to - 0. The value represent the number of pixels added to the beginning and - end part of the corresponding axis. ``pads`` format should be as follow - [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number - of pixels added at the beginning of axis ``i`` and xi_end is the number - of pixels added at the end of axis ``i``. If not present, the padding - defaults to 0 along start and end of each spatial axis. - strides - Attribute. - 1-dimensional tensor with stride value along each spatial axis. If not - present, the stride defaults to 1 along each spatial axis. - - Returns - ======= - output : Var - Type T. - Output tensor produced by rearranging blocks into an image. - - Notes - ===== - Signature: ``ai.onnx@18::Col2Im``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +The operator rearranges column blocks back into a multidimensional image + +Col2Im behaves similarly to PyTorch's fold +https://pytorch.org/docs/stable/generated/torch.nn.Fold.html, but it +only supports *batched* multi-dimensional image tensors. Another +implementation in Python with N-dimension support can be found at +https://github.com/f-dangel/unfoldNd/. + +NOTE: Although specifying image_shape looks redundant because it could +be calculated from convolution formulas, it is required as input for +more advanced scenarios as explained at PyTorch's implementation +(https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Col2Im.cpp#L10) + +Parameters +========== +input + Type T. + Input data tensor to be rearranged from column blocks back into an + image. This is a 3-dimensional tensor containing [N, C \* + n-ary-product(block_shape), L], where N is batch dimension, C is image + channel dimension and L is number of blocks.The blocks are enumerated in + increasing lexicographic-order of their indices.For example, with an + image-size 10\ *20 and block-size 9*\ 18, there would be 2*3 blocks, + enumerated in the order block(0, 0), block(0, 1), block(0, 2), block(1, + 0), block(1, 1), block(1, 2). +image_shape + Type tensor(int64). + The shape of the spatial dimensions of the image after rearranging the + column blocks.This is a 1-dimensional tensor with size of at least 2, + containing the value [H_img, W_img] for a 2-D image or [dim_i1, dim_i2, + ..., dim_iN] for a N-D image. +block_shape + Type tensor(int64). + The shape of the block to apply on the input.This is a 1-dimensional + tensor of size of at least 2, containing the value [H_block, W_block] + for a 2-D image or [dim_b1, dim_b2, ..., dim_bN] for a N-D block.This is + the block-shape before dilation is applied to it. +dilations + Attribute. 
+ 1-dimensional tensor with dilation value along each spatial axis of the + image. If not present, the dilation defaults to 1 along each spatial + axis of the image. +pads + Attribute. + 1-dimensional tensor with padding value for the beginning and ending + along each spatial axis, it can take any value greater than or equal to + 0. The value represent the number of pixels added to the beginning and + end part of the corresponding axis. ``pads`` format should be as follow + [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number + of pixels added at the beginning of axis ``i`` and xi_end is the number + of pixels added at the end of axis ``i``. If not present, the padding + defaults to 0 along start and end of each spatial axis. +strides + Attribute. + 1-dimensional tensor with stride value along each spatial axis. If not + present, the stride defaults to 1 along each spatial axis. + +Returns +======= +output : Var + Type T. + Output tensor produced by rearranging blocks into an image. + +Notes +===== +Signature: ``ai.onnx@18::Col2Im``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Col2Im( - _Col2Im.Attributes( - dilations=AttrInt64s.maybe(dilations, name="dilations"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _Col2Im.Inputs( - input=unwrap_vars(input), - image_shape=unwrap_vars(image_shape), - block_shape=unwrap_vars(block_shape), - ), - ) - .get_output_vars( - input=get_value(input), - image_shape=get_value(image_shape), - block_shape=get_value(block_shape), - ) - .output - ) - - -def group_normalization( - X: Var, - scale: Var, - bias: Var, - *, - epsilon: float = 9.999999747378752e-06, - num_groups: int, -) -> Var: + return _Col2Im( + _Col2Im.Attributes( + dilations=AttrInt64s.maybe(dilations, name="dilations"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _Col2Im.Inputs( + input=unwrap_vars(input), image_shape=unwrap_vars(image_shape), block_shape=unwrap_vars(block_shape), ), ).get_output_vars( + input=get_value(input), image_shape=get_value(image_shape), block_shape=get_value(block_shape), ).output + + +def group_normalization(X: Var, scale: Var, bias: Var, *, epsilon: float = 9.999999747378752e-06, num_groups: int, ) -> Var: r""" - A GroupNormalization function. Carries out group normalization as - described in the paper https://arxiv.org/abs/1803.08494 - - This operator transforms input according to - - :: - - y = scale * (x - mean) / sqrt(variance + epsilon) + bias, - - where the mean and variance are computed per instance per group of - channels, and ``scale`` and ``bias`` should be specified for each group - of channels. The number of groups ``num_groups`` should be divisible by - the number of channels so that there are an equal number of channels per - group. - - When the number of groups is the same as the number of channels, this - operator is equivalent to InstanceNormalization. When there is only one - group, this operator is equivalent to LayerNormalization. - - Parameters - ========== - X - Type T. - Input data tensor. 
Dimensions for image cases are ``(N x C x H x W)``, - where ``N`` is the batch size, ``C`` is the number of channels, and - ``H`` and ``W`` are the height and width of the data. Statistics are - computed for every group of channels over ``C``, ``H``, and ``W``. For - non-image cases, the dimensions are in the form of - ``(N x C x D1 x D2 ... Dn)``. - scale - Type T. - Scale tensor of shape ``(num_groups)``. - bias - Type T. - Bias tensor of shape ``(num_groups)``. - epsilon - Attribute. - The epsilon value to use to avoid division by zero. - num_groups - Attribute. - The number of groups of channels. It should be a divisor of the number - of channels ``C``. - - Returns - ======= - Y : Var - Type T. - The output tensor of the same shape as ``X``. - - Notes - ===== - Signature: ``ai.onnx@18::GroupNormalization``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` +A GroupNormalization function. Carries out group normalization as +described in the paper https://arxiv.org/abs/1803.08494 + +This operator transforms input according to + +:: + + y = scale * (x - mean) / sqrt(variance + epsilon) + bias, + +where the mean and variance are computed per instance per group of +channels, and ``scale`` and ``bias`` should be specified for each group +of channels. The number of groups ``num_groups`` should be divisible by +the number of channels so that there are an equal number of channels per +group. + +When the number of groups is the same as the number of channels, this +operator is equivalent to InstanceNormalization. When there is only one +group, this operator is equivalent to LayerNormalization. + +Parameters +========== +X + Type T. + Input data tensor. Dimensions for image cases are ``(N x C x H x W)``, + where ``N`` is the batch size, ``C`` is the number of channels, and + ``H`` and ``W`` are the height and width of the data. Statistics are + computed for every group of channels over ``C``, ``H``, and ``W``. For + non-image cases, the dimensions are in the form of + ``(N x C x D1 x D2 ... Dn)``. +scale + Type T. + Scale tensor of shape ``(num_groups)``. +bias + Type T. + Bias tensor of shape ``(num_groups)``. +epsilon + Attribute. + The epsilon value to use to avoid division by zero. +num_groups + Attribute. + The number of groups of channels. It should be a divisor of the number + of channels ``C``. + +Returns +======= +Y : Var + Type T. + The output tensor of the same shape as ``X``. + +Notes +===== +Signature: ``ai.onnx@18::GroupNormalization``. 
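A minimal sketch of ``group_normalization`` with the per-group ``scale``/``bias`` layout described above (16 channels split into 4 groups); the aliases and entry points are assumptions, not part of this diff.

::

    import numpy as np

    from spox import Tensor, argument
    import spox.opset.ai.onnx.v18 as op18

    x = argument(Tensor(np.float32, ("N", 16, "H", "W")))  # 16 channels
    scale = argument(Tensor(np.float32, (4,)))             # one entry per group
    bias = argument(Tensor(np.float32, (4,)))

    y = op18.group_normalization(x, scale, bias, num_groups=4)  # same shape as x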
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _GroupNormalization( - _GroupNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - num_groups=AttrInt64(num_groups, name="num_groups"), - ), - _GroupNormalization.Inputs( - X=unwrap_vars(X), - scale=unwrap_vars(scale), - bias=unwrap_vars(bias), - ), - ) - .get_output_vars( - X=get_value(X), - scale=get_value(scale), - bias=get_value(bias), - ) - .Y - ) - - -def lp_pool( - X: Var, - *, - auto_pad: str = "NOTSET", - ceil_mode: int = 0, - dilations: Optional[Iterable[int]] = None, - kernel_shape: Iterable[int], - p: int = 2, - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: + return _GroupNormalization( + _GroupNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + num_groups=AttrInt64(num_groups, name="num_groups"), + ), _GroupNormalization.Inputs( + X=unwrap_vars(X), scale=unwrap_vars(scale), bias=unwrap_vars(bias), ), ).get_output_vars( + X=get_value(X), scale=get_value(scale), bias=get_value(bias), ).Y + + +def lp_pool(X: Var, *, auto_pad: str = "NOTSET", ceil_mode: int = 0, dilations: Optional[Iterable[int]] = None, kernel_shape: Iterable[int], p: int = 2, pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: r""" - LpPool consumes an input tensor X and applies Lp pooling across the - tensor according to kernel sizes, stride sizes, and pad lengths. Lp - pooling consisting of computing the Lp norm on all values of a subset of - the input tensor according to the kernel size and downsampling the data - into the output tensor Y for further processing. The output spatial - shape will be following: - - :: - - output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1) - - or - - :: - - output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1) - - if ceil_mode is enabled ``pad_shape[i]`` is the sum of pads along axis - ``i``. - - ``auto_pad`` is a DEPRECATED attribute. If you are using them currently, - the output spatial shape will be following: - - :: - - VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - {kernelSpatialShape} + 1) / strides_spatial_shape[i]) - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) - - And pad shape will be following if ``SAME_UPPER`` or ``SAME_LOWER``: - - :: - - pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + {kernelSpatialShape} - input_spatial_shape[i] - - Parameters - ========== - X - Type T. - Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. For non - image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), - where N is the batch size. - auto_pad - Attribute. - auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where - default value is NOTSET, which means explicit padding is used. - SAME_UPPER or SAME_LOWER mean pad the input so that - ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis - ``i``. The padding is split between the two sides equally or almost - equally (depending on whether it is even or odd). 
In case the padding is - an odd number, the extra padding is added at the end for SAME_UPPER and - at the beginning for SAME_LOWER. - ceil_mode - Attribute. - Whether to use ceil or floor (default) to compute the output shape. - dilations - Attribute. - dilation value along each spatial axis of the filter. If not present, - the dilation defaults is 1 along each spatial axis. - kernel_shape - Attribute. - The size of the kernel along each axis. - p - Attribute. - p value of the Lp norm used to pool over the input data. - pads - Attribute. - Padding for the beginning and ending along each spatial axis, it can - take any value greater than or equal to 0. The value represent the - number of pixels added to the beginning and end part of the - corresponding axis. ``pads`` format should be as follow [x1_begin, - x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels - added at the beginning of axis ``i`` and xi_end, the number of pixels - added at the end of axis ``i``. This attribute cannot be used - simultaneously with auto_pad attribute. If not present, the padding - defaults to 0 along start and end of each spatial axis. - strides - Attribute. - Stride along each spatial axis. If not present, the stride defaults to 1 - along each spatial axis. - - Returns - ======= - Y : Var - Type T. - Output data tensor from Lp pooling across the input tensor. Dimensions - will vary based on various kernel, stride, and pad sizes. - - Notes - ===== - Signature: ``ai.onnx@18::LpPool``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` +LpPool consumes an input tensor X and applies Lp pooling across the +tensor according to kernel sizes, stride sizes, and pad lengths. Lp +pooling consisting of computing the Lp norm on all values of a subset of +the input tensor according to the kernel size and downsampling the data +into the output tensor Y for further processing. The output spatial +shape will be following: + +:: + + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1) + +or + +:: + + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - {kernelSpatialShape}) / strides_spatial_shape[i] + 1) + +if ceil_mode is enabled ``pad_shape[i]`` is the sum of pads along axis +``i``. + +``auto_pad`` is a DEPRECATED attribute. If you are using them currently, +the output spatial shape will be following: + +:: + + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - {kernelSpatialShape} + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + +And pad shape will be following if ``SAME_UPPER`` or ``SAME_LOWER``: + +:: + + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + {kernelSpatialShape} - input_spatial_shape[i] + +Parameters +========== +X + Type T. + Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. +auto_pad + Attribute. + auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where + default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that + ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis + ``i``. 
The padding is split between the two sides equally or almost + equally (depending on whether it is even or odd). In case the padding is + an odd number, the extra padding is added at the end for SAME_UPPER and + at the beginning for SAME_LOWER. +ceil_mode + Attribute. + Whether to use ceil or floor (default) to compute the output shape. +dilations + Attribute. + dilation value along each spatial axis of the filter. If not present, + the dilation defaults is 1 along each spatial axis. +kernel_shape + Attribute. + The size of the kernel along each axis. +p + Attribute. + p value of the Lp norm used to pool over the input data. +pads + Attribute. + Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. ``pads`` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis ``i`` and xi_end, the number of pixels + added at the end of axis ``i``. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. +strides + Attribute. + Stride along each spatial axis. If not present, the stride defaults to 1 + along each spatial axis. + +Returns +======= +Y : Var + Type T. + Output data tensor from Lp pooling across the input tensor. Dimensions + will vary based on various kernel, stride, and pad sizes. + +Notes +===== +Signature: ``ai.onnx@18::LpPool``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _LpPool( - _LpPool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - p=AttrInt64(p, name="p"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _LpPool.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def mish( - X: Var, -) -> Var: + return _LpPool( + _LpPool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + p=AttrInt64(p, name="p"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _LpPool.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def mish(X: Var, ) -> Var: r""" - Mish: A Self Regularized Non-Monotonic Neural Activation Function. +Mish: A Self Regularized Non-Monotonic Neural Activation Function. - Perform the linear unit element-wise on the input tensor X using - formula: +Perform the linear unit element-wise on the input tensor X using +formula: - :: +:: - mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x})) + mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x})) - Parameters - ========== - X - Type T. - Input tensor +Parameters +========== +X + Type T. + Input tensor - Returns - ======= - Y : Var - Type T. - Output tensor +Returns +======= +Y : Var + Type T. + Output tensor - Notes - ===== - Signature: ``ai.onnx@18::Mish``. +Notes +===== +Signature: ``ai.onnx@18::Mish``. 
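A short sketch combining ``lp_pool`` and ``mish`` as documented above: a 2x2, stride-2 L2 pooling followed by the Mish activation. With ``ceil_mode=0`` (the default), the 32x32 spatial dimensions become floor((32 - 2)/2 + 1) = 16. Aliases and entry points are assumed as before.

::

    import numpy as np

    from spox import Tensor, argument
    import spox.opset.ai.onnx.v18 as op18

    x = argument(Tensor(np.float32, (1, 3, 32, 32)))
    pooled = op18.lp_pool(x, kernel_shape=[2, 2], strides=[2, 2], p=2)  # -> (1, 3, 16, 16)
    y = op18.mish(pooled)   # x * tanh(softplus(x)), shape unchanged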
- Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _Mish( - _Mish.Attributes(), - _Mish.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def optional_get_element( - input: Var, -) -> Var: + return _Mish( + _Mish.Attributes( + ), _Mish.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def optional_get_element(input: Var, ) -> Var: r""" - If the input is a tensor or sequence type, it returns the input. If the - input is an optional type, it outputs the element in the input. It is an - error if the input is an empty optional-type (i.e. does not have an - element) and the behavior is undefined in this case. - - Parameters - ========== - input - Type O. - The optional input. - - Returns - ======= - output : Var - Type V. - Output element in the optional input. - - Notes - ===== - Signature: ``ai.onnx@18::OptionalGetElement``. - - Type constraints: - - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +If the input is a tensor or sequence type, it returns the input. If the +input is an optional type, it outputs the element in the input. It is an +error if the input is an empty optional-type (i.e. does not have an +element) and the behavior is undefined in this case. + +Parameters +========== +input + Type O. + The optional input. 
+ +Returns +======= +output : Var + Type V. + Output element in the optional input. + +Notes +===== +Signature: ``ai.onnx@18::OptionalGetElement``. + +Type constraints: + - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _OptionalGetElement( - _OptionalGetElement.Attributes(), - _OptionalGetElement.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def optional_has_element( - input: Optional[Var] = None, -) -> Var: + return _OptionalGetElement( + _OptionalGetElement.Attributes( + ), _OptionalGetElement.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def optional_has_element(input: Optional[Var] = None, ) -> Var: r""" - Returns true if (1) the input is an optional-type and contains an - element, or, (2) the input is a tensor or sequence type. If the input is - not provided or is an empty optional-type, this op returns false. - - Parameters - ========== - input - Type O. - The optional input. - - Returns - ======= - output : Var - Type B. - A scalar boolean tensor. If true, it indicates that optional-type input - contains an element. Otherwise, it is empty. - - Notes - ===== - Signature: ``ai.onnx@18::OptionalHasElement``. 
- - Type constraints: - - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - B: `tensor(bool)` +Returns true if (1) the input is an optional-type and contains an +element, or, (2) the input is a tensor or sequence type. If the input is +not provided or is an empty optional-type, this op returns false. + +Parameters +========== +input + Type O. + The optional input. + +Returns +======= +output : Var + Type B. + A scalar boolean tensor. If true, it indicates that optional-type input + contains an element. Otherwise, it is empty. + +Notes +===== +Signature: ``ai.onnx@18::OptionalHasElement``. 
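As a quick illustration of the two ``Optional*`` constructors in this hunk, a hedged sketch (again assuming the ``spox.opset.ai.onnx.v18`` import path). Per the docstrings, a plain tensor input makes ``optional_has_element`` yield true and makes ``optional_get_element`` pass the tensor through unchanged:

::

    import numpy as np
    import spox
    import spox.opset.ai.onnx.v18 as op  # assumed import path

    x = spox.argument(spox.Tensor(np.float32, ("N",)))

    # A tensor counts as a "present" value, so this yields a scalar boolean true at runtime.
    present = op.optional_has_element(x)

    # For a non-optional (tensor) input, OptionalGetElement simply returns the input.
    value = op.optional_get_element(x)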
+ +Type constraints: + - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - B: `tensor(bool)` """ - return ( - _OptionalHasElement( - _OptionalHasElement.Attributes(), - _OptionalHasElement.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def pad( - data: Var, - pads: Var, - constant_value: Optional[Var] = None, - axes: Optional[Var] = None, - *, - mode: str = "constant", -) -> Var: + return _OptionalHasElement( + _OptionalHasElement.Attributes( + ), _OptionalHasElement.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output + + +def pad(data: Var, pads: Var, constant_value: Optional[Var] = None, axes: Optional[Var] = None, *, mode: str = "constant", ) -> Var: r""" - Given a tensor containing the data to be padded (``data``), a tensor - containing the number of start and end pad values for axis (``pads``), - (optionally) a ``mode``, and (optionally) ``constant_value``, a padded - tensor (``output``) is generated. - - The three supported ``modes`` are (similar to corresponding modes - supported by ``numpy.pad``): - - 1) ``constant``\ (default) - pads with a given constant value as - specified by ``constant_value`` (which defaults to 0, empty string, - or False) - - 2) ``reflect`` - pads with the reflection of the vector mirrored on the - first and last values of the vector along each axis - - 3) ``edge`` - pads with the edge values of array - - Example 1 (``constant`` mode): - - Insert 0 pads to the beginning of the second dimension. 
- - :: - - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - - pads = [0, 2, 0, 0] - - mode = 'constant' - - constant_value = 0.0 - - output = [ - [0.0, 0.0, 1.0, 1.2], - [0.0, 0.0, 2.3, 3.4], - [0.0, 0.0, 4.5, 5.7], - ] - - Example 2 (``reflect`` mode): - - :: - - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - - pads = [0, 2, 0, 0] - - mode = 'reflect' - - output = [ - [1.0, 1.2, 1.0, 1.2], - [2.3, 3.4, 2.3, 3.4], - [4.5, 5.7, 4.5, 5.7], - ] - - Example 3 (``edge`` mode): - - :: - - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - - pads = [0, 2, 0, 0] - - mode = 'edge' - - output = [ - [1.0, 1.0, 1.0, 1.2], - [2.3, 2.3, 2.3, 3.4], - [4.5, 4.5, 4.5, 5.7], - ] - - Parameters - ========== - data - Type T. - Input tensor. - pads - Type tensor(int64). - Tensor of integers indicating the number of padding elements to add or - remove (if negative) at the beginning and end of each axis. For 2D input - tensor, it is the number of pixels. ``pads`` should be a 1D tensor of - shape [2 \* num_axes] where ``num_axes`` refers to the number of - elements in the ``axes`` input or the input rank if ``axes`` are not - provided explicitly. ``pads`` format should be: [x1_begin, x2_begin, - ..., x1_end, x2_end,...], where xi_begin is the number of pad values - added at the beginning of axis ``axes[i]`` and xi_end, the number of pad - values added at the end of axis ``axes[i]``. - constant_value - Type T. - (Optional) A scalar value to be used if the mode chosen is ``constant`` - (by default it is 0, empty string or False). - axes - Type Tind. - 1-D tensor of axes that ``pads`` apply to. Negative value means counting - dimensions from the back. Accepted range is [-r, r-1] where r = - rank(data). Behavior is undefined if an axis is repeated. If not - provided, all axes are assumed (``[0, 1, ..., input_rank-1]``). - mode - Attribute. - Supported modes: ``constant``\ (default), ``reflect``, ``edge`` - - Returns - ======= - output : Var - Type T. - Tensor after padding. - - Notes - ===== - Signature: ``ai.onnx@18::Pad``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - Tind: `tensor(int32)`, `tensor(int64)` +Given a tensor containing the data to be padded (``data``), a tensor +containing the number of start and end pad values for axis (``pads``), +(optionally) a ``mode``, and (optionally) ``constant_value``, a padded +tensor (``output``) is generated. + +The three supported ``modes`` are (similar to corresponding modes +supported by ``numpy.pad``): + +1) ``constant``\ (default) - pads with a given constant value as + specified by ``constant_value`` (which defaults to 0, empty string, + or False) + +2) ``reflect`` - pads with the reflection of the vector mirrored on the + first and last values of the vector along each axis + +3) ``edge`` - pads with the edge values of array + +Example 1 (``constant`` mode): + +Insert 0 pads to the beginning of the second dimension. 
+ +:: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'constant' + + constant_value = 0.0 + + output = [ + [0.0, 0.0, 1.0, 1.2], + [0.0, 0.0, 2.3, 3.4], + [0.0, 0.0, 4.5, 5.7], + ] + +Example 2 (``reflect`` mode): + +:: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'reflect' + + output = [ + [1.0, 1.2, 1.0, 1.2], + [2.3, 3.4, 2.3, 3.4], + [4.5, 5.7, 4.5, 5.7], + ] + +Example 3 (``edge`` mode): + +:: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'edge' + + output = [ + [1.0, 1.0, 1.0, 1.2], + [2.3, 2.3, 2.3, 3.4], + [4.5, 4.5, 4.5, 5.7], + ] + +Parameters +========== +data + Type T. + Input tensor. +pads + Type tensor(int64). + Tensor of integers indicating the number of padding elements to add or + remove (if negative) at the beginning and end of each axis. For 2D input + tensor, it is the number of pixels. ``pads`` should be a 1D tensor of + shape [2 \* num_axes] where ``num_axes`` refers to the number of + elements in the ``axes`` input or the input rank if ``axes`` are not + provided explicitly. ``pads`` format should be: [x1_begin, x2_begin, + ..., x1_end, x2_end,...], where xi_begin is the number of pad values + added at the beginning of axis ``axes[i]`` and xi_end, the number of pad + values added at the end of axis ``axes[i]``. +constant_value + Type T. + (Optional) A scalar value to be used if the mode chosen is ``constant`` + (by default it is 0, empty string or False). +axes + Type Tind. + 1-D tensor of axes that ``pads`` apply to. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(data). Behavior is undefined if an axis is repeated. If not + provided, all axes are assumed (``[0, 1, ..., input_rank-1]``). +mode + Attribute. + Supported modes: ``constant``\ (default), ``reflect``, ``edge`` + +Returns +======= +output : Var + Type T. + Tensor after padding. + +Notes +===== +Signature: ``ai.onnx@18::Pad``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - Tind: `tensor(int32)`, `tensor(int64)` """ - return ( - _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=unwrap_vars(data), - pads=unwrap_vars(pads), - constant_value=unwrap_vars(constant_value), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - pads=get_value(pads), - constant_value=get_value(constant_value), - axes=get_value(axes), - ) - .output - ) - - -def reduce_l1( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), _Pad.Inputs( + data=unwrap_vars(data), pads=unwrap_vars(pads), constant_value=unwrap_vars(constant_value), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), pads=get_value(pads), constant_value=get_value(constant_value), axes=get_value(axes), ).output + + +def reduce_l1(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the L1 norm of the input tensor's elements along the provided - axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. 
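A short, hedged sketch of the reformatted ``pad`` constructor above. Here ``pads`` is supplied as a second graph argument rather than a constant, and the ``spox.opset.ai.onnx.v18`` import path is an assumption for the example:

::

    import numpy as np
    import spox
    import spox.opset.ai.onnx.v18 as op  # assumed import path

    data = spox.argument(spox.Tensor(np.float32, (3, 2)))
    # pads is a regular (runtime) input in opset 18: a 1-D int64 tensor of length
    # 2 * rank(data), laid out as [x1_begin, x2_begin, x1_end, x2_end].
    # Feeding [0, 2, 0, 0] reproduces Example 1 from the docstring above.
    pads = spox.argument(spox.Tensor(np.int64, (4,)))

    # 'constant' mode with the default constant_value (0 for numeric tensors).
    padded = op.pad(data, pads, mode="constant")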
If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields 0. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@18::ReduceL1``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` +Computes the L1 norm of the input tensor's elements along the provided +axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields 0. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@18::ReduceL1``. 
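The ``reduce_*`` constructors in the remainder of this hunk all share the same opset-18 shape: ``axes`` is an optional tensor input, while ``keepdims`` and ``noop_with_empty_axes`` remain attributes. A minimal sketch using ``reduce_l1``, under the same assumed ``spox.opset.ai.onnx.v18`` import path as above:

::

    import numpy as np
    import spox
    import spox.opset.ai.onnx.v18 as op  # assumed import path

    data = spox.argument(spox.Tensor(np.float32, ("N", 3)))

    # With axes omitted (None) and the default keepdims=1, every dimension is
    # reduced and kept as size 1, so the result has shape (1, 1).
    l1 = op.reduce_l1(data)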
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return ( - _ReduceL1( - _ReduceL1.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceL1.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def reduce_l2( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _ReduceL1( + _ReduceL1.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceL1.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def reduce_l2(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the L2 norm of the input tensor's elements along the provided - axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields 0. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@18::ReduceL2``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` +Computes the L2 norm of the input tensor's elements along the provided +axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields 0. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. 
+ Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@18::ReduceL2``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return ( - _ReduceL2( - _ReduceL2.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceL2.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def reduce_log_sum( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _ReduceL2( + _ReduceL2.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceL2.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def reduce_log_sum(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the log sum of the input tensor's elements along the provided - axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields minus infinity (if - supported by the datatype) or undefined otherwise. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@18::ReduceLogSum``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` +Computes the log sum of the input tensor's elements along the provided +axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. 
Reduction over an empty set of values yields minus infinity (if +supported by the datatype) or undefined otherwise. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@18::ReduceLogSum``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return ( - _ReduceLogSum( - _ReduceLogSum.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceLogSum.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def reduce_log_sum_exp( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _ReduceLogSum( + _ReduceLogSum.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceLogSum.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def reduce_log_sum_exp(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the log sum exponent of the input tensor's elements along the - provided axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields minus infinity (if - supported by the datatype) or undefined otherwise. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. 
When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@18::ReduceLogSumExp``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` +Computes the log sum exponent of the input tensor's elements along the +provided axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields minus infinity (if +supported by the datatype) or undefined otherwise. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@18::ReduceLogSumExp``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return ( - _ReduceLogSumExp( - _ReduceLogSumExp.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceLogSumExp.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def reduce_max( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _ReduceLogSumExp( + _ReduceLogSumExp.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceLogSumExp.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def reduce_max(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the max of the input tensor's elements along the provided axes. - The resulting tensor has the same rank as the input if ``keepdims`` - equals 1. If ``keepdims`` equals 0, then the resulting tensor has the - reduced dimension pruned. Input tensors of rank zero are valid. - Reduction over an empty set of values yields minus infinity (if - supported by the datatype) or the minimum value of the data type - otherwise. 
- - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@18::ReduceMax``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Computes the max of the input tensor's elements along the provided axes. +The resulting tensor has the same rank as the input if ``keepdims`` +equals 1. If ``keepdims`` equals 0, then the resulting tensor has the +reduced dimension pruned. Input tensors of rank zero are valid. +Reduction over an empty set of values yields minus infinity (if +supported by the datatype) or the minimum value of the data type +otherwise. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@18::ReduceMax``. 
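To make the ``keepdims`` semantics described above concrete, a hedged sketch with ``reduce_max``; inspecting ``.type`` relies on spox's type inference populating it on ``Var`` (assumed here), and the usual caveat about the assumed import path applies:

::

    import numpy as np
    import spox
    import spox.opset.ai.onnx.v18 as op  # assumed import path

    data = spox.argument(spox.Tensor(np.float32, ("N", 3)))

    # Default axes: reduce over every dimension.
    kept = op.reduce_max(data, keepdims=1)     # reduced dims kept as size 1: shape (1, 1)
    dropped = op.reduce_max(data, keepdims=0)  # reduced dims pruned: a scalar, shape ()

    # Var.type is assumed to carry the inferred tensor types.
    print(kept.type, dropped.type)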
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _ReduceMax( - _ReduceMax.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceMax.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def reduce_mean( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _ReduceMax( + _ReduceMax.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceMax.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def reduce_mean(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the mean of the input tensor's elements along the provided - axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields undefined. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@18::ReduceMean``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` +Computes the mean of the input tensor's elements along the provided +axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields undefined. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). 
+keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@18::ReduceMean``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return ( - _ReduceMean( - _ReduceMean.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceMean.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def reduce_min( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _ReduceMean( + _ReduceMean.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceMean.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def reduce_min(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the min of the input tensor's elements along the provided axes. - The resulting tensor has the same rank as the input if ``keepdims`` - equals 1. If ``keepdims`` equals 0, then the resulting tensor has the - reduced dimension pruned. Input tensors of rank zero are valid. - Reduction over an empty set of values yields plus infinity (if supported - by the datatype) or the maximum value of the data type otherwise. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@18::ReduceMin``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Computes the min of the input tensor's elements along the provided axes. +The resulting tensor has the same rank as the input if ``keepdims`` +equals 1. If ``keepdims`` equals 0, then the resulting tensor has the +reduced dimension pruned. 
Input tensors of rank zero are valid. +Reduction over an empty set of values yields plus infinity (if supported +by the datatype) or the maximum value of the data type otherwise. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@18::ReduceMin``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _ReduceMin( - _ReduceMin.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceMin.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def reduce_prod( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _ReduceMin( + _ReduceMin.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceMin.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def reduce_prod(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the product of the input tensor's elements along the provided - axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields 1. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. 
When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@18::ReduceProd``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` +Computes the product of the input tensor's elements along the provided +axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields 1. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@18::ReduceProd``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return ( - _ReduceProd( - _ReduceProd.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceProd.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def reduce_sum_square( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _ReduceProd( + _ReduceProd.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceProd.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def reduce_sum_square(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the sum square of the input tensor's elements along the - provided axes. The resulting tensor has the same rank as the input if - ``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting - tensor has the reduced dimension pruned. Input tensors of rank zero are - valid. Reduction over an empty set of values yields 0. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). 
- Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@18::ReduceSumSquare``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` +Computes the sum square of the input tensor's elements along the +provided axes. The resulting tensor has the same rank as the input if +``keepdims`` equals 1. If ``keepdims`` equals 0, then the resulting +tensor has the reduced dimension pruned. Input tensors of rank zero are +valid. Reduction over an empty set of values yields 0. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@18::ReduceSumSquare``. 
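Finally, any of these constructor outputs can be turned into an ONNX model with spox's public ``build`` helper; an end-to-end sketch using ``reduce_sum_square``, with the import path and the documented ``spox.build(inputs=..., outputs=...)`` signature as the stated assumptions:

::

    import numpy as np
    import spox
    import spox.opset.ai.onnx.v18 as op  # assumed import path

    data = spox.argument(spox.Tensor(np.float32, ("N", 3)))
    ssq = op.reduce_sum_square(data, keepdims=0)

    # Build an onnx.ModelProto with one named input and one named output.
    model = spox.build(inputs={"data": data}, outputs={"ssq": ssq})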
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return ( - _ReduceSumSquare( - _ReduceSumSquare.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceSumSquare.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def resize( - X: Var, - roi: Optional[Var] = None, - scales: Optional[Var] = None, - sizes: Optional[Var] = None, - *, - antialias: int = 0, - axes: Optional[Iterable[int]] = None, - coordinate_transformation_mode: str = "half_pixel", - cubic_coeff_a: float = -0.75, - exclude_outside: int = 0, - extrapolation_value: float = 0.0, - keep_aspect_ratio_policy: str = "stretch", - mode: str = "nearest", - nearest_mode: str = "round_prefer_floor", -) -> Var: + return _ReduceSumSquare( + _ReduceSumSquare.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceSumSquare.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def resize(X: Var, roi: Optional[Var] = None, scales: Optional[Var] = None, sizes: Optional[Var] = None, *, antialias: int = 0, axes: Optional[Iterable[int]] = None, coordinate_transformation_mode: str = "half_pixel", cubic_coeff_a: float = -0.75, exclude_outside: int = 0, extrapolation_value: float = 0.0, keep_aspect_ratio_policy: str = "stretch", mode: str = "nearest", nearest_mode: str = "round_prefer_floor", ) -> Var: r""" - Resize the input tensor. In general, it calculates every value in the - output tensor as a weighted average of neighborhood (a.k.a. sampling - locations) in the input tensor. Each dimension value of the output - tensor is: - ``output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)`` - if input "sizes" is not specified. - - Parameters - ========== - X - Type T1. - N-D tensor - roi - Type T2. - 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is - the rank of X or the length of axes, if provided. The RoIs' coordinates - are normalized in the coordinate system of the input image. It only - takes effect when coordinate_transformation_mode is "tf_crop_and_resize" - scales - Type tensor(float). - The scale array along each dimension. It takes value greater than 0. If - it's less than 1, it's sampling down, otherwise, it's upsampling. The - number of elements of 'scales' should be the same as the rank of input - 'X' or the length of 'axes', if provided. One of 'scales' and 'sizes' - MUST be specified and it is an error if both are specified. If 'sizes' - is needed, the user can use an empty string as the name of 'scales' in - this operator's input list. - sizes - Type tensor(int64). - Target size of the output tensor. Its interpretation depends on the - 'keep_aspect_ratio_policy' value.The number of elements of 'sizes' - should be the same as the rank of input 'X', or the length of 'axes', if - provided. Only one of 'scales' and 'sizes' can be specified. - antialias - Attribute. - If set to 1, "linear" and "cubic" interpolation modes will use an - antialiasing filter when downscaling. 
Antialiasing is achieved by - stretching the resampling filter by a factor max(1, 1 / scale), which - means that when downsampling, more input pixels contribute to an output - pixel. - axes - Attribute. - If provided, it specifies a subset of axes that 'roi', 'scales' and - 'sizes' refer to. If not provided, all axes are assumed [0, 1, ..., - r-1], where r = rank(data). Non-specified dimensions are interpreted as - non-resizable. Negative value means counting dimensions from the back. - Accepted range is [-r, r-1], where r = rank(data). Behavior is undefined - if an axis is repeated. - coordinate_transformation_mode - Attribute. - This attribute describes how to transform the coordinate in the resized - tensor to the coordinate in the original tensor. - - The coordinate of each dimension is transformed individually. Let's - describe a case using axis x as an example. Denote x_resized as the - coordinate of axis x in the resized tensor, x_original as the coordinate - of axis x in the original tensor, ``length_original`` as the length of - the original tensor in axis x, length_resized as the length of the - resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in - input "roi", ``scale = length_resized / length_original``, - - if coordinate_transformation_mode is ``"half_pixel"``, - ``x_original = (x_resized + 0.5) / scale - 0.5`` - - if coordinate_transformation_mode is ``"pytorch_half_pixel"``, - ``x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0`` - - if coordinate_transformation_mode is ``"align_corners"``, - ``x_original = x_resized * (length_original - 1) / (length_resized - 1)`` - - if coordinate_transformation_mode is ``"asymmetric"``, - ``x_original = x_resized / scale`` - - if coordinate_transformation_mode is ``"tf_crop_and_resize"``, - ``x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)`` - . - cubic_coeff_a - Attribute. - The coefficient 'a' used in cubic interpolation. Two common choice are - -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out - Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the - details. This attribute is valid only if mode is "cubic". - exclude_outside - Attribute. - If set to 1, the weight of sampling locations outside the tensor will be - set to 0 and the weight will be renormalized so that their sum is 1.0. - The default value is 0. - extrapolation_value - Attribute. - When coordinate_transformation_mode is "tf_crop_and_resize" and - x_original is outside the range [0, length_original - 1], this value is - used as the corresponding output value. Default is 0.0f. - keep_aspect_ratio_policy - Attribute. - This attribute describes how to interpret the ``sizes`` input with - regard to keeping the original aspect ratio of the input, and it is not - applicable when the ``scales`` input is used. - - Given a set of ``sizes``, associated with a subset of ``axes`` - (explicitly provided or default), and assuming ``d = axes[i]``, with - ``i`` being the index of the provided ``sizes``. 
- - If ``keep_aspect_ratio_policy`` is ``"stretch"``, the original aspect - ratio is disregarded, and the input is resized to the specified size: - ``out_size[d] = sizes[i]`` - - If ``keep_aspect_ratio_policy`` is ``"not_larger"``, the sizes are - adjusted so that no extent of the output is larger than the specified - size, while keeping the original aspect ratio: - ``scale = Min(sizes[i] / in_size[d])`` - ``out_size[d] = round_int(scale * in_size[i])`` - - If ``keep_aspect_ratio_policy`` is ``"not_smaller"``, the sizes are - adjusted so that no extent of the output is smaller than the specified - size, while keeping the original aspect ratio: - ``scale = Max(sizes[i] / in_size[d])`` - ``out_size[d] = round_int(scale * in_size[i])`` - - For non-resizable axes (those not specified in ``axes``), the output - size will be equal to the input size. - - Note: ``round_int`` stands for computing the nearest integer value, - rounding halfway cases up. - mode - Attribute. - Three interpolation modes: "nearest" (default), "linear" and "cubic". - The "linear" mode includes linear interpolation for 1D tensor and - N-linear interpolation for N-D tensor (for example, bilinear - interpolation for 2D tensor). The "cubic" mode includes cubic - interpolation for 1D tensor and N-cubic interpolation for N-D tensor - (for example, bicubic interpolation for 2D tensor). - nearest_mode - Attribute. - Four modes: "round_prefer_floor" (default, as known as round half down), - "round_prefer_ceil" (as known as round half up), "floor", "ceil". Only - used by nearest interpolation. It indicates how to get "nearest" pixel - in input tensor from x_original, so this attribute is valid only if - "mode" is "nearest". - - Returns - ======= - Y : Var - Type T1. - N-D tensor after resizing - - Notes - ===== - Signature: ``ai.onnx@18::Resize``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` +Resize the input tensor. In general, it calculates every value in the +output tensor as a weighted average of neighborhood (a.k.a. sampling +locations) in the input tensor. Each dimension value of the output +tensor is: +``output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)`` +if input "sizes" is not specified. + +Parameters +========== +X + Type T1. + N-D tensor +roi + Type T2. + 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is + the rank of X or the length of axes, if provided. The RoIs' coordinates + are normalized in the coordinate system of the input image. It only + takes effect when coordinate_transformation_mode is "tf_crop_and_resize" +scales + Type tensor(float). + The scale array along each dimension. It takes value greater than 0. If + it's less than 1, it's sampling down, otherwise, it's upsampling. The + number of elements of 'scales' should be the same as the rank of input + 'X' or the length of 'axes', if provided. One of 'scales' and 'sizes' + MUST be specified and it is an error if both are specified. If 'sizes' + is needed, the user can use an empty string as the name of 'scales' in + this operator's input list. +sizes + Type tensor(int64). + Target size of the output tensor. 
Its interpretation depends on the + 'keep_aspect_ratio_policy' value.The number of elements of 'sizes' + should be the same as the rank of input 'X', or the length of 'axes', if + provided. Only one of 'scales' and 'sizes' can be specified. +antialias + Attribute. + If set to 1, "linear" and "cubic" interpolation modes will use an + antialiasing filter when downscaling. Antialiasing is achieved by + stretching the resampling filter by a factor max(1, 1 / scale), which + means that when downsampling, more input pixels contribute to an output + pixel. +axes + Attribute. + If provided, it specifies a subset of axes that 'roi', 'scales' and + 'sizes' refer to. If not provided, all axes are assumed [0, 1, ..., + r-1], where r = rank(data). Non-specified dimensions are interpreted as + non-resizable. Negative value means counting dimensions from the back. + Accepted range is [-r, r-1], where r = rank(data). Behavior is undefined + if an axis is repeated. +coordinate_transformation_mode + Attribute. + This attribute describes how to transform the coordinate in the resized + tensor to the coordinate in the original tensor. + + The coordinate of each dimension is transformed individually. Let's + describe a case using axis x as an example. Denote x_resized as the + coordinate of axis x in the resized tensor, x_original as the coordinate + of axis x in the original tensor, ``length_original`` as the length of + the original tensor in axis x, length_resized as the length of the + resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in + input "roi", ``scale = length_resized / length_original``, + + if coordinate_transformation_mode is ``"half_pixel"``, + ``x_original = (x_resized + 0.5) / scale - 0.5`` + + if coordinate_transformation_mode is ``"pytorch_half_pixel"``, + ``x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0`` + + if coordinate_transformation_mode is ``"align_corners"``, + ``x_original = x_resized * (length_original - 1) / (length_resized - 1)`` + + if coordinate_transformation_mode is ``"asymmetric"``, + ``x_original = x_resized / scale`` + + if coordinate_transformation_mode is ``"tf_crop_and_resize"``, + ``x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1)`` + . +cubic_coeff_a + Attribute. + The coefficient 'a' used in cubic interpolation. Two common choice are + -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out + Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the + details. This attribute is valid only if mode is "cubic". +exclude_outside + Attribute. + If set to 1, the weight of sampling locations outside the tensor will be + set to 0 and the weight will be renormalized so that their sum is 1.0. + The default value is 0. +extrapolation_value + Attribute. + When coordinate_transformation_mode is "tf_crop_and_resize" and + x_original is outside the range [0, length_original - 1], this value is + used as the corresponding output value. Default is 0.0f. +keep_aspect_ratio_policy + Attribute. + This attribute describes how to interpret the ``sizes`` input with + regard to keeping the original aspect ratio of the input, and it is not + applicable when the ``scales`` input is used. + + Given a set of ``sizes``, associated with a subset of ``axes`` + (explicitly provided or default), and assuming ``d = axes[i]``, with + ``i`` being the index of the provided ``sizes``. 
+ + If ``keep_aspect_ratio_policy`` is ``"stretch"``, the original aspect + ratio is disregarded, and the input is resized to the specified size: + ``out_size[d] = sizes[i]`` + + If ``keep_aspect_ratio_policy`` is ``"not_larger"``, the sizes are + adjusted so that no extent of the output is larger than the specified + size, while keeping the original aspect ratio: + ``scale = Min(sizes[i] / in_size[d])`` + ``out_size[d] = round_int(scale * in_size[i])`` + + If ``keep_aspect_ratio_policy`` is ``"not_smaller"``, the sizes are + adjusted so that no extent of the output is smaller than the specified + size, while keeping the original aspect ratio: + ``scale = Max(sizes[i] / in_size[d])`` + ``out_size[d] = round_int(scale * in_size[i])`` + + For non-resizable axes (those not specified in ``axes``), the output + size will be equal to the input size. + + Note: ``round_int`` stands for computing the nearest integer value, + rounding halfway cases up. +mode + Attribute. + Three interpolation modes: "nearest" (default), "linear" and "cubic". + The "linear" mode includes linear interpolation for 1D tensor and + N-linear interpolation for N-D tensor (for example, bilinear + interpolation for 2D tensor). The "cubic" mode includes cubic + interpolation for 1D tensor and N-cubic interpolation for N-D tensor + (for example, bicubic interpolation for 2D tensor). +nearest_mode + Attribute. + Four modes: "round_prefer_floor" (default, as known as round half down), + "round_prefer_ceil" (as known as round half up), "floor", "ceil". Only + used by nearest interpolation. It indicates how to get "nearest" pixel + in input tensor from x_original, so this attribute is valid only if + "mode" is "nearest". + +Returns +======= +Y : Var + Type T1. + N-D tensor after resizing + +Notes +===== +Signature: ``ai.onnx@18::Resize``. 
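As a quick arithmetic check of the output-shape rule and the ``half_pixel`` transform quoted above, a standalone sketch with made-up numbers, independent of the generated code::

    import math

    # Upscale a length-4 axis by scale 2.0 using the full RoI [0, 1].
    input_dim, scale = 4, 2.0
    roi_start, roi_end = 0.0, 1.0
    output_dim = math.floor(input_dim * (roi_end - roi_start) * scale)
    print(output_dim)  # 8

    # coordinate_transformation_mode="half_pixel": map each resized coordinate
    # back into the original tensor's coordinate system.
    for x_resized in range(output_dim):
        x_original = (x_resized + 0.5) / scale - 0.5
        print(x_resized, "->", x_original)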
+ +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _Resize( - _Resize.Attributes( - antialias=AttrInt64(antialias, name="antialias"), - axes=AttrInt64s.maybe(axes, name="axes"), - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, - name="coordinate_transformation_mode", - ), - cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), - exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), - extrapolation_value=AttrFloat32( - extrapolation_value, name="extrapolation_value" - ), - keep_aspect_ratio_policy=AttrString( - keep_aspect_ratio_policy, name="keep_aspect_ratio_policy" - ), - mode=AttrString(mode, name="mode"), - nearest_mode=AttrString(nearest_mode, name="nearest_mode"), - ), - _Resize.Inputs( - X=unwrap_vars(X), - roi=unwrap_vars(roi), - scales=unwrap_vars(scales), - sizes=unwrap_vars(sizes), - ), - ) - .get_output_vars( - X=get_value(X), - roi=get_value(roi), - scales=get_value(scales), - sizes=get_value(sizes), - ) - .Y - ) - - -def scatter_elements( - data: Var, - indices: Var, - updates: Var, - *, - axis: int = 0, - reduction: str = "none", -) -> Var: + return _Resize( + _Resize.Attributes( + antialias=AttrInt64(antialias, name="antialias"), + axes=AttrInt64s.maybe(axes, name="axes"), + coordinate_transformation_mode=AttrString(coordinate_transformation_mode, name="coordinate_transformation_mode"), + cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), + exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), + extrapolation_value=AttrFloat32(extrapolation_value, name="extrapolation_value"), + keep_aspect_ratio_policy=AttrString(keep_aspect_ratio_policy, name="keep_aspect_ratio_policy"), + mode=AttrString(mode, name="mode"), + nearest_mode=AttrString(nearest_mode, name="nearest_mode"), + ), _Resize.Inputs( + X=unwrap_vars(X), roi=unwrap_vars(roi), scales=unwrap_vars(scales), sizes=unwrap_vars(sizes), ), ).get_output_vars( + X=get_value(X), roi=get_value(roi), scales=get_value(scales), sizes=get_value(sizes), ).Y + + +def scatter_elements(data: Var, indices: Var, updates: Var, *, axis: int = 0, reduction: str = "none", ) -> Var: r""" - ScatterElements takes three inputs ``data``, ``updates``, and - ``indices`` of the same rank r >= 1 and an optional attribute axis that - identifies an axis of ``data`` (by default, the outer-most axis, that is - axis 0). The output of the operation is produced by creating a copy of - the input ``data``, and then updating its value to values specified by - ``updates`` at specific index positions specified by ``indices``. Its - output shape is the same as the shape of ``data``. - - For each entry in ``updates``, the target index in ``data`` is obtained - by combining the corresponding entry in ``indices`` with the index of - the entry itself: the index-value for dimension = axis is obtained from - the value of the corresponding entry in ``indices`` and the index-value - for dimension != axis is obtained from the index of the entry itself. - - ``reduction`` allows specification of an optional reduction operation, - which is applied to all values in ``updates`` tensor into ``output`` at - the specified ``indices``. 
In cases where ``reduction`` is set to - "none", indices should not have duplicate entries: that is, if idx1 != - idx2, then indices[idx1] != indices[idx2]. For instance, in a 2-D tensor - case, the update corresponding to the [i][j] entry is performed as - below: - - :: - - output[indices[i][j]][j] = updates[i][j] if axis = 0, - output[i][indices[i][j]] = updates[i][j] if axis = 1, - - When ``reduction`` is set to some reduction function ``f``, the update - corresponding to the [i][j] entry is performed as below: - - :: - - output[indices[i][j]][j] = f(output[indices[i][j]][j], updates[i][j]) if axis = 0, - output[i][indices[i][j]] = f(output[i][indices[i][j]], updates[i][j]) if axis = 1, - - where the ``f`` is ``+``, ``*``, ``max`` or ``min`` as specified. - - This operator is the inverse of GatherElements. It is similar to Torch's - Scatter operation. - - (Opset 18 change): Adds max/min to the set of allowed reduction ops. - - Example 1: - - :: - - data = [ - [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0], - ] - indices = [ - [1, 0, 2], - [0, 2, 1], - ] - updates = [ - [1.0, 1.1, 1.2], - [2.0, 2.1, 2.2], - ] - output = [ - [2.0, 1.1, 0.0] - [1.0, 0.0, 2.2] - [0.0, 2.1, 1.2] - ] - - Example 2: - - :: - - data = [[1.0, 2.0, 3.0, 4.0, 5.0]] - indices = [[1, 3]] - updates = [[1.1, 2.1]] - axis = 1 - output = [[1.0, 1.1, 3.0, 2.1, 5.0]] - - Parameters - ========== - data - Type T. - Tensor of rank r >= 1. - indices - Type Tind. - Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index - values are expected to be within bounds [-s, s-1] along axis of size s. - It is an error if any of the index values are out of bounds. - updates - Type T. - Tensor of rank r >=1 (same rank and shape as indices) - axis - Attribute. - Which axis to scatter on. Negative value means counting dimensions from - the back. Accepted range is [-r, r-1] where r = rank(data). - reduction - Attribute. - Type of reduction to apply: none (default), add, mul, max, min. 'none': - no reduction applied. 'add': reduction using the addition operation. - 'mul': reduction using the multiplication operation.'max': reduction - using the maximum operation.'min': reduction using the minimum - operation. - - Returns - ======= - output : Var - Type T. - Tensor of rank r >= 1 (same rank as input). - - Notes - ===== - Signature: ``ai.onnx@18::ScatterElements``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - Tind: `tensor(int32)`, `tensor(int64)` +ScatterElements takes three inputs ``data``, ``updates``, and +``indices`` of the same rank r >= 1 and an optional attribute axis that +identifies an axis of ``data`` (by default, the outer-most axis, that is +axis 0). The output of the operation is produced by creating a copy of +the input ``data``, and then updating its value to values specified by +``updates`` at specific index positions specified by ``indices``. Its +output shape is the same as the shape of ``data``. 
+ +For each entry in ``updates``, the target index in ``data`` is obtained +by combining the corresponding entry in ``indices`` with the index of +the entry itself: the index-value for dimension = axis is obtained from +the value of the corresponding entry in ``indices`` and the index-value +for dimension != axis is obtained from the index of the entry itself. + +``reduction`` allows specification of an optional reduction operation, +which is applied to all values in ``updates`` tensor into ``output`` at +the specified ``indices``. In cases where ``reduction`` is set to +"none", indices should not have duplicate entries: that is, if idx1 != +idx2, then indices[idx1] != indices[idx2]. For instance, in a 2-D tensor +case, the update corresponding to the [i][j] entry is performed as +below: + +:: + + output[indices[i][j]][j] = updates[i][j] if axis = 0, + output[i][indices[i][j]] = updates[i][j] if axis = 1, + +When ``reduction`` is set to some reduction function ``f``, the update +corresponding to the [i][j] entry is performed as below: + +:: + + output[indices[i][j]][j] = f(output[indices[i][j]][j], updates[i][j]) if axis = 0, + output[i][indices[i][j]] = f(output[i][indices[i][j]], updates[i][j]) if axis = 1, + +where the ``f`` is ``+``, ``*``, ``max`` or ``min`` as specified. + +This operator is the inverse of GatherElements. It is similar to Torch's +Scatter operation. + +(Opset 18 change): Adds max/min to the set of allowed reduction ops. + +Example 1: + +:: + + data = [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ] + indices = [ + [1, 0, 2], + [0, 2, 1], + ] + updates = [ + [1.0, 1.1, 1.2], + [2.0, 2.1, 2.2], + ] + output = [ + [2.0, 1.1, 0.0] + [1.0, 0.0, 2.2] + [0.0, 2.1, 1.2] + ] + +Example 2: + +:: + + data = [[1.0, 2.0, 3.0, 4.0, 5.0]] + indices = [[1, 3]] + updates = [[1.1, 2.1]] + axis = 1 + output = [[1.0, 1.1, 3.0, 2.1, 5.0]] + +Parameters +========== +data + Type T. + Tensor of rank r >= 1. +indices + Type Tind. + Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index + values are expected to be within bounds [-s, s-1] along axis of size s. + It is an error if any of the index values are out of bounds. +updates + Type T. + Tensor of rank r >=1 (same rank and shape as indices) +axis + Attribute. + Which axis to scatter on. Negative value means counting dimensions from + the back. Accepted range is [-r, r-1] where r = rank(data). +reduction + Attribute. + Type of reduction to apply: none (default), add, mul, max, min. 'none': + no reduction applied. 'add': reduction using the addition operation. + 'mul': reduction using the multiplication operation.'max': reduction + using the maximum operation.'min': reduction using the minimum + operation. + +Returns +======= +output : Var + Type T. + Tensor of rank r >= 1 (same rank as input). + +Notes +===== +Signature: ``ai.onnx@18::ScatterElements``. 
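Example 1 above (axis 0, ``reduction="none"``) can be replayed with plain NumPy, whose ``put_along_axis`` applies the same update rule; this is a sketch for intuition only, not the operator's implementation::

    import numpy as np

    data = np.zeros((3, 3), dtype=np.float32)
    indices = np.array([[1, 0, 2], [0, 2, 1]])
    updates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)

    output = data.copy()
    # axis = 0: output[indices[i][j]][j] = updates[i][j]
    np.put_along_axis(output, indices, updates, axis=0)
    print(output)
    # [[2.  1.1 0. ]
    #  [1.  0.  2.2]
    #  [0.  2.1 1.2]]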
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - Tind: `tensor(int32)`, `tensor(int64)` """ - return ( - _ScatterElements( - _ScatterElements.Attributes( - axis=AttrInt64(axis, name="axis"), - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterElements.Inputs( - data=unwrap_vars(data), - indices=unwrap_vars(indices), - updates=unwrap_vars(updates), - ), - ) - .get_output_vars( - data=get_value(data), - indices=get_value(indices), - updates=get_value(updates), - ) - .output - ) - - -def scatter_nd( - data: Var, - indices: Var, - updates: Var, - *, - reduction: str = "none", -) -> Var: + return _ScatterElements( + _ScatterElements.Attributes( + axis=AttrInt64(axis, name="axis"), + reduction=AttrString(reduction, name="reduction"), + ), _ScatterElements.Inputs( + data=unwrap_vars(data), indices=unwrap_vars(indices), updates=unwrap_vars(updates), ), ).get_output_vars( + data=get_value(data), indices=get_value(indices), updates=get_value(updates), ).output + + +def scatter_nd(data: Var, indices: Var, updates: Var, *, reduction: str = "none", ) -> Var: r""" - ScatterND takes three inputs ``data`` tensor of rank r >= 1, ``indices`` - tensor of rank q >= 1, and ``updates`` tensor of rank q + r - - indices.shape[-1] - 1. The output of the operation is produced by - creating a copy of the input ``data``, and then updating its value to - values specified by ``updates`` at specific index positions specified by - ``indices``. Its output shape is the same as the shape of ``data``. - - ``indices`` is an integer tensor. Let k denote indices.shape[-1], the - last dimension in the shape of ``indices``. ``indices`` is treated as a - (q-1)-dimensional tensor of k-tuples, where each k-tuple is a - partial-index into ``data``. Hence, k can be a value at most the rank of - ``data``. When k equals rank(data), each update entry specifies an - update to a single element of the tensor. When k is less than rank(data) - each update entry specifies an update to a slice of the tensor. Index - values are allowed to be negative, as per the usual convention for - counting backwards from the end, but are expected in the valid range. - - ``updates`` is treated as a (q-1)-dimensional tensor of - replacement-slice-values. Thus, the first (q-1) dimensions of - updates.shape must match the first (q-1) dimensions of indices.shape. - The remaining dimensions of ``updates`` correspond to the dimensions of - the replacement-slice-values. Each replacement-slice-value is a (r-k) - dimensional tensor, corresponding to the trailing (r-k) dimensions of - ``data``. Thus, the shape of ``updates`` must equal indices.shape[0:q-1] - ++ data.shape[k:r-1], where ++ denotes the concatenation of shapes. - - The ``output`` is calculated via the following equation: - - :: - - output = np.copy(data) - update_indices = indices.shape[:-1] - for idx in np.ndindex(update_indices): - output[indices[idx]] = updates[idx] - - The order of iteration in the above loop is not specified. In - particular, indices should not have duplicate entries: that is, if idx1 - != idx2, then indices[idx1] != indices[idx2]. This ensures that the - output value does not depend on the iteration order. 
- - ``reduction`` allows specification of an optional reduction operation, - which is applied to all values in ``updates`` tensor into ``output`` at - the specified ``indices``. In cases where ``reduction`` is set to - "none", indices should not have duplicate entries: that is, if idx1 != - idx2, then indices[idx1] != indices[idx2]. This ensures that the output - value does not depend on the iteration order. When ``reduction`` is set - to some reduction function ``f``, ``output`` is calculated as follows: - - :: - - output = np.copy(data) - update_indices = indices.shape[:-1] - for idx in np.ndindex(update_indices): - output[indices[idx]] = f(output[indices[idx]], updates[idx]) - - where the ``f`` is ``+``, ``*``, ``max`` or ``min`` as specified. - - This operator is the inverse of GatherND. - - (Opset 18 change): Adds max/min to the set of allowed reduction ops. - - Example 1: - - :: - - data = [1, 2, 3, 4, 5, 6, 7, 8] - indices = [[4], [3], [1], [7]] - updates = [9, 10, 11, 12] - output = [1, 11, 3, 10, 9, 6, 7, 12] - - Example 2: - - :: - - data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], - [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], - [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], - [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] - indices = [[0], [2]] - updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]] - output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], - [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], - [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] - - Parameters - ========== - data - Type T. - Tensor of rank r >= 1. - indices - Type tensor(int64). - Tensor of rank q >= 1. - updates - Type T. - Tensor of rank q + r - indices_shape[-1] - 1. - reduction - Attribute. - Type of reduction to apply: none (default), add, mul, max, min. 'none': - no reduction applied. 'add': reduction using the addition operation. - 'mul': reduction using the addition operation. 'max': reduction using - the maximum operation.'min': reduction using the minimum operation. - - Returns - ======= - output : Var - Type T. - Tensor of rank r >= 1. - - Notes - ===== - Signature: ``ai.onnx@18::ScatterND``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +ScatterND takes three inputs ``data`` tensor of rank r >= 1, ``indices`` +tensor of rank q >= 1, and ``updates`` tensor of rank q + r - +indices.shape[-1] - 1. The output of the operation is produced by +creating a copy of the input ``data``, and then updating its value to +values specified by ``updates`` at specific index positions specified by +``indices``. Its output shape is the same as the shape of ``data``. + +``indices`` is an integer tensor. Let k denote indices.shape[-1], the +last dimension in the shape of ``indices``. ``indices`` is treated as a +(q-1)-dimensional tensor of k-tuples, where each k-tuple is a +partial-index into ``data``. Hence, k can be a value at most the rank of +``data``. When k equals rank(data), each update entry specifies an +update to a single element of the tensor. 
When k is less than rank(data) +each update entry specifies an update to a slice of the tensor. Index +values are allowed to be negative, as per the usual convention for +counting backwards from the end, but are expected in the valid range. + +``updates`` is treated as a (q-1)-dimensional tensor of +replacement-slice-values. Thus, the first (q-1) dimensions of +updates.shape must match the first (q-1) dimensions of indices.shape. +The remaining dimensions of ``updates`` correspond to the dimensions of +the replacement-slice-values. Each replacement-slice-value is a (r-k) +dimensional tensor, corresponding to the trailing (r-k) dimensions of +``data``. Thus, the shape of ``updates`` must equal indices.shape[0:q-1] +++ data.shape[k:r-1], where ++ denotes the concatenation of shapes. + +The ``output`` is calculated via the following equation: + +:: + + output = np.copy(data) + update_indices = indices.shape[:-1] + for idx in np.ndindex(update_indices): + output[indices[idx]] = updates[idx] + +The order of iteration in the above loop is not specified. In +particular, indices should not have duplicate entries: that is, if idx1 +!= idx2, then indices[idx1] != indices[idx2]. This ensures that the +output value does not depend on the iteration order. + +``reduction`` allows specification of an optional reduction operation, +which is applied to all values in ``updates`` tensor into ``output`` at +the specified ``indices``. In cases where ``reduction`` is set to +"none", indices should not have duplicate entries: that is, if idx1 != +idx2, then indices[idx1] != indices[idx2]. This ensures that the output +value does not depend on the iteration order. When ``reduction`` is set +to some reduction function ``f``, ``output`` is calculated as follows: + +:: + + output = np.copy(data) + update_indices = indices.shape[:-1] + for idx in np.ndindex(update_indices): + output[indices[idx]] = f(output[indices[idx]], updates[idx]) + +where the ``f`` is ``+``, ``*``, ``max`` or ``min`` as specified. + +This operator is the inverse of GatherND. + +(Opset 18 change): Adds max/min to the set of allowed reduction ops. + +Example 1: + +:: + + data = [1, 2, 3, 4, 5, 6, 7, 8] + indices = [[4], [3], [1], [7]] + updates = [9, 10, 11, 12] + output = [1, 11, 3, 10, 9, 6, 7, 12] + +Example 2: + +:: + + data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + indices = [[0], [2]] + updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]] + output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], + [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], + [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] + +Parameters +========== +data + Type T. + Tensor of rank r >= 1. +indices + Type tensor(int64). + Tensor of rank q >= 1. +updates + Type T. + Tensor of rank q + r - indices_shape[-1] - 1. +reduction + Attribute. + Type of reduction to apply: none (default), add, mul, max, min. 'none': + no reduction applied. 'add': reduction using the addition operation. + 'mul': reduction using the addition operation. 'max': reduction using + the maximum operation.'min': reduction using the minimum operation. + +Returns +======= +output : Var + Type T. + Tensor of rank r >= 1. 
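The update equation quoted above is directly runnable; this NumPy sketch replays Example 1 (the values are taken from the example, and the loop is only a reference implementation, not how the backend computes it)::

    import numpy as np

    data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    indices = np.array([[4], [3], [1], [7]])
    updates = np.array([9, 10, 11, 12])

    output = np.copy(data)
    for idx in np.ndindex(indices.shape[:-1]):
        # Each k-tuple in `indices` addresses a slice of `data`;
        # here k == rank(data) == 1, so it addresses a single element.
        output[tuple(indices[idx])] = updates[idx]
    print(output)  # [ 1 11  3 10  9  6  7 12]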
+ +Notes +===== +Signature: ``ai.onnx@18::ScatterND``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _ScatterND( - _ScatterND.Attributes( - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterND.Inputs( - data=unwrap_vars(data), - indices=unwrap_vars(indices), - updates=unwrap_vars(updates), - ), - ) - .get_output_vars( - data=get_value(data), - indices=get_value(indices), - updates=get_value(updates), - ) - .output - ) - - -def split( - input: Var, - split: Optional[Var] = None, - *, - axis: int = 0, - num_outputs: Optional[int] = None, -) -> Sequence[Var]: + return _ScatterND( + _ScatterND.Attributes( + reduction=AttrString(reduction, name="reduction"), + ), _ScatterND.Inputs( + data=unwrap_vars(data), indices=unwrap_vars(indices), updates=unwrap_vars(updates), ), ).get_output_vars( + data=get_value(data), indices=get_value(indices), updates=get_value(updates), ).output + + +def split(input: Var, split: Optional[Var] = None, *, axis: int = 0, num_outputs: Optional[int] = None, ) -> Sequence[Var]: r""" - Split a tensor into a list of tensors, along the specified 'axis'. - Either input 'split' or the attribute 'num_outputs' should be specified, - but not both. If the attribute 'num_outputs' is specified, then the - tensor is split into equal sized parts. If the tensor is not evenly - splittable into ``num_outputs``, the last chunk will be smaller. If the - input 'split' is specified, it indicates the sizes of each output in the - split. - - Parameters - ========== - input - Type T. - The tensor to split - split - Type tensor(int64). - Optional length of each output. Values should be >= 0.Sum of the values - must be equal to the dim value at 'axis' specified. - axis - Attribute. - Which axis to split on. A negative value means counting dimensions from - the back. Accepted range is [-rank, rank-1] where r = rank(input). - num_outputs - Attribute. - Number of outputs to split parts of the tensor into. If the tensor is - not evenly splittable the last chunk will be smaller. - - Returns - ======= - outputs : Sequence[Var] - Type T. - One or more outputs forming list of tensors after splitting - - Notes - ===== - Signature: ``ai.onnx@18::Split``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Split a tensor into a list of tensors, along the specified 'axis'. +Either input 'split' or the attribute 'num_outputs' should be specified, +but not both. If the attribute 'num_outputs' is specified, then the +tensor is split into equal sized parts. If the tensor is not evenly +splittable into ``num_outputs``, the last chunk will be smaller. If the +input 'split' is specified, it indicates the sizes of each output in the +split. + +Parameters +========== +input + Type T. + The tensor to split +split + Type tensor(int64). + Optional length of each output. Values should be >= 0.Sum of the values + must be equal to the dim value at 'axis' specified. +axis + Attribute. + Which axis to split on. 
A negative value means counting dimensions from + the back. Accepted range is [-rank, rank-1] where r = rank(input). +num_outputs + Attribute. + Number of outputs to split parts of the tensor into. If the tensor is + not evenly splittable the last chunk will be smaller. + +Returns +======= +outputs : Sequence[Var] + Type T. + One or more outputs forming list of tensors after splitting + +Notes +===== +Signature: ``ai.onnx@18::Split``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Split( - _Split.Attributes( - axis=AttrInt64(axis, name="axis"), - num_outputs=AttrInt64.maybe(num_outputs, name="num_outputs"), - ), - _Split.Inputs( - input=unwrap_vars(input), - split=unwrap_vars(split), - ), - out_variadic=num_outputs, - ) - .get_output_vars( - input=get_value(input), - split=get_value(split), - ) - .outputs - ) + return _Split( + _Split.Attributes( + axis=AttrInt64(axis, name="axis"), + num_outputs=AttrInt64.maybe(num_outputs, name="num_outputs"), + ), _Split.Inputs( + input=unwrap_vars(input), split=unwrap_vars(split), ), out_variadic=num_outputs, ).get_output_vars( + input=get_value(input), split=get_value(split), ).outputs def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: @@ -3381,4 +2791,4 @@ def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: "Xor": xor, } -__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] + ["const"] +__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] + ["const"] diff --git a/src/spox/opset/ai/onnx/v19.py b/src/spox/opset/ai/onnx/v19.py index 8fc23f6..c8119fd 100644 --- a/src/spox/opset/ai/onnx/v19.py +++ b/src/spox/opset/ai/onnx/v19.py @@ -1,18 +1,21 @@ -# Copyright (c) QuantCo 2023-2024 -# SPDX-License-Identifier: BSD-3-Clause - # ruff: noqa: E741 -- Allow ambiguous variable name -from collections.abc import Iterable, Sequence +import typing +import warnings from dataclasses import dataclass +from collections.abc import Iterable, Sequence from typing import ( + Any, Callable, Optional, + Union, ) from typing import cast as typing_cast import numpy as np import numpy.typing as npt +from spox._var import Var, VarInfo, result_type, unwrap_vars, get_value +from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._attributes import ( AttrDtype, AttrFloat32, @@ -23,354 +26,184 @@ AttrString, AttrStrings, AttrTensor, + AttrType, ) -from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._graph import Graph, subgraph +from spox._internal_op import intro from spox._node import OpType -from spox._standard import StandardNode -from spox._type_system import Tensor, Type +from spox._standard import InferenceError, StandardNode +from spox._type_system import Tensor, Type, Sequence as SpoxSequence from spox._value_prop import PropValueType -from spox._var import Var, VarInfo, get_value, unwrap_vars -from spox.opset.ai.onnx.v18 import ( - _DFT, - _GRU, - _LRN, - _LSTM, - _RNN, - _STFT, - _Abs, - _Acos, - _Acosh, - _Add, - _And, - _ArgMax, - _ArgMin, - _Asin, - _Asinh, - _Atan, - _Atanh, - _BatchNormalization, - _Bernoulli, - _BitShift, - _BitwiseAnd, - _BitwiseNot, - _BitwiseOr, - _BitwiseXor, - _BlackmanWindow, - _Ceil, - _Celu, - _CenterCropPad, - _Clip, - _Col2Im, - _Compress, - _Concat, - _ConcatFromSequence, 
- _ConstantOfShape, - _Conv, - _ConvInteger, - _ConvTranspose, - _Cos, - _Cosh, - _CumSum, - _DepthToSpace, - _Det, - _Div, - _Dropout, - _DynamicQuantizeLinear, - _Einsum, - _Elu, - _Erf, - _Exp, - _Expand, - _EyeLike, - _Flatten, - _Floor, - _Gather, - _GatherElements, - _GatherND, - _Gemm, - _GlobalAveragePool, - _GlobalLpPool, - _GlobalMaxPool, - _Greater, - _GreaterOrEqual, - _GridSample, - _GroupNormalization, - _HammingWindow, - _HannWindow, - _Hardmax, - _HardSigmoid, - _HardSwish, - _InstanceNormalization, - _IsInf, - _IsNaN, - _LayerNormalization, - _LeakyRelu, - _Less, - _LessOrEqual, - _Log, - _LogSoftmax, - _LpNormalization, - _LpPool, - _MatMul, - _MatMulInteger, - _Max, - _MaxPool, - _MaxRoiPool, - _MaxUnpool, - _Mean, - _MeanVarianceNormalization, - _MelWeightMatrix, - _Min, - _Mish, - _Mod, - _Mul, - _Multinomial, - _Neg, - _NegativeLogLikelihoodLoss, - _NonMaxSuppression, - _NonZero, - _Not, - _OneHot, - _Optional, - _OptionalGetElement, - _OptionalHasElement, - _Or, - _Pow, - _PRelu, - _QLinearConv, - _QLinearMatMul, - _RandomNormal, - _RandomNormalLike, - _RandomUniform, - _RandomUniformLike, - _Range, - _Reciprocal, - _ReduceL1, - _ReduceL2, - _ReduceLogSum, - _ReduceLogSumExp, - _ReduceMax, - _ReduceMean, - _ReduceMin, - _ReduceProd, - _ReduceSum, - _ReduceSumSquare, - _Relu, - _ReverseSequence, - _RoiAlign, - _Round, - _ScatterElements, - _ScatterND, - _Selu, - _SequenceAt, - _SequenceConstruct, - _SequenceEmpty, - _SequenceErase, - _SequenceInsert, - _SequenceLength, - _SequenceMap, - _Shrink, - _Sigmoid, - _Sign, - _Sin, - _Sinh, - _Slice, - _Softmax, - _SoftmaxCrossEntropyLoss, - _Softplus, - _Softsign, - _SpaceToDepth, - _Split, - _SplitToSequence, - _Sqrt, - _Squeeze, - _StringNormalizer, - _Sub, - _Sum, - _Tan, - _Tanh, - _TfIdfVectorizer, - _ThresholdedRelu, - _Tile, - _TopK, - _Transpose, - _Trilu, - _Unique, - _Unsqueeze, - _Where, - _Xor, - abs, - acos, - acosh, - add, - and_, - arg_max, - arg_min, - asin, - asinh, - atan, - atanh, - batch_normalization, - bernoulli, - bit_shift, - bitwise_and, - bitwise_not, - bitwise_or, - bitwise_xor, - blackman_window, - ceil, - celu, - center_crop_pad, - clip, - col2_im, - compress, - concat, - concat_from_sequence, - constant_of_shape, - conv, - conv_integer, - conv_transpose, - cos, - cosh, - cumsum, - depth_to_space, - det, - dft, - div, - dropout, - dynamic_quantize_linear, - einsum, - elu, - erf, - exp, - expand, - eye_like, - flatten, - floor, - gather, - gather_elements, - gather_nd, - gemm, - global_average_pool, - global_lp_pool, - global_max_pool, - greater, - greater_or_equal, - grid_sample, - group_normalization, - gru, - hamming_window, - hann_window, - hard_sigmoid, - hard_swish, - hardmax, - instance_normalization, - isinf, - isnan, - layer_normalization, - leaky_relu, - less, - less_or_equal, - log, - log_softmax, - lp_normalization, - lp_pool, - lrn, - lstm, - matmul, - matmul_integer, - max, - max_pool, - max_roi_pool, - max_unpool, - mean, - mean_variance_normalization, - mel_weight_matrix, - min, - mish, - mod, - mul, - multinomial, - neg, - negative_log_likelihood_loss, - non_max_suppression, - non_zero, - not_, - one_hot, - optional, - optional_get_element, - optional_has_element, - or_, - pow, - prelu, - qlinear_conv, - qlinear_matmul, - random_normal, - random_normal_like, - random_uniform, - random_uniform_like, - range, - reciprocal, - reduce_l1, - reduce_l2, - reduce_log_sum, - reduce_log_sum_exp, - reduce_max, - reduce_mean, - reduce_min, - reduce_prod, - reduce_sum, - reduce_sum_square, - 
relu, - reverse_sequence, - rnn, - roi_align, - round, - scatter_elements, - scatter_nd, - selu, - sequence_at, - sequence_construct, - sequence_empty, - sequence_erase, - sequence_insert, - sequence_length, - sequence_map, - shrink, - sigmoid, - sign, - sin, - sinh, - slice, - softmax, - softmax_cross_entropy_loss, - softplus, - softsign, - space_to_depth, - split, - split_to_sequence, - sqrt, - squeeze, - stft, - string_normalizer, - sub, - sum, - tan, - tanh, - tf_idf_vectorizer, - thresholded_relu, - tile, - top_k, - transpose, - trilu, - unique, - unsqueeze, - where, - xor, -) +from spox.opset.ai.onnx.v18 import _Abs, abs +from spox.opset.ai.onnx.v18 import _Acos, acos +from spox.opset.ai.onnx.v18 import _Acosh, acosh +from spox.opset.ai.onnx.v18 import _Add, add +from spox.opset.ai.onnx.v18 import _And, and_ +from spox.opset.ai.onnx.v18 import _ArgMax, arg_max +from spox.opset.ai.onnx.v18 import _ArgMin, arg_min +from spox.opset.ai.onnx.v18 import _Asin, asin +from spox.opset.ai.onnx.v18 import _Asinh, asinh +from spox.opset.ai.onnx.v18 import _Atan, atan +from spox.opset.ai.onnx.v18 import _Atanh, atanh +from spox.opset.ai.onnx.v18 import _BatchNormalization, batch_normalization +from spox.opset.ai.onnx.v18 import _Bernoulli, bernoulli +from spox.opset.ai.onnx.v18 import _BitShift, bit_shift +from spox.opset.ai.onnx.v18 import _BitwiseAnd, bitwise_and +from spox.opset.ai.onnx.v18 import _BitwiseNot, bitwise_not +from spox.opset.ai.onnx.v18 import _BitwiseOr, bitwise_or +from spox.opset.ai.onnx.v18 import _BitwiseXor, bitwise_xor +from spox.opset.ai.onnx.v18 import _BlackmanWindow, blackman_window +from spox.opset.ai.onnx.v18 import _Ceil, ceil +from spox.opset.ai.onnx.v18 import _Celu, celu +from spox.opset.ai.onnx.v18 import _CenterCropPad, center_crop_pad +from spox.opset.ai.onnx.v18 import _Clip, clip +from spox.opset.ai.onnx.v18 import _Col2Im, col2_im +from spox.opset.ai.onnx.v18 import _Compress, compress +from spox.opset.ai.onnx.v18 import _Concat, concat +from spox.opset.ai.onnx.v18 import _ConcatFromSequence, concat_from_sequence +from spox.opset.ai.onnx.v18 import _ConstantOfShape, constant_of_shape +from spox.opset.ai.onnx.v18 import _Conv, conv +from spox.opset.ai.onnx.v18 import _ConvInteger, conv_integer +from spox.opset.ai.onnx.v18 import _ConvTranspose, conv_transpose +from spox.opset.ai.onnx.v18 import _Cos, cos +from spox.opset.ai.onnx.v18 import _Cosh, cosh +from spox.opset.ai.onnx.v18 import _CumSum, cumsum +from spox.opset.ai.onnx.v18 import _DFT, dft +from spox.opset.ai.onnx.v18 import _DepthToSpace, depth_to_space +from spox.opset.ai.onnx.v18 import _Det, det +from spox.opset.ai.onnx.v18 import _Div, div +from spox.opset.ai.onnx.v18 import _Dropout, dropout +from spox.opset.ai.onnx.v18 import _DynamicQuantizeLinear, dynamic_quantize_linear +from spox.opset.ai.onnx.v18 import _Einsum, einsum +from spox.opset.ai.onnx.v18 import _Elu, elu +from spox.opset.ai.onnx.v18 import _Erf, erf +from spox.opset.ai.onnx.v18 import _Exp, exp +from spox.opset.ai.onnx.v18 import _Expand, expand +from spox.opset.ai.onnx.v18 import _EyeLike, eye_like +from spox.opset.ai.onnx.v18 import _Flatten, flatten +from spox.opset.ai.onnx.v18 import _Floor, floor +from spox.opset.ai.onnx.v18 import _GRU, gru +from spox.opset.ai.onnx.v18 import _Gather, gather +from spox.opset.ai.onnx.v18 import _GatherElements, gather_elements +from spox.opset.ai.onnx.v18 import _GatherND, gather_nd +from spox.opset.ai.onnx.v18 import _Gemm, gemm +from spox.opset.ai.onnx.v18 import _GlobalAveragePool, 
global_average_pool +from spox.opset.ai.onnx.v18 import _GlobalLpPool, global_lp_pool +from spox.opset.ai.onnx.v18 import _GlobalMaxPool, global_max_pool +from spox.opset.ai.onnx.v18 import _Greater, greater +from spox.opset.ai.onnx.v18 import _GreaterOrEqual, greater_or_equal +from spox.opset.ai.onnx.v18 import _GridSample, grid_sample +from spox.opset.ai.onnx.v18 import _GroupNormalization, group_normalization +from spox.opset.ai.onnx.v18 import _HammingWindow, hamming_window +from spox.opset.ai.onnx.v18 import _HannWindow, hann_window +from spox.opset.ai.onnx.v18 import _HardSigmoid, hard_sigmoid +from spox.opset.ai.onnx.v18 import _HardSwish, hard_swish +from spox.opset.ai.onnx.v18 import _Hardmax, hardmax +from spox.opset.ai.onnx.v18 import _InstanceNormalization, instance_normalization +from spox.opset.ai.onnx.v18 import _IsInf, isinf +from spox.opset.ai.onnx.v18 import _IsNaN, isnan +from spox.opset.ai.onnx.v18 import _LRN, lrn +from spox.opset.ai.onnx.v18 import _LSTM, lstm +from spox.opset.ai.onnx.v18 import _LayerNormalization, layer_normalization +from spox.opset.ai.onnx.v18 import _LeakyRelu, leaky_relu +from spox.opset.ai.onnx.v18 import _Less, less +from spox.opset.ai.onnx.v18 import _LessOrEqual, less_or_equal +from spox.opset.ai.onnx.v18 import _Log, log +from spox.opset.ai.onnx.v18 import _LogSoftmax, log_softmax +from spox.opset.ai.onnx.v18 import _LpNormalization, lp_normalization +from spox.opset.ai.onnx.v18 import _LpPool, lp_pool +from spox.opset.ai.onnx.v18 import _MatMul, matmul +from spox.opset.ai.onnx.v18 import _MatMulInteger, matmul_integer +from spox.opset.ai.onnx.v18 import _Max, max +from spox.opset.ai.onnx.v18 import _MaxPool, max_pool +from spox.opset.ai.onnx.v18 import _MaxRoiPool, max_roi_pool +from spox.opset.ai.onnx.v18 import _MaxUnpool, max_unpool +from spox.opset.ai.onnx.v18 import _Mean, mean +from spox.opset.ai.onnx.v18 import _MeanVarianceNormalization, mean_variance_normalization +from spox.opset.ai.onnx.v18 import _MelWeightMatrix, mel_weight_matrix +from spox.opset.ai.onnx.v18 import _Min, min +from spox.opset.ai.onnx.v18 import _Mish, mish +from spox.opset.ai.onnx.v18 import _Mod, mod +from spox.opset.ai.onnx.v18 import _Mul, mul +from spox.opset.ai.onnx.v18 import _Multinomial, multinomial +from spox.opset.ai.onnx.v18 import _Neg, neg +from spox.opset.ai.onnx.v18 import _NegativeLogLikelihoodLoss, negative_log_likelihood_loss +from spox.opset.ai.onnx.v18 import _NonMaxSuppression, non_max_suppression +from spox.opset.ai.onnx.v18 import _NonZero, non_zero +from spox.opset.ai.onnx.v18 import _Not, not_ +from spox.opset.ai.onnx.v18 import _OneHot, one_hot +from spox.opset.ai.onnx.v18 import _Optional, optional +from spox.opset.ai.onnx.v18 import _OptionalGetElement, optional_get_element +from spox.opset.ai.onnx.v18 import _OptionalHasElement, optional_has_element +from spox.opset.ai.onnx.v18 import _Or, or_ +from spox.opset.ai.onnx.v18 import _PRelu, prelu +from spox.opset.ai.onnx.v18 import _Pow, pow +from spox.opset.ai.onnx.v18 import _QLinearConv, qlinear_conv +from spox.opset.ai.onnx.v18 import _QLinearMatMul, qlinear_matmul +from spox.opset.ai.onnx.v18 import _RNN, rnn +from spox.opset.ai.onnx.v18 import _RandomNormal, random_normal +from spox.opset.ai.onnx.v18 import _RandomNormalLike, random_normal_like +from spox.opset.ai.onnx.v18 import _RandomUniform, random_uniform +from spox.opset.ai.onnx.v18 import _RandomUniformLike, random_uniform_like +from spox.opset.ai.onnx.v18 import _Range, range +from spox.opset.ai.onnx.v18 import 
_Reciprocal, reciprocal +from spox.opset.ai.onnx.v18 import _ReduceL1, reduce_l1 +from spox.opset.ai.onnx.v18 import _ReduceL2, reduce_l2 +from spox.opset.ai.onnx.v18 import _ReduceLogSum, reduce_log_sum +from spox.opset.ai.onnx.v18 import _ReduceLogSumExp, reduce_log_sum_exp +from spox.opset.ai.onnx.v18 import _ReduceMax, reduce_max +from spox.opset.ai.onnx.v18 import _ReduceMean, reduce_mean +from spox.opset.ai.onnx.v18 import _ReduceMin, reduce_min +from spox.opset.ai.onnx.v18 import _ReduceProd, reduce_prod +from spox.opset.ai.onnx.v18 import _ReduceSum, reduce_sum +from spox.opset.ai.onnx.v18 import _ReduceSumSquare, reduce_sum_square +from spox.opset.ai.onnx.v18 import _Relu, relu +from spox.opset.ai.onnx.v18 import _ReverseSequence, reverse_sequence +from spox.opset.ai.onnx.v18 import _RoiAlign, roi_align +from spox.opset.ai.onnx.v18 import _Round, round +from spox.opset.ai.onnx.v18 import _STFT, stft +from spox.opset.ai.onnx.v18 import _ScatterElements, scatter_elements +from spox.opset.ai.onnx.v18 import _ScatterND, scatter_nd +from spox.opset.ai.onnx.v18 import _Selu, selu +from spox.opset.ai.onnx.v18 import _SequenceAt, sequence_at +from spox.opset.ai.onnx.v18 import _SequenceConstruct, sequence_construct +from spox.opset.ai.onnx.v18 import _SequenceEmpty, sequence_empty +from spox.opset.ai.onnx.v18 import _SequenceErase, sequence_erase +from spox.opset.ai.onnx.v18 import _SequenceInsert, sequence_insert +from spox.opset.ai.onnx.v18 import _SequenceLength, sequence_length +from spox.opset.ai.onnx.v18 import _SequenceMap, sequence_map +from spox.opset.ai.onnx.v18 import _Shrink, shrink +from spox.opset.ai.onnx.v18 import _Sigmoid, sigmoid +from spox.opset.ai.onnx.v18 import _Sign, sign +from spox.opset.ai.onnx.v18 import _Sin, sin +from spox.opset.ai.onnx.v18 import _Sinh, sinh +from spox.opset.ai.onnx.v18 import _Slice, slice +from spox.opset.ai.onnx.v18 import _Softmax, softmax +from spox.opset.ai.onnx.v18 import _SoftmaxCrossEntropyLoss, softmax_cross_entropy_loss +from spox.opset.ai.onnx.v18 import _Softplus, softplus +from spox.opset.ai.onnx.v18 import _Softsign, softsign +from spox.opset.ai.onnx.v18 import _SpaceToDepth, space_to_depth +from spox.opset.ai.onnx.v18 import _Split, split +from spox.opset.ai.onnx.v18 import _SplitToSequence, split_to_sequence +from spox.opset.ai.onnx.v18 import _Sqrt, sqrt +from spox.opset.ai.onnx.v18 import _Squeeze, squeeze +from spox.opset.ai.onnx.v18 import _StringNormalizer, string_normalizer +from spox.opset.ai.onnx.v18 import _Sub, sub +from spox.opset.ai.onnx.v18 import _Sum, sum +from spox.opset.ai.onnx.v18 import _Tan, tan +from spox.opset.ai.onnx.v18 import _Tanh, tanh +from spox.opset.ai.onnx.v18 import _TfIdfVectorizer, tf_idf_vectorizer +from spox.opset.ai.onnx.v18 import _ThresholdedRelu, thresholded_relu +from spox.opset.ai.onnx.v18 import _Tile, tile +from spox.opset.ai.onnx.v18 import _TopK, top_k +from spox.opset.ai.onnx.v18 import _Transpose, transpose +from spox.opset.ai.onnx.v18 import _Trilu, trilu +from spox.opset.ai.onnx.v18 import _Unique, unique +from spox.opset.ai.onnx.v18 import _Unsqueeze, unsqueeze +from spox.opset.ai.onnx.v18 import _Where, where +from spox.opset.ai.onnx.v18 import _Xor, xor class _AveragePool(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -396,7 +229,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Cast(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -417,7 +249,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class 
_CastLike(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -438,7 +269,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Constant(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -457,9 +287,7 @@ class Outputs(BaseOutputs): output: VarInfo def propagate_values(self, initializers) -> dict[str, PropValueType]: - ((key, raw),) = ( - (k, v.value) for k, v in self.attrs.get_fields().items() if v is not None - ) + ((key, raw),) = ((k, v.value) for k, v in self.attrs.get_fields().items() if v is not None) if key == "value": value = raw elif key == "value_float": @@ -477,18 +305,14 @@ def propagate_values(self, initializers) -> dict[str, PropValueType]: elif key == "sparse_value": return {} else: - raise RuntimeError( - f"Could not extract the set Constant value attribute, got: {key}" - ) + raise RuntimeError(f"Could not extract the set Constant value attribute, got: {key}") return {"output": value} - op_type = OpType("Constant", "", 19) attrs: Attributes inputs: BaseInputs outputs: Outputs - class _DeformConv(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -517,7 +341,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _DequantizeLinear(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -539,7 +362,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Equal(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -560,7 +382,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Identity(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -580,7 +401,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _If(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -601,7 +421,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Loop(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -623,7 +442,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Pad(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -646,7 +464,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _QuantizeLinear(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -669,7 +486,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Reshape(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -690,7 +506,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Resize(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -721,7 +536,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Scan(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -746,7 +560,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Shape(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -767,7 +580,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Size(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -787,1964 +599,1655 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - -def average_pool( - X: Var, - *, - auto_pad: str = "NOTSET", - ceil_mode: int = 0, - count_include_pad: int = 0, - dilations: Optional[Iterable[int]] = None, - kernel_shape: Iterable[int], - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: +def average_pool(X: Var, *, auto_pad: str = "NOTSET", ceil_mode: int = 0, count_include_pad: int = 0, dilations: Optional[Iterable[int]] = None, kernel_shape: Iterable[int], 
pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: r""" - AveragePool consumes an input tensor X and applies average pooling - across the tensor according to kernel sizes, stride sizes, and pad - lengths. average pooling consisting of computing the average on all - values of a subset of the input tensor according to the kernel size and - downsampling the data into the output tensor Y for further processing. - The output spatial shape is calculated differently depending on whether - explicit padding is used, where pads is employed, or auto padding is - used, where auto_pad is utilized. With explicit padding - (https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d): - - :: - - output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) - - or - - :: - - output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) - - if ceil_mode is enabled. ``pad_shape[i]`` is the sum of pads along axis - ``i``. Sliding windows that would start in the right padded region are - ignored. - - ``auto_pad`` is a DEPRECATED attribute. If you are using them currently, - the output spatial shape will be following when ceil_mode is enabled: - - :: - - VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) - - or when ceil_mode is disabled - (https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D): - - :: - - VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]) + 1 - SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor((input_spatial_shape[i] - 1) / strides_spatial_shape[i]) + 1 - - And pad shape will be following if ``SAME_UPPER`` or ``SAME_LOWER``: - - :: - - pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] - - The output of each pooling window is divided by the number of elements - (exclude pad when attribute count_include_pad is zero). - - Parameters - ========== - X - Type T. - Input data tensor from the previous operator; dimensions for image case - are (N x C x H x W), where N is the batch size, C is the number of - channels, and H and W are the height and the width of the data. For non - image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), - where N is the batch size. Optionally, if dimension denotation is in - effect, the operation expects the input data tensor to arrive with the - dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, - DATA_FEATURE ...]. - auto_pad - Attribute. - auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where - default value is NOTSET, which means explicit padding is used. - SAME_UPPER or SAME_LOWER mean pad the input so that - ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis - ``i``. The padding is split between the two sides equally or almost - equally (depending on whether it is even or odd). In case the padding is - an odd number, the extra padding is added at the end for SAME_UPPER and - at the beginning for SAME_LOWER. - ceil_mode - Attribute. 
- Whether to use ceil or floor (default) to compute the output shape. - count_include_pad - Attribute. - Whether include pad pixels when calculating values for the edges. - Default is 0, doesn't count include pad. - dilations - Attribute. - Dilation value along each spatial axis of filter. If not present, the - dilation defaults to 1 along each spatial axis. - kernel_shape - Attribute. - The size of the kernel along each axis. - pads - Attribute. - Padding for the beginning and ending along each spatial axis, it can - take any value greater than or equal to 0. The value represent the - number of pixels added to the beginning and end part of the - corresponding axis. ``pads`` format should be as follow [x1_begin, - x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels - added at the beginning of axis ``i`` and xi_end, the number of pixels - added at the end of axis ``i``. This attribute cannot be used - simultaneously with auto_pad attribute. If not present, the padding - defaults to 0 along start and end of each spatial axis. - strides - Attribute. - Stride along each spatial axis. If not present, the stride defaults to 1 - along each spatial axis. - - Returns - ======= - Y : Var - Type T. - Output data tensor from average or max pooling across the input tensor. - Dimensions will vary based on various kernel, stride, and pad sizes. - Floor value of the dimension is used - - Notes - ===== - Signature: ``ai.onnx@19::AveragePool``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` +AveragePool consumes an input tensor X and applies average pooling +across the tensor according to kernel sizes, stride sizes, and pad +lengths. average pooling consisting of computing the average on all +values of a subset of the input tensor according to the kernel size and +downsampling the data into the output tensor Y for further processing. +The output spatial shape is calculated differently depending on whether +explicit padding is used, where pads is employed, or auto padding is +used, where auto_pad is utilized. With explicit padding +(https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html?highlight=maxpool#torch.nn.MaxPool2d): + +:: + + output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) + +or + +:: + + output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) + +if ceil_mode is enabled. ``pad_shape[i]`` is the sum of pads along axis +``i``. Sliding windows that would start in the right padded region are +ignored. + +``auto_pad`` is a DEPRECATED attribute. 
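A quick numeric check of the explicit-padding formula above, using illustrative values only (input extent 32, kernel 3, pads summing to 2, dilation 1, stride 2)::

    import math

    x, pad, k, d, s = 32, 2, 3, 1, 2
    inner = (x + pad - d * (k - 1) - 1) / s + 1
    math.floor(inner)  # 16 -- ceil_mode disabled
    math.ceil(inner)   # 17 -- ceil_mode enabled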
If you are using them currently, +the output spatial shape will be following when ceil_mode is enabled: + +:: + + VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i]) + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i]) + +or when ceil_mode is disabled +(https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D): + +:: + + VALID: output_spatial_shape[i] = floor((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i]) + 1 + SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = floor((input_spatial_shape[i] - 1) / strides_spatial_shape[i]) + 1 + +And pad shape will be following if ``SAME_UPPER`` or ``SAME_LOWER``: + +:: + + pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i] + +The output of each pooling window is divided by the number of elements +(exclude pad when attribute count_include_pad is zero). + +Parameters +========== +X + Type T. + Input data tensor from the previous operator; dimensions for image case + are (N x C x H x W), where N is the batch size, C is the number of + channels, and H and W are the height and the width of the data. For non + image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), + where N is the batch size. Optionally, if dimension denotation is in + effect, the operation expects the input data tensor to arrive with the + dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, + DATA_FEATURE ...]. +auto_pad + Attribute. + auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where + default value is NOTSET, which means explicit padding is used. + SAME_UPPER or SAME_LOWER mean pad the input so that + ``output_shape[i] = ceil(input_shape[i] / strides[i])`` for each axis + ``i``. The padding is split between the two sides equally or almost + equally (depending on whether it is even or odd). In case the padding is + an odd number, the extra padding is added at the end for SAME_UPPER and + at the beginning for SAME_LOWER. +ceil_mode + Attribute. + Whether to use ceil or floor (default) to compute the output shape. +count_include_pad + Attribute. + Whether include pad pixels when calculating values for the edges. + Default is 0, doesn't count include pad. +dilations + Attribute. + Dilation value along each spatial axis of filter. If not present, the + dilation defaults to 1 along each spatial axis. +kernel_shape + Attribute. + The size of the kernel along each axis. +pads + Attribute. + Padding for the beginning and ending along each spatial axis, it can + take any value greater than or equal to 0. The value represent the + number of pixels added to the beginning and end part of the + corresponding axis. ``pads`` format should be as follow [x1_begin, + x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels + added at the beginning of axis ``i`` and xi_end, the number of pixels + added at the end of axis ``i``. This attribute cannot be used + simultaneously with auto_pad attribute. If not present, the padding + defaults to 0 along start and end of each spatial axis. +strides + Attribute. + Stride along each spatial axis. If not present, the stride defaults to 1 + along each spatial axis. + +Returns +======= +Y : Var + Type T. + Output data tensor from average or max pooling across the input tensor. 
+ Dimensions will vary based on various kernel, stride, and pad sizes. + Floor value of the dimension is used + +Notes +===== +Signature: ``ai.onnx@19::AveragePool``. + +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _AveragePool( - _AveragePool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - count_include_pad=AttrInt64( - count_include_pad, name="count_include_pad" - ), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _AveragePool.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def cast( - input: Var, - *, - saturate: int = 1, - to: npt.DTypeLike, -) -> Var: + return _AveragePool( + _AveragePool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + count_include_pad=AttrInt64(count_include_pad, name="count_include_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _AveragePool.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def cast(input: Var, *, saturate: int = 1, to: npt.DTypeLike, ) -> Var: r""" - The operator casts the elements of a given input tensor to a data type - specified by the 'to' argument and returns an output tensor of the same - size in the converted type. The 'to' argument must be one of the data - types specified in the 'DataType' enum field in the TensorProto message. - - Casting from string tensor in plain (e.g., "3.14" and "1000") and - scientific numeric representations (e.g., "1e-5" and "1E8") to float - types is supported. For example, converting string "100.5" to an integer - may yield result 100. There are some string literals reserved for - special floating-point values; "+INF" (and "INF"), "-INF", and "NaN" are - positive infinity, negative infinity, and not-a-number, respectively. - Any string which can exactly match "+INF" in a case-insensitive way - would be mapped to positive infinite. Similarly, this case-insensitive - rule is applied to "INF" and "NaN". When casting from numeric tensors to - string tensors, plain floating-point representation (such as - "314.15926") would be used. Converting non-numerical-literal string such - as "Hello World!" is an undefined behavior. Cases of converting string - representing floating-point arithmetic value, such as "2.718", to INT is - an undefined behavior. - - Conversion from a numerical type to any numerical type is always - allowed. User must be aware of precision loss and value change caused by - range difference between two types. For example, a 64-bit float - 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, - converting an integer 36 to Boolean may produce 1 because we truncate - bits which can't be stored in the targeted type. - - In more detail, the conversion among numerical types should follow these - rules if the destination type is not a float 8 type. - - - Casting from floating point to: - - - floating point: +/- infinity if OOR (out of range). - - fixed point: undefined if OOR. - - bool: +/- 0.0 to False; all else to True. 
- - - Casting from fixed point to: - - - floating point: +/- infinity if OOR. (+ infinity in the case of - uint) - - fixed point: when OOR, discard higher bits and reinterpret (with - respect to two's complement representation for signed types). For - example, 200 (int16) -> -56 (int8). - - bool: zero to False; nonzero to True. - - - Casting from bool to: - - - floating point: ``{1.0, 0.0}``. - - fixed point: ``{1, 0}``. - - bool: no change. - - Float 8 type were introduced to speed up the training of deep models. By - default the conversion of a float *x* obeys to the following rules. - ``[x]`` means the value rounded to the target mantissa width. - - ============== =========== ======== ======== ======== - x E4M3FN E4M3FNUZ E5M2 E5M2FNUZ - ============== =========== ======== ======== ======== - 0 0 0 0 0 - -0 -0 0 -0 0 - NaN NaN NaN NaN NaN - +/- Inf +/- FLT_MAX NaN FLT_MAX NaN - [x] > FLT_MAX FLT_MAX FLT_MAX FLT_MAX FLT_MAX - [x] < -FLT_MAX -FLT_MAX -FLT_MAX -FLT_MAX -FLT_MAX - else RNE RNE RNE RNE - ============== =========== ======== ======== ======== - - The behavior changes if the parameter 'saturate' is set to False. The - rules then become: - - ============== ====== ======== ======= ======== - x E4M3FN E4M3FNUZ E5M2 E5M2FNUZ - ============== ====== ======== ======= ======== - 0 0 0 0 0 - -0 -0 0 -0 0 - NaN NaN NaN NaN NaN - +/- Inf NaN NaN +/- Inf NaN - [x] > FLT_MAX NaN NaN Inf NaN - [x] < -FLT_MAX NaN NaN -Inf NaN - else RNE RNE RNE RNE - ============== ====== ======== ======= ======== - - Parameters - ========== - input - Type T1. - Input tensor to be cast. - saturate - Attribute. - The parameter defines how the conversion behaves if an input value is - out of range of the destination type. It only applies for float 8 - conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). - It is true by default. All cases are fully described in two tables - inserted in the operator description. - to - Attribute. - The data type to which the elements of the input tensor are cast. - Strictly must be one of the types from DataType enum in TensorProto - - Returns - ======= - output : Var - Type T2. - Output tensor with the same shape as input with type specified by the - 'to' argument - - Notes - ===== - Signature: ``ai.onnx@19::Cast``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +The operator casts the elements of a given input tensor to a data type +specified by the 'to' argument and returns an output tensor of the same +size in the converted type. The 'to' argument must be one of the data +types specified in the 'DataType' enum field in the TensorProto message. + +Casting from string tensor in plain (e.g., "3.14" and "1000") and +scientific numeric representations (e.g., "1e-5" and "1E8") to float +types is supported. For example, converting string "100.5" to an integer +may yield result 100. 
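A minimal graph-construction sketch for the ``cast`` helper whose flattened signature appears above; it assumes the regenerated module is importable as ``spox.opset.ai.onnx.v19`` and only builds a symbolic graph, no values are computed::

    import numpy as np
    from spox import Tensor, argument
    from spox.opset.ai.onnx import v19 as op

    x = argument(Tensor(np.float64, ("N",)))  # symbolic float64 input
    y = op.cast(x, to=np.float32)             # lossy downcast, per the rules described here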
There are some string literals reserved for +special floating-point values; "+INF" (and "INF"), "-INF", and "NaN" are +positive infinity, negative infinity, and not-a-number, respectively. +Any string which can exactly match "+INF" in a case-insensitive way +would be mapped to positive infinite. Similarly, this case-insensitive +rule is applied to "INF" and "NaN". When casting from numeric tensors to +string tensors, plain floating-point representation (such as +"314.15926") would be used. Converting non-numerical-literal string such +as "Hello World!" is an undefined behavior. Cases of converting string +representing floating-point arithmetic value, such as "2.718", to INT is +an undefined behavior. + +Conversion from a numerical type to any numerical type is always +allowed. User must be aware of precision loss and value change caused by +range difference between two types. For example, a 64-bit float +3.1415926459 may be round to a 32-bit float 3.141592. Similarly, +converting an integer 36 to Boolean may produce 1 because we truncate +bits which can't be stored in the targeted type. + +In more detail, the conversion among numerical types should follow these +rules if the destination type is not a float 8 type. + +- Casting from floating point to: + + - floating point: +/- infinity if OOR (out of range). + - fixed point: undefined if OOR. + - bool: +/- 0.0 to False; all else to True. + +- Casting from fixed point to: + + - floating point: +/- infinity if OOR. (+ infinity in the case of + uint) + - fixed point: when OOR, discard higher bits and reinterpret (with + respect to two's complement representation for signed types). For + example, 200 (int16) -> -56 (int8). + - bool: zero to False; nonzero to True. + +- Casting from bool to: + + - floating point: ``{1.0, 0.0}``. + - fixed point: ``{1, 0}``. + - bool: no change. + +Float 8 type were introduced to speed up the training of deep models. By +default the conversion of a float *x* obeys to the following rules. +``[x]`` means the value rounded to the target mantissa width. + +============== =========== ======== ======== ======== +x E4M3FN E4M3FNUZ E5M2 E5M2FNUZ +============== =========== ======== ======== ======== +0 0 0 0 0 +-0 -0 0 -0 0 +NaN NaN NaN NaN NaN ++/- Inf +/- FLT_MAX NaN FLT_MAX NaN +[x] > FLT_MAX FLT_MAX FLT_MAX FLT_MAX FLT_MAX +[x] < -FLT_MAX -FLT_MAX -FLT_MAX -FLT_MAX -FLT_MAX +else RNE RNE RNE RNE +============== =========== ======== ======== ======== + +The behavior changes if the parameter 'saturate' is set to False. The +rules then become: + +============== ====== ======== ======= ======== +x E4M3FN E4M3FNUZ E5M2 E5M2FNUZ +============== ====== ======== ======= ======== +0 0 0 0 0 +-0 -0 0 -0 0 +NaN NaN NaN NaN NaN ++/- Inf NaN NaN +/- Inf NaN +[x] > FLT_MAX NaN NaN Inf NaN +[x] < -FLT_MAX NaN NaN -Inf NaN +else RNE RNE RNE RNE +============== ====== ======== ======= ======== + +Parameters +========== +input + Type T1. + Input tensor to be cast. +saturate + Attribute. + The parameter defines how the conversion behaves if an input value is + out of range of the destination type. It only applies for float 8 + conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). + It is true by default. All cases are fully described in two tables + inserted in the operator description. +to + Attribute. + The data type to which the elements of the input tensor are cast. + Strictly must be one of the types from DataType enum in TensorProto + +Returns +======= +output : Var + Type T2. 
+ Output tensor with the same shape as input with type specified by the + 'to' argument + +Notes +===== +Signature: ``ai.onnx@19::Cast``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Cast( - _Cast.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - to=AttrDtype(to, name="to"), - ), - _Cast.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _Cast( + _Cast.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + to=AttrDtype(to, name="to"), + ), _Cast.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def cast_like( - input: Var, - target_type: Var, - *, - saturate: int = 1, -) -> Var: +def cast_like(input: Var, target_type: Var, *, saturate: int = 1, ) -> Var: r""" - The operator casts the elements of a given input tensor (the first - input) to the same data type as the elements of the second input tensor. - See documentation of the Cast operator for further details. - - Parameters - ========== - input - Type T1. - Input tensor to be cast. - target_type - Type T2. - The (first) input tensor will be cast to produce a tensor of the same - type as this (second input) tensor. - saturate - Attribute. - The parameter defines how the conversion behaves if an input value is - out of range of the destination type. It only applies for float 8 - conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). - It is true by default. Please refer to operator Cast description for - further details. - - Returns - ======= - output : Var - Type T2. - Output tensor produced by casting the first input tensor to have the - same type as the second input tensor. - - Notes - ===== - Signature: ``ai.onnx@19::CastLike``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +The operator casts the elements of a given input tensor (the first +input) to the same data type as the elements of the second input tensor. +See documentation of the Cast operator for further details. + +Parameters +========== +input + Type T1. + Input tensor to be cast. +target_type + Type T2. 
+ The (first) input tensor will be cast to produce a tensor of the same + type as this (second input) tensor. +saturate + Attribute. + The parameter defines how the conversion behaves if an input value is + out of range of the destination type. It only applies for float 8 + conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). + It is true by default. Please refer to operator Cast description for + further details. + +Returns +======= +output : Var + Type T2. + Output tensor produced by casting the first input tensor to have the + same type as the second input tensor. + +Notes +===== +Signature: ``ai.onnx@19::CastLike``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _CastLike( - _CastLike.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - ), - _CastLike.Inputs( - input=unwrap_vars(input), - target_type=unwrap_vars(target_type), - ), - ) - .get_output_vars( - input=get_value(input), - target_type=get_value(target_type), - ) - .output - ) + return _CastLike( + _CastLike.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + ), _CastLike.Inputs( + input=unwrap_vars(input), target_type=unwrap_vars(target_type), ), ).get_output_vars( + input=get_value(input), target_type=get_value(target_type), ).output -def constant( - *, - value: Optional[np.ndarray] = None, - value_float: Optional[float] = None, - value_floats: Optional[Iterable[float]] = None, - value_int: Optional[int] = None, - value_ints: Optional[Iterable[int]] = None, - value_string: Optional[str] = None, - value_strings: Optional[Iterable[str]] = None, -) -> Var: +def constant(*, value: Optional[np.ndarray] = None, value_float: Optional[float] = None, value_floats: Optional[Iterable[float]] = None, value_int: Optional[int] = None, value_ints: Optional[Iterable[int]] = None, value_string: Optional[str] = None, value_strings: Optional[Iterable[str]] = None, ) -> Var: r""" - This operator produces a constant tensor. Exactly one of the provided - attributes, either value, sparse_value, or value\_\* must be specified. - - Parameters - ========== - sparse_value - Attribute. - The value for the elements of the output tensor in sparse format. - value - Attribute. - The value for the elements of the output tensor. - value_float - Attribute. - The value for the sole element for the scalar, float32, output tensor. - value_floats - Attribute. - The values for the elements for the 1D, float32, output tensor. - value_int - Attribute. - The value for the sole element for the scalar, int64, output tensor. - value_ints - Attribute. - The values for the elements for the 1D, int64, output tensor. - value_string - Attribute. - The value for the sole element for the scalar, UTF-8 string, output - tensor. - value_strings - Attribute. - The values for the elements for the 1D, UTF-8 string, output tensor. 
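The "exactly one attribute" contract described above can be exercised through the regenerated helper, for example (again assuming ``spox.opset.ai.onnx.v19`` is importable)::

    import numpy as np
    from spox.opset.ai.onnx import v19 as op

    c_tensor = op.constant(value=np.array([1, 2, 3], dtype=np.int64))  # tensor attribute
    c_scalar = op.constant(value_float=2.5)                            # scalar float32 attribute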
- - Returns - ======= - output : Var - Type T. - Output tensor containing the same value of the provided tensor. - - Notes - ===== - Signature: ``ai.onnx@19::Constant``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +This operator produces a constant tensor. Exactly one of the provided +attributes, either value, sparse_value, or value\_\* must be specified. + +Parameters +========== +sparse_value + Attribute. + The value for the elements of the output tensor in sparse format. +value + Attribute. + The value for the elements of the output tensor. +value_float + Attribute. + The value for the sole element for the scalar, float32, output tensor. +value_floats + Attribute. + The values for the elements for the 1D, float32, output tensor. +value_int + Attribute. + The value for the sole element for the scalar, int64, output tensor. +value_ints + Attribute. + The values for the elements for the 1D, int64, output tensor. +value_string + Attribute. + The value for the sole element for the scalar, UTF-8 string, output + tensor. +value_strings + Attribute. + The values for the elements for the 1D, UTF-8 string, output tensor. + +Returns +======= +output : Var + Type T. + Output tensor containing the same value of the provided tensor. + +Notes +===== +Signature: ``ai.onnx@19::Constant``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Constant( - _Constant.Attributes( - value=AttrTensor.maybe(value, name="value"), - value_float=AttrFloat32.maybe(value_float, name="value_float"), - value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), - value_int=AttrInt64.maybe(value_int, name="value_int"), - value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), - value_string=AttrString.maybe(value_string, name="value_string"), - value_strings=AttrStrings.maybe(value_strings, name="value_strings"), - ), - _Constant.Inputs(), - ) - .get_output_vars() - .output - ) - - -def deform_conv( - X: Var, - W: Var, - offset: Var, - B: Optional[Var] = None, - mask: Optional[Var] = None, - *, - dilations: Optional[Iterable[int]] = None, - group: int = 1, - kernel_shape: Optional[Iterable[int]] = None, - offset_group: int = 1, - pads: Optional[Iterable[int]] = None, - strides: Optional[Iterable[int]] = None, -) -> Var: + return _Constant( + _Constant.Attributes( + value=AttrTensor.maybe(value, name="value"), + value_float=AttrFloat32.maybe(value_float, name="value_float"), + value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), + value_int=AttrInt64.maybe(value_int, name="value_int"), + value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), + value_string=AttrString.maybe(value_string, name="value_string"), + value_strings=AttrStrings.maybe(value_strings, name="value_strings"), + ), _Constant.Inputs( + ), ).get_output_vars( + ).output + + +def 
deform_conv(X: Var, W: Var, offset: Var, B: Optional[Var] = None, mask: Optional[Var] = None, *, dilations: Optional[Iterable[int]] = None, group: int = 1, kernel_shape: Optional[Iterable[int]] = None, offset_group: int = 1, pads: Optional[Iterable[int]] = None, strides: Optional[Iterable[int]] = None, ) -> Var: r""" - Performs deformable convolution as described in - https://arxiv.org/abs/1703.06211 and https://arxiv.org/abs/1811.11168. - This operator specification supports the general N-D case. Note that - most common use cases have 2D or 3D data. - - Parameters - ========== - X - Type T. - Input data tensor. For 2D image data, it has shape (N, C, H, W) where N - is the batch size, C is the number of input channels, and H and W are - the height and width. In general, the shape is (N, C, D1, D2, ... , Dn) - for n-dimensional data, where D1 to Dn are the spatial dimension sizes. - Most common use cases have n = 2 or 3. - W - Type T. - Weight tensor that will be used in the convolutions. It has shape (oC, - C/group, kH, kW), where oC is the number of output channels and kH and - kW are the kernel height and width. For more than 2 dimensions, it has - shape (oC, C/group, k1, k2, ... , kn). - offset - Type T. - Offset tensor denoting the offset for the sampling locations in the - convolution kernel. It has shape (N, offset_group \* kH \* kW \* 2, oH, - oW) for 2D data or (N, offset_group \* k1 \* k2 \* ... \* kn \* n, o1, - o2, ... , on) for nD data. Use linear interpolationfor fractional offset - values. Sampling locations outside of the padded input tensor gives - zero. - B - Type T. - Optional 1D bias of length oC to be added to the convolution. Default is - a tensor of zeros. - mask - Type T. - The mask tensor to be applied to each position in the convolution - kernel. It has shape (N, offset_group \* kH \* kW, oH, oW) for 2D data - or (N, offset_group \* k1 \* k2 \* ... \* kn \* n, o1, o2, ... , on) for - nD data. Default is a tensor of ones. - dilations - Attribute. - Dilation value along each spatial axis of the kernel. Default is 1 along - each axis. - group - Attribute. - Number of groups the input and output channels, C and oC, are divided - into. C and oC must both be divisible by group. Default is 1. - kernel_shape - Attribute. - Shape of the convolution kernel. If not present, it is inferred from the - shape of input W. - offset_group - Attribute. - Number of groups of offset. C must be divisible by offset_group. Default - is 1. - pads - Attribute. - Padding for the beginning and end along each spatial axis. The values - represent the number of pixels added to the beginning and end of the - corresponding axis and can take any nonnegative value. The format should - be as follows: [x1_begin, x2_begin, ..., x1_end, x2_end, ...], where - xi_begin is the number of pixels added at the beginning of axis ``i`` - and xi_end is the number of pixels added at the end of axis ``i``. - Default is 0 along each axis. - strides - Attribute. - Stride along each spatial axis. Default is 1 along each axis. - - Returns - ======= - Y : Var - Type T. - Output data tensor that contains the result of convolution. It has shape - (N, oC, oH, oW) for 2D data or (N, oC, o1, o2, ..., on) for nD data - - Notes - ===== - Signature: ``ai.onnx@19::DeformConv``. - - Type constraints: - - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` +Performs deformable convolution as described in +https://arxiv.org/abs/1703.06211 and https://arxiv.org/abs/1811.11168. 
+This operator specification supports the general N-D case. Note that +most common use cases have 2D or 3D data. + +Parameters +========== +X + Type T. + Input data tensor. For 2D image data, it has shape (N, C, H, W) where N + is the batch size, C is the number of input channels, and H and W are + the height and width. In general, the shape is (N, C, D1, D2, ... , Dn) + for n-dimensional data, where D1 to Dn are the spatial dimension sizes. + Most common use cases have n = 2 or 3. +W + Type T. + Weight tensor that will be used in the convolutions. It has shape (oC, + C/group, kH, kW), where oC is the number of output channels and kH and + kW are the kernel height and width. For more than 2 dimensions, it has + shape (oC, C/group, k1, k2, ... , kn). +offset + Type T. + Offset tensor denoting the offset for the sampling locations in the + convolution kernel. It has shape (N, offset_group \* kH \* kW \* 2, oH, + oW) for 2D data or (N, offset_group \* k1 \* k2 \* ... \* kn \* n, o1, + o2, ... , on) for nD data. Use linear interpolationfor fractional offset + values. Sampling locations outside of the padded input tensor gives + zero. +B + Type T. + Optional 1D bias of length oC to be added to the convolution. Default is + a tensor of zeros. +mask + Type T. + The mask tensor to be applied to each position in the convolution + kernel. It has shape (N, offset_group \* kH \* kW, oH, oW) for 2D data + or (N, offset_group \* k1 \* k2 \* ... \* kn \* n, o1, o2, ... , on) for + nD data. Default is a tensor of ones. +dilations + Attribute. + Dilation value along each spatial axis of the kernel. Default is 1 along + each axis. +group + Attribute. + Number of groups the input and output channels, C and oC, are divided + into. C and oC must both be divisible by group. Default is 1. +kernel_shape + Attribute. + Shape of the convolution kernel. If not present, it is inferred from the + shape of input W. +offset_group + Attribute. + Number of groups of offset. C must be divisible by offset_group. Default + is 1. +pads + Attribute. + Padding for the beginning and end along each spatial axis. The values + represent the number of pixels added to the beginning and end of the + corresponding axis and can take any nonnegative value. The format should + be as follows: [x1_begin, x2_begin, ..., x1_end, x2_end, ...], where + xi_begin is the number of pixels added at the beginning of axis ``i`` + and xi_end is the number of pixels added at the end of axis ``i``. + Default is 0 along each axis. +strides + Attribute. + Stride along each spatial axis. Default is 1 along each axis. + +Returns +======= +Y : Var + Type T. + Output data tensor that contains the result of convolution. It has shape + (N, oC, oH, oW) for 2D data or (N, oC, o1, o2, ..., on) for nD data + +Notes +===== +Signature: ``ai.onnx@19::DeformConv``. 
+ +Type constraints: + - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _DeformConv( - _DeformConv.Attributes( - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - offset_group=AttrInt64(offset_group, name="offset_group"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _DeformConv.Inputs( - X=unwrap_vars(X), - W=unwrap_vars(W), - offset=unwrap_vars(offset), - B=unwrap_vars(B), - mask=unwrap_vars(mask), - ), - ) - .get_output_vars( - X=get_value(X), - W=get_value(W), - offset=get_value(offset), - B=get_value(B), - mask=get_value(mask), - ) - .Y - ) - - -def dequantize_linear( - x: Var, - x_scale: Var, - x_zero_point: Optional[Var] = None, - *, - axis: int = 1, -) -> Var: + return _DeformConv( + _DeformConv.Attributes( + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + offset_group=AttrInt64(offset_group, name="offset_group"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), _DeformConv.Inputs( + X=unwrap_vars(X), W=unwrap_vars(W), offset=unwrap_vars(offset), B=unwrap_vars(B), mask=unwrap_vars(mask), ), ).get_output_vars( + X=get_value(X), W=get_value(W), offset=get_value(offset), B=get_value(B), mask=get_value(mask), ).Y + + +def dequantize_linear(x: Var, x_scale: Var, x_zero_point: Optional[Var] = None, *, axis: int = 1, ) -> Var: r""" - The linear dequantization operator. It consumes a quantized tensor, a - scale, and a zero point to compute the full precision tensor. The - dequantization formula is ``y = (x - x_zero_point) * x_scale``. - ``x_scale`` and ``x_zero_point`` must have same shape, and can be either - a scalar for per-tensor / per layer quantization, or a 1-D tensor for - per-axis quantization. ``x_zero_point`` and ``x`` must have same type. - ``x`` and ``y`` must have same shape. In the case of dequantizing int32, - there's no zero point (zero point is supposed to be 0). ``zero-point`` - is usually not used in the case of float8e4m3fn, float8e4m3fnuz, - float8e5m2, float8e5m2fnuz quantization, but the dequantization formula - remains the same for consistency and 'x_scale' still determines the - output type. - - Parameters - ========== - x - Type T1. - N-D quantized input tensor to be de-quantized. - x_scale - Type T2. - Scale for input 'x'. It can be a scalar, which means a per-tensor/layer - dequantization, or a 1-D tensor for per-axis dequantization. - x_zero_point - Type T1. - Zero point for input 'x'. Shape must match x_scale. It's optional. Zero - point is 0 when it's not specified. - axis - Attribute. - (Optional) The axis of the dequantizing dimension of the input tensor. - Ignored for per-tensor quantization. Negative value means counting - dimensions from the back. Accepted range is [-r, r-1] where r = - rank(input). - - Returns - ======= - y : Var - Type T2. - N-D full precision output tensor. It has same shape as input 'x'. - - Notes - ===== - Signature: ``ai.onnx@19::DequantizeLinear``. - - Type constraints: - - T1: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int32)`, `tensor(int8)`, `tensor(uint8)` - - T2: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)` +The linear dequantization operator. 
It consumes a quantized tensor, a +scale, and a zero point to compute the full precision tensor. The +dequantization formula is ``y = (x - x_zero_point) * x_scale``. +``x_scale`` and ``x_zero_point`` must have same shape, and can be either +a scalar for per-tensor / per layer quantization, or a 1-D tensor for +per-axis quantization. ``x_zero_point`` and ``x`` must have same type. +``x`` and ``y`` must have same shape. In the case of dequantizing int32, +there's no zero point (zero point is supposed to be 0). ``zero-point`` +is usually not used in the case of float8e4m3fn, float8e4m3fnuz, +float8e5m2, float8e5m2fnuz quantization, but the dequantization formula +remains the same for consistency and 'x_scale' still determines the +output type. + +Parameters +========== +x + Type T1. + N-D quantized input tensor to be de-quantized. +x_scale + Type T2. + Scale for input 'x'. It can be a scalar, which means a per-tensor/layer + dequantization, or a 1-D tensor for per-axis dequantization. +x_zero_point + Type T1. + Zero point for input 'x'. Shape must match x_scale. It's optional. Zero + point is 0 when it's not specified. +axis + Attribute. + (Optional) The axis of the dequantizing dimension of the input tensor. + Ignored for per-tensor quantization. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). + +Returns +======= +y : Var + Type T2. + N-D full precision output tensor. It has same shape as input 'x'. + +Notes +===== +Signature: ``ai.onnx@19::DequantizeLinear``. + +Type constraints: + - T1: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int32)`, `tensor(int8)`, `tensor(uint8)` + - T2: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)` """ - return ( - _DequantizeLinear( - _DequantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _DequantizeLinear.Inputs( - x=unwrap_vars(x), - x_scale=unwrap_vars(x_scale), - x_zero_point=unwrap_vars(x_zero_point), - ), - ) - .get_output_vars( - x=get_value(x), - x_scale=get_value(x_scale), - x_zero_point=get_value(x_zero_point), - ) - .y - ) + return _DequantizeLinear( + _DequantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _DequantizeLinear.Inputs( + x=unwrap_vars(x), x_scale=unwrap_vars(x_scale), x_zero_point=unwrap_vars(x_zero_point), ), ).get_output_vars( + x=get_value(x), x_scale=get_value(x_scale), x_zero_point=get_value(x_zero_point), ).y -def equal( - A: Var, - B: Var, -) -> Var: +def equal(A: Var, B: Var, ) -> Var: r""" - Returns the tensor resulted from performing the ``equal`` logical - operation elementwise on the input tensors ``A`` and ``B`` (with - Numpy-style broadcasting support). - - This operator supports **multidirectional (i.e., Numpy-style) - broadcasting**; for more details please check `the - doc `__. - - Parameters - ========== - A - Type T. - First input operand for the logical operator. - B - Type T. - Second input operand for the logical operator. - - Returns - ======= - C : Var - Type T1. - Result tensor. - - Notes - ===== - Signature: ``ai.onnx@19::Equal``. 
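A one-line numeric check of the dequantization formula ``y = (x - x_zero_point) * x_scale`` quoted a few lines up, with illustrative values::

    x, x_zero_point, x_scale = 130, 128, 0.1
    y = (x - x_zero_point) * x_scale  # 0.2, up to float rounding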
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(bool)` +Returns the tensor resulted from performing the ``equal`` logical +operation elementwise on the input tensors ``A`` and ``B`` (with +Numpy-style broadcasting support). + +This operator supports **multidirectional (i.e., Numpy-style) +broadcasting**; for more details please check `the +doc `__. + +Parameters +========== +A + Type T. + First input operand for the logical operator. +B + Type T. + Second input operand for the logical operator. + +Returns +======= +C : Var + Type T1. + Result tensor. + +Notes +===== +Signature: ``ai.onnx@19::Equal``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(bool)` """ - return ( - _Equal( - _Equal.Attributes(), - _Equal.Inputs( - A=unwrap_vars(A), - B=unwrap_vars(B), - ), - ) - .get_output_vars( - A=get_value(A), - B=get_value(B), - ) - .C - ) + return _Equal( + _Equal.Attributes( + ), _Equal.Inputs( + A=unwrap_vars(A), B=unwrap_vars(B), ), ).get_output_vars( + A=get_value(A), B=get_value(B), ).C -def identity( - input: Var, -) -> Var: +def identity(input: Var, ) -> Var: r""" - Identity operator - - Parameters - ========== - input - Type V. - Input tensor - - Returns - ======= - output : Var - Type V. - Tensor to copy input into. - - Notes - ===== - Signature: ``ai.onnx@19::Identity``. - - Type constraints: - - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Identity operator + +Parameters +========== +input + Type V. 
+ Input tensor + +Returns +======= +output : Var + Type V. + Tensor to copy input into. + +Notes +===== +Signature: ``ai.onnx@19::Identity``. + +Type constraints: + - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Identity( - _Identity.Attributes(), - _Identity.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _Identity( + _Identity.Attributes( + ), _Identity.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def if_( - cond: Var, - *, - else_branch: Callable[[], Iterable[Var]], - then_branch: Callable[[], Iterable[Var]], -) -> Sequence[Var]: +def if_(cond: Var, *, else_branch: Callable[[], Iterable[Var]], then_branch: Callable[[], Iterable[Var]], ) -> Sequence[Var]: r""" - If conditional - - Parameters - ========== - cond - Type B. - Condition for the if. The tensor must contain a single element. - else_branch - Attribute. - Graph to run if condition is false. Has N outputs: values you wish to be - live-out to the enclosing scope. The number of outputs must match the - number of outputs in the then_branch. - then_branch - Attribute. - Graph to run if condition is true. Has N outputs: values you wish to be - live-out to the enclosing scope. The number of outputs must match the - number of outputs in the else_branch. - - Returns - ======= - outputs : Sequence[Var] - Type V. - Values that are live-out to the enclosing scope. The return values in - the ``then_branch`` and ``else_branch`` must be of the same data type. - The ``then_branch`` and ``else_branch`` may produce tensors with the - same element type and different shapes. 
If corresponding outputs from - the then-branch and the else-branch have static shapes S1 and S2, then - the shape of the corresponding output variable of the if-node (if - present) must be compatible with both S1 and S2 as it represents the - union of both possible shapes.For example, if in a model file, the first - output of ``then_branch`` is typed float tensor with shape [2] and the - first output of ``else_branch`` is another float tensor with shape [3], - If's first output should have (a) no shape set, or (b) a shape of rank 1 - with neither ``dim_value`` nor ``dim_param`` set, or (c) a shape of rank - 1 with a unique ``dim_param``. In contrast, the first output cannot have - the shape [2] since [2] and [3] are not compatible. - - Notes - ===== - Signature: ``ai.onnx@19::If``. - - Type constraints: - - B: `tensor(bool)` - - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(float8e4m3fn))`, `optional(tensor(float8e4m3fnuz))`, `optional(tensor(float8e5m2))`, `optional(tensor(float8e5m2fnuz))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(float8e4m3fn))`, `seq(tensor(float8e4m3fnuz))`, `seq(tensor(float8e5m2))`, `seq(tensor(float8e5m2fnuz))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +If conditional + +Parameters +========== +cond + Type B. + Condition for the if. The tensor must contain a single element. +else_branch + Attribute. + Graph to run if condition is false. Has N outputs: values you wish to be + live-out to the enclosing scope. The number of outputs must match the + number of outputs in the then_branch. +then_branch + Attribute. + Graph to run if condition is true. Has N outputs: values you wish to be + live-out to the enclosing scope. The number of outputs must match the + number of outputs in the else_branch. + +Returns +======= +outputs : Sequence[Var] + Type V. + Values that are live-out to the enclosing scope. 
The return values in + the ``then_branch`` and ``else_branch`` must be of the same data type. + The ``then_branch`` and ``else_branch`` may produce tensors with the + same element type and different shapes. If corresponding outputs from + the then-branch and the else-branch have static shapes S1 and S2, then + the shape of the corresponding output variable of the if-node (if + present) must be compatible with both S1 and S2 as it represents the + union of both possible shapes.For example, if in a model file, the first + output of ``then_branch`` is typed float tensor with shape [2] and the + first output of ``else_branch`` is another float tensor with shape [3], + If's first output should have (a) no shape set, or (b) a shape of rank 1 + with neither ``dim_value`` nor ``dim_param`` set, or (c) a shape of rank + 1 with a unique ``dim_param``. In contrast, the first output cannot have + the shape [2] since [2] and [3] are not compatible. + +Notes +===== +Signature: ``ai.onnx@19::If``. + +Type constraints: + - B: `tensor(bool)` + - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(float8e4m3fn))`, `optional(tensor(float8e4m3fnuz))`, `optional(tensor(float8e5m2))`, `optional(tensor(float8e5m2fnuz))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(float8e4m3fn))`, `seq(tensor(float8e4m3fnuz))`, `seq(tensor(float8e5m2))`, `seq(tensor(float8e5m2fnuz))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - _else_branch_subgraph: Graph = subgraph((), else_branch) - _then_branch_subgraph: Graph = subgraph((), then_branch) - return ( - _If( - _If.Attributes( - else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), - then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), - ), - _If.Inputs( - cond=unwrap_vars(cond), - ), - out_variadic=len(_else_branch_subgraph.requested_results), - ) - .get_output_vars( - cond=get_value(cond), - ) - .outputs + _else_branch_subgraph: Graph = subgraph( + (), 
+ else_branch + ) + _then_branch_subgraph: Graph = subgraph( + (), + then_branch ) + return _If( + _If.Attributes( + else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), + then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), + ), _If.Inputs( + cond=unwrap_vars(cond), ), out_variadic=len(_else_branch_subgraph.requested_results), ).get_output_vars( + cond=get_value(cond), ).outputs -def loop( - M: Optional[Var] = None, - cond: Optional[Var] = None, - v_initial: Sequence[Var] = (), - *, - body: Callable[..., Iterable[Var]], -) -> Sequence[Var]: +def loop(M: Optional[Var] = None, cond: Optional[Var] = None, v_initial: Sequence[Var] = (), *, body: Callable[..., Iterable[Var]], ) -> Sequence[Var]: r""" - Generic Looping construct. This loop has multiple termination - conditions: - - 1) Trip count. Iteration count specified at runtime. Set by specifying - the input M. Optional. Set to empty string to omit. Note that a - static trip count (specified at graph construction time) can be - specified by passing in a constant node for input M. - 2) Loop termination condition. This is an input to the op that - determines whether to run the first iteration and also a loop-carried - dependency for the body graph. The body graph must yield a value for - the condition variable, whether this input is provided or not. - - This table summarizes the operating modes of this operator with - equivalent C-style code: - - Operator inputs defined as (max_trip_count, condition_var). - - - input ("", ""): for (int i=0; ; ++i) { cond = ... // Note this value - is ignored, but is required in the body } - - - input ("", cond) // Note this is analogous to a while loop bool cond - = ...; for (int i=0; cond; ++i) { cond = ...; } - - - input ("", 1) // Note this is analogous to a do-while loop bool cond - = true for (int i=0; cond; ++i) { cond = ...; } - - - input (trip_count, "") // Note this is analogous to a for loop int - trip_count = ... 
for (int i=0; i < trip_count; ++i) { cond = ...; // - ignored } - - - input (trip_count, cond) int trip_count = ...; bool cond = ...; for - (int i=0; i < trip_count && cond; ++i) { cond = ...; } - - *Sample usage - cond as well as trip count* - - :: - - graph predict-net { - %a = Constant[value = ]() - %b = Constant[value = ]() - %keepgoing = Constant[value = ]() - %max_trip_count = Constant[value = ]() - %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b) - return - } - - graph body-net ( - %i[INT32, scalar] // iteration number - %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used - %b_in[INT32, scalar] // incoming value of loop-carried-dependency b - ) { - %my_local = Add(%a, %b_in) - %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b - %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition - %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated - return %keepgoing_out, %b_out, %user_defined_val - } - - *Sample equivalent C code* - - :: - - { - /* User-defined code (enclosing scope) */ - int a = 3, b = 6; - bool keepgoing = true; // Analogous to input cond - /* End user-defined code */ - - /* Implicitly-defined code */ - const int max_trip_count = 10; // Analogous to input M - int user_defined_vals[]; // Imagine this is resizable - /* End implicitly-defined code */ - /* initialize loop-carried variables and scan-output variables */ - bool keepgoing_out = keepgoing - int b_out = b - - for (int i=0; i < max_trip_count && keepgoing_out; ++i) { - /* Implicitly-defined code: bind actual parameter values - to formal parameter variables of loop-body */ - bool keepgoing_in = keepgoing_out; - bool b_in = b_out; - - /* User-defined code (loop body) */ - int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine - b_out = a - b_in; - keepgoing_out = my_local > b_out; - user_defined_val = b_in + b_in; // b_in and b_out are different variables - /* End user-defined code */ - - /* Implicitly defined-code */ - user_defined_vals[i] = user_defined_val // accumulate scan-output values - } - // int t = my_local; // Can't do this. my_local is not accessible here. - - // The values below are bound to the output variables of the loop and therefore accessible - // b_out; user_defined_vals; keepgoing_out; - } - - There are several things of note in this code snippet: - - 1) Values from the enclosing scope (i.e. variable "a" here) are in scope - and can be referenced in the inputs of the loop. - 2) Any values computed in the loop body that needs to be used in a - subsequent iteration or after the loop are modelled using a pair of - variables in the loop-body, consisting of an input variable (eg., - b_in) and an output variable (eg., b_out). These are referred to as - loop-carried dependences. The loop operation node supplies the input - value of the input variable for the first iteration, and returns the - output value of the output variable produced by the final iteration. - 3) Scan_output variables are used to implicitly concatenate values - computed across all the iterations. In the above example, the value - of user_defined_val computed over all iterations are concatenated and - returned as the value of user_defined_vals after the loop. - 4) Values created in the body cannot be accessed in the enclosing scope, - except using the mechanism described above. - - Note that the semantics of this op support "diagonal" or "wavefront" - execution. 
(See Step 3 here for an example: - https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). - Frontends should emit multi-layer RNNs as a series of While operators - (with time being the inner looping dimension), with each successive - layer consuming the scan_outputs from the previous layer, possibly going - through several point-wise operators (e.g. dropout, residual - connections, linear layer). - - The input/output of subgraph (produced by loop node) matching is based - on order instead of name. The implementation will figure out the names - based on this order. - - Parameters - ========== - M - Type I. - A maximum trip-count for the loop specified at runtime. Optional. Pass - empty string to skip. - cond - Type B. - A boolean termination condition. Optional. Pass empty string to skip. - v_initial - Type V. - The initial values of any loop-carried dependencies (values that change - across loop iterations) - body - Attribute. - The graph run each iteration. It has 2+N inputs: (iteration_num, - condition, loop carried dependencies...). It has 1+N+K outputs: - (condition, loop carried dependencies..., scan_outputs...). Each - scan_output is created by concatenating the value of the specified - output value at the end of each iteration of the loop. It is an error if - the dimensions or data type of these scan_outputs change across loop - iterations. - - Returns - ======= - v_final_and_scan_outputs : Sequence[Var] - Type V. - Final N loop carried dependency values then K scan_outputs. Scan outputs - must be Tensors. - - Notes - ===== - Signature: ``ai.onnx@19::Loop``. - - Type constraints: - - I: `tensor(int64)` - - B: `tensor(bool)` - - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(float8e4m3fn))`, `optional(tensor(float8e4m3fnuz))`, `optional(tensor(float8e5m2))`, `optional(tensor(float8e5m2fnuz))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(float8e4m3fn))`, `seq(tensor(float8e4m3fnuz))`, `seq(tensor(float8e5m2))`, `seq(tensor(float8e5m2fnuz))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, 
`tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Generic Looping construct. This loop has multiple termination +conditions: + +1) Trip count. Iteration count specified at runtime. Set by specifying + the input M. Optional. Set to empty string to omit. Note that a + static trip count (specified at graph construction time) can be + specified by passing in a constant node for input M. +2) Loop termination condition. This is an input to the op that + determines whether to run the first iteration and also a loop-carried + dependency for the body graph. The body graph must yield a value for + the condition variable, whether this input is provided or not. + +This table summarizes the operating modes of this operator with +equivalent C-style code: + +Operator inputs defined as (max_trip_count, condition_var). + +- input ("", ""): for (int i=0; ; ++i) { cond = ... // Note this value + is ignored, but is required in the body } + +- input ("", cond) // Note this is analogous to a while loop bool cond + = ...; for (int i=0; cond; ++i) { cond = ...; } + +- input ("", 1) // Note this is analogous to a do-while loop bool cond + = true for (int i=0; cond; ++i) { cond = ...; } + +- input (trip_count, "") // Note this is analogous to a for loop int + trip_count = ... for (int i=0; i < trip_count; ++i) { cond = ...; // + ignored } + +- input (trip_count, cond) int trip_count = ...; bool cond = ...; for + (int i=0; i < trip_count && cond; ++i) { cond = ...; } + +*Sample usage - cond as well as trip count* + +:: + + graph predict-net { + %a = Constant[value = ]() + %b = Constant[value = ]() + %keepgoing = Constant[value = ]() + %max_trip_count = Constant[value = ]() + %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b) + return + } + + graph body-net ( + %i[INT32, scalar] // iteration number + %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used + %b_in[INT32, scalar] // incoming value of loop-carried-dependency b + ) { + %my_local = Add(%a, %b_in) + %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b + %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition + %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated + return %keepgoing_out, %b_out, %user_defined_val + } + +*Sample equivalent C code* + +:: + + { + /* User-defined code (enclosing scope) */ + int a = 3, b = 6; + bool keepgoing = true; // Analogous to input cond + /* End user-defined code */ + + /* Implicitly-defined code */ + const int max_trip_count = 10; // Analogous to input M + int user_defined_vals[]; // Imagine this is resizable + /* End implicitly-defined code */ + /* initialize loop-carried variables and scan-output variables */ + bool keepgoing_out = keepgoing + int b_out = b + + for (int i=0; i < max_trip_count && keepgoing_out; ++i) { + /* Implicitly-defined code: bind actual parameter values + to formal parameter variables of loop-body */ + bool keepgoing_in = keepgoing_out; + bool b_in = b_out; + + /* User-defined code (loop body) */ + int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine + b_out = a - b_in; + keepgoing_out = my_local > b_out; + user_defined_val = b_in + b_in; // b_in and b_out are different variables + /* End user-defined code */ + + /* Implicitly defined-code */ + user_defined_vals[i] = user_defined_val // accumulate scan-output values + } + // int t = my_local; // Can't do this. 
my_local is not accessible here. + + // The values below are bound to the output variables of the loop and therefore accessible + // b_out; user_defined_vals; keepgoing_out; + } + +There are several things of note in this code snippet: + +1) Values from the enclosing scope (i.e. variable "a" here) are in scope + and can be referenced in the inputs of the loop. +2) Any values computed in the loop body that needs to be used in a + subsequent iteration or after the loop are modelled using a pair of + variables in the loop-body, consisting of an input variable (eg., + b_in) and an output variable (eg., b_out). These are referred to as + loop-carried dependences. The loop operation node supplies the input + value of the input variable for the first iteration, and returns the + output value of the output variable produced by the final iteration. +3) Scan_output variables are used to implicitly concatenate values + computed across all the iterations. In the above example, the value + of user_defined_val computed over all iterations are concatenated and + returned as the value of user_defined_vals after the loop. +4) Values created in the body cannot be accessed in the enclosing scope, + except using the mechanism described above. + +Note that the semantics of this op support "diagonal" or "wavefront" +execution. (See Step 3 here for an example: +https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). +Frontends should emit multi-layer RNNs as a series of While operators +(with time being the inner looping dimension), with each successive +layer consuming the scan_outputs from the previous layer, possibly going +through several point-wise operators (e.g. dropout, residual +connections, linear layer). + +The input/output of subgraph (produced by loop node) matching is based +on order instead of name. The implementation will figure out the names +based on this order. + +Parameters +========== +M + Type I. + A maximum trip-count for the loop specified at runtime. Optional. Pass + empty string to skip. +cond + Type B. + A boolean termination condition. Optional. Pass empty string to skip. +v_initial + Type V. + The initial values of any loop-carried dependencies (values that change + across loop iterations) +body + Attribute. + The graph run each iteration. It has 2+N inputs: (iteration_num, + condition, loop carried dependencies...). It has 1+N+K outputs: + (condition, loop carried dependencies..., scan_outputs...). Each + scan_output is created by concatenating the value of the specified + output value at the end of each iteration of the loop. It is an error if + the dimensions or data type of these scan_outputs change across loop + iterations. + +Returns +======= +v_final_and_scan_outputs : Sequence[Var] + Type V. + Final N loop carried dependency values then K scan_outputs. Scan outputs + must be Tensors. + +Notes +===== +Signature: ``ai.onnx@19::Loop``. 
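For reference, the operating modes and C-style pseudo-code above translate almost line-for-line into plain Python. The sketch below is illustrative only (``run_loop`` and ``body`` are ad-hoc names, not spox or ONNX APIs); ``body`` follows the ``(iteration_num, condition, carried...) -> (condition, carried..., scan_outputs...)`` contract of the ``body`` attribute::

    def run_loop(body, max_trip_count, cond, *v_initial):
        # body(i, cond, *carried) -> (cond, *carried_out, *scan_elements)
        n = len(v_initial)
        carried, scans, i = list(v_initial), None, 0
        while (max_trip_count is None or i < max_trip_count) and cond:
            cond, *rest = body(i, cond, *carried)
            carried, elems = rest[:n], rest[n:]
            if scans is None:
                scans = [[] for _ in elems]
            for acc, e in zip(scans, elems):
                acc.append(e)                # accumulate the scan-output elements
            i += 1
        return (*carried, *(scans or []))    # final carried values, then scan outputs

    a = 3
    def body(i, keepgoing, b):
        my_local = a + b                     # enclosing-scope value "a" is visible
        b_out = a - b
        return my_local > b_out, b_out, b + b

    b_final, user_defined_vals = run_loop(body, 10, True, 6)
    print(b_final, user_defined_vals)        # -> 6 [12, -6]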
+ +Type constraints: + - I: `tensor(int64)` + - B: `tensor(bool)` + - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(float8e4m3fn))`, `optional(tensor(float8e4m3fnuz))`, `optional(tensor(float8e5m2))`, `optional(tensor(float8e5m2fnuz))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(float8e4m3fn))`, `seq(tensor(float8e4m3fnuz))`, `seq(tensor(float8e5m2))`, `seq(tensor(float8e5m2fnuz))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ _body_subgraph: Graph = subgraph( - typing_cast(list[Type], [Tensor(np.int64, (1,)), Tensor(np.bool_, (1,))]) - + [var.unwrap_type() for var in v_initial], - body, - ) - return ( - _Loop( - _Loop.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _Loop.Inputs( - M=unwrap_vars(M), - cond=unwrap_vars(cond), - v_initial=unwrap_vars(v_initial), - ), - out_variadic=len(_body_subgraph.requested_results) - 1, - ) - .get_output_vars( - M=get_value(M), - cond=get_value(cond), - v_initial=get_value(v_initial), - ) - .v_final_and_scan_outputs + typing_cast(list[Type], [Tensor(np.int64, (1,)), Tensor(np.bool_, (1,))])+ [var.unwrap_type() for var in v_initial], + body ) + return _Loop( + _Loop.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), _Loop.Inputs( + M=unwrap_vars(M), cond=unwrap_vars(cond), v_initial=unwrap_vars(v_initial), ), out_variadic=len(_body_subgraph.requested_results) - 1, ).get_output_vars( + M=get_value(M), cond=get_value(cond), v_initial=get_value(v_initial), ).v_final_and_scan_outputs -def pad( - data: Var, - pads: Var, - constant_value: Optional[Var] = None, - axes: Optional[Var] = None, - *, - mode: str = "constant", -) -> Var: +def pad(data: Var, pads: Var, constant_value: Optional[Var] = None, axes: Optional[Var] = None, *, mode: str = "constant", ) -> Var: r""" - Given a tensor containing the data to be padded (``data``), a tensor - containing the number of start and end pad values for axis (``pads``), - (optionally) a 
``mode``, and (optionally) ``constant_value``, a padded - tensor (``output``) is generated. - - The three supported ``modes`` are (similar to corresponding modes - supported by ``numpy.pad``): - - 1) ``constant``\ (default) - pads with a given constant value as - specified by ``constant_value`` (which defaults to 0, empty string, - or False) - - 2) ``reflect`` - pads with the reflection of the vector mirrored on the - first and last values of the vector along each axis - - 3) ``edge`` - pads with the edge values of array - - 4) ``wrap`` - wrap-around padding as if the data tensor forms a torus - - Example 1 (``constant`` mode): - - Insert 0 pads to the beginning of the second dimension. - - :: - - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - - pads = [0, 2, 0, 0] - - mode = 'constant' - - constant_value = 0.0 - - output = [ - [0.0, 0.0, 1.0, 1.2], - [0.0, 0.0, 2.3, 3.4], - [0.0, 0.0, 4.5, 5.7], - ] - - Example 2 (``reflect`` mode): +Given a tensor containing the data to be padded (``data``), a tensor +containing the number of start and end pad values for axis (``pads``), +(optionally) a ``mode``, and (optionally) ``constant_value``, a padded +tensor (``output``) is generated. - :: - - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - - pads = [0, 2, 0, 0] - - mode = 'reflect' +The three supported ``modes`` are (similar to corresponding modes +supported by ``numpy.pad``): - output = [ - [1.0, 1.2, 1.0, 1.2], - [2.3, 3.4, 2.3, 3.4], - [4.5, 5.7, 4.5, 5.7], - ] +1) ``constant``\ (default) - pads with a given constant value as + specified by ``constant_value`` (which defaults to 0, empty string, + or False) - Example 3 (``edge`` mode): +2) ``reflect`` - pads with the reflection of the vector mirrored on the + first and last values of the vector along each axis - :: - - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] +3) ``edge`` - pads with the edge values of array - pads = [0, 2, 0, 0] +4) ``wrap`` - wrap-around padding as if the data tensor forms a torus - mode = 'edge' +Example 1 (``constant`` mode): - output = [ - [1.0, 1.0, 1.0, 1.2], - [2.3, 2.3, 2.3, 3.4], - [4.5, 4.5, 4.5, 5.7], - ] +Insert 0 pads to the beginning of the second dimension. - Example 4 (``wrap`` mode): +:: - :: + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - - pads = [2, 1, 1, 1] - - mode = 'wrap' - - output = [ - [3.4, 2.3, 3.4, 2.3], - [5.7, 4.5, 5.7, 4.5], - [1.2, 1.0, 1.2, 1.0], - [3.4, 2.3, 3.4, 2.3], - [5.7, 4.5, 5.7, 4.5], - [1.2, 1.0, 1.2, 1.0], - ] - - Parameters - ========== - data - Type T. - Input tensor. - pads - Type tensor(int64). - Tensor of integers indicating the number of padding elements to add or - remove (if negative) at the beginning and end of each axis. For 2D input - tensor, it is the number of pixels. ``pads`` should be a 1D tensor of - shape [2 \* num_axes] where ``num_axes`` refers to the number of - elements in the ``axes`` input or the input rank if ``axes`` are not - provided explicitly. ``pads`` format should be: [x1_begin, x2_begin, - ..., x1_end, x2_end,...], where xi_begin is the number of pad values - added at the beginning of axis ``axes[i]`` and xi_end, the number of pad - values added at the end of axis ``axes[i]``. - constant_value - Type T. - (Optional) A scalar value to be used if the mode chosen is ``constant`` - (by default it is 0, empty string or False). - axes - Type Tind. - 1-D tensor of axes that ``pads`` apply to. Negative value means counting - dimensions from the back. 
Accepted range is [-r, r-1] where r = - rank(data). Behavior is undefined if an axis is repeated. If not - provided, all axes are assumed (``[0, 1, ..., input_rank-1]``). - mode - Attribute. - Supported modes: ``constant``\ (default), ``reflect``, ``edge``, - ``wrap`` - - Returns - ======= - output : Var - Type T. - Tensor after padding. - - Notes - ===== - Signature: ``ai.onnx@19::Pad``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - Tind: `tensor(int32)`, `tensor(int64)` + pads = [0, 2, 0, 0] + + mode = 'constant' + + constant_value = 0.0 + + output = [ + [0.0, 0.0, 1.0, 1.2], + [0.0, 0.0, 2.3, 3.4], + [0.0, 0.0, 4.5, 5.7], + ] + +Example 2 (``reflect`` mode): + +:: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'reflect' + + output = [ + [1.0, 1.2, 1.0, 1.2], + [2.3, 3.4, 2.3, 3.4], + [4.5, 5.7, 4.5, 5.7], + ] + +Example 3 (``edge`` mode): + +:: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'edge' + + output = [ + [1.0, 1.0, 1.0, 1.2], + [2.3, 2.3, 2.3, 3.4], + [4.5, 4.5, 4.5, 5.7], + ] + +Example 4 (``wrap`` mode): + +:: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [2, 1, 1, 1] + + mode = 'wrap' + + output = [ + [3.4, 2.3, 3.4, 2.3], + [5.7, 4.5, 5.7, 4.5], + [1.2, 1.0, 1.2, 1.0], + [3.4, 2.3, 3.4, 2.3], + [5.7, 4.5, 5.7, 4.5], + [1.2, 1.0, 1.2, 1.0], + ] + +Parameters +========== +data + Type T. + Input tensor. +pads + Type tensor(int64). + Tensor of integers indicating the number of padding elements to add or + remove (if negative) at the beginning and end of each axis. For 2D input + tensor, it is the number of pixels. ``pads`` should be a 1D tensor of + shape [2 \* num_axes] where ``num_axes`` refers to the number of + elements in the ``axes`` input or the input rank if ``axes`` are not + provided explicitly. ``pads`` format should be: [x1_begin, x2_begin, + ..., x1_end, x2_end,...], where xi_begin is the number of pad values + added at the beginning of axis ``axes[i]`` and xi_end, the number of pad + values added at the end of axis ``axes[i]``. +constant_value + Type T. + (Optional) A scalar value to be used if the mode chosen is ``constant`` + (by default it is 0, empty string or False). +axes + Type Tind. + 1-D tensor of axes that ``pads`` apply to. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(data). Behavior is undefined if an axis is repeated. If not + provided, all axes are assumed (``[0, 1, ..., input_rank-1]``). +mode + Attribute. + Supported modes: ``constant``\ (default), ``reflect``, ``edge``, + ``wrap`` + +Returns +======= +output : Var + Type T. + Tensor after padding. + +Notes +===== +Signature: ``ai.onnx@19::Pad``. 
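Because the modes are described as mirroring ``numpy.pad``, Examples 1-3 above can be cross-checked directly with NumPy. Note that NumPy takes per-axis ``(before, after)`` pairs rather than the flat ONNX ``pads`` layout; the snippet below is illustrative only::

    import numpy as np

    data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]])
    # ONNX pads = [0, 2, 0, 0] -> axis 0: (0, 0), axis 1: (2, 0)
    np_pads = ((0, 0), (2, 0))

    print(np.pad(data, np_pads, mode="constant", constant_values=0.0)[0])  # [0.  0.  1.  1.2]
    print(np.pad(data, np_pads, mode="reflect")[0])                        # [1.  1.2 1.  1.2]
    print(np.pad(data, np_pads, mode="edge")[0])                           # [1.  1.  1.  1.2]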
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - Tind: `tensor(int32)`, `tensor(int64)` """ - return ( - _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=unwrap_vars(data), - pads=unwrap_vars(pads), - constant_value=unwrap_vars(constant_value), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - pads=get_value(pads), - constant_value=get_value(constant_value), - axes=get_value(axes), - ) - .output - ) + return _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), _Pad.Inputs( + data=unwrap_vars(data), pads=unwrap_vars(pads), constant_value=unwrap_vars(constant_value), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), pads=get_value(pads), constant_value=get_value(constant_value), axes=get_value(axes), ).output -def quantize_linear( - x: Var, - y_scale: Var, - y_zero_point: Optional[Var] = None, - *, - axis: int = 1, - saturate: int = 1, -) -> Var: +def quantize_linear(x: Var, y_scale: Var, y_zero_point: Optional[Var] = None, *, axis: int = 1, saturate: int = 1, ) -> Var: r""" - The linear quantization operator. It consumes a high precision tensor, a - scale, and a zero point to compute the low precision / quantized tensor. - The scale factor and zero point must have same shape, and can be either - a scalar for per-tensor / per layer quantization, or a 1-D tensor for - per-axis quantization. The quantization formula is - ``y = saturate ((x / y_scale) + y_zero_point)``. For saturation, it - saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8. For (x - / y_scale), it's rounding to the nearest even. Refer to - https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and - 'y' must have same type. 'y_zero_point' is usually not used for - quantization to float8e4m3fn, float8e4m3fnuz, float8e5m2, - float8e5m2fnuz, but the quantization formula remains the same for - consistency and the type of the attribute 'y_zero_point' still - determines the quantization type. - - Parameters - ========== - x - Type T1. - N-D full precision Input tensor to be quantized. - y_scale - Type T1. - Scale for doing quantization to get 'y'. It can be a scalar, which means - per-tensor/layer quantization, or a 1-D Tensor for per-axis - quantization. - y_zero_point - Type T2. - Zero point for doing quantization to get 'y'. Shape must match y_scale. - Default is uint8 with zero point of 0 if it's not specified. - axis - Attribute. - (Optional) The axis of the quantization dimension of the input tensor. - Ignored for per-tensor quantization. Negative value means counting - dimensions from the back. Accepted range is [-r, r-1] where r = - rank(input). - saturate - Attribute. - The parameter defines how the conversion behaves if an input value is - out of range of the destination type. It only applies for float 8 - quantization (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). - It is true by default. All cases are fully described in two tables - inserted in the operator description. - - Returns - ======= - y : Var - Type T2. - N-D quantized output tensor. It has same shape as input 'x'. - - Notes - ===== - Signature: ``ai.onnx@19::QuantizeLinear``. 
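For the uint8 case the quantization formula above boils down to two NumPy calls: ``np.rint`` gives the round-half-to-even behaviour and ``np.clip`` the saturation to [0, 255]. ``quantize_uint8`` is an illustrative helper, not a spox or ONNX API::

    import numpy as np

    def quantize_uint8(x, y_scale, y_zero_point=0):
        # y = saturate(round_half_even(x / y_scale) + y_zero_point), saturating to [0, 255]
        return np.clip(np.rint(x / y_scale) + y_zero_point, 0, 255).astype(np.uint8)

    x = np.array([0.0, 0.5, 1.0, 200.0, -1.0], dtype=np.float32)
    print(quantize_uint8(x, y_scale=np.float32(0.5), y_zero_point=10))  # -> [ 10  11  12 255   8]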
- - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)` - - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` +The linear quantization operator. It consumes a high precision tensor, a +scale, and a zero point to compute the low precision / quantized tensor. +The scale factor and zero point must have same shape, and can be either +a scalar for per-tensor / per layer quantization, or a 1-D tensor for +per-axis quantization. The quantization formula is +``y = saturate ((x / y_scale) + y_zero_point)``. For saturation, it +saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8. For (x +/ y_scale), it's rounding to the nearest even. Refer to +https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and +'y' must have same type. 'y_zero_point' is usually not used for +quantization to float8e4m3fn, float8e4m3fnuz, float8e5m2, +float8e5m2fnuz, but the quantization formula remains the same for +consistency and the type of the attribute 'y_zero_point' still +determines the quantization type. + +Parameters +========== +x + Type T1. + N-D full precision Input tensor to be quantized. +y_scale + Type T1. + Scale for doing quantization to get 'y'. It can be a scalar, which means + per-tensor/layer quantization, or a 1-D Tensor for per-axis + quantization. +y_zero_point + Type T2. + Zero point for doing quantization to get 'y'. Shape must match y_scale. + Default is uint8 with zero point of 0 if it's not specified. +axis + Attribute. + (Optional) The axis of the quantization dimension of the input tensor. + Ignored for per-tensor quantization. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(input). +saturate + Attribute. + The parameter defines how the conversion behaves if an input value is + out of range of the destination type. It only applies for float 8 + quantization (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). + It is true by default. All cases are fully described in two tables + inserted in the operator description. + +Returns +======= +y : Var + Type T2. + N-D quantized output tensor. It has same shape as input 'x'. + +Notes +===== +Signature: ``ai.onnx@19::QuantizeLinear``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)` + - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` """ - return ( - _QuantizeLinear( - _QuantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - saturate=AttrInt64(saturate, name="saturate"), - ), - _QuantizeLinear.Inputs( - x=unwrap_vars(x), - y_scale=unwrap_vars(y_scale), - y_zero_point=unwrap_vars(y_zero_point), - ), - ) - .get_output_vars( - x=get_value(x), - y_scale=get_value(y_scale), - y_zero_point=get_value(y_zero_point), - ) - .y - ) + return _QuantizeLinear( + _QuantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + saturate=AttrInt64(saturate, name="saturate"), + ), _QuantizeLinear.Inputs( + x=unwrap_vars(x), y_scale=unwrap_vars(y_scale), y_zero_point=unwrap_vars(y_zero_point), ), ).get_output_vars( + x=get_value(x), y_scale=get_value(y_scale), y_zero_point=get_value(y_zero_point), ).y -def reshape( - data: Var, - shape: Var, - *, - allowzero: int = 0, -) -> Var: +def reshape(data: Var, shape: Var, *, allowzero: int = 0, ) -> Var: r""" - Reshape the input tensor similar to numpy.reshape. 
First input is the - data tensor, second input is a shape tensor which specifies the output - shape. It outputs the reshaped tensor. At most one dimension of the new - shape can be -1. In this case, the value is inferred from the size of - the tensor and the remaining dimensions. A dimension could also be 0, in - which case the actual dimension value is unchanged (i.e. taken from the - input tensor). If 'allowzero' is set, and the new shape includes 0, the - dimension will be set explicitly to zero (i.e. not taken from input - tensor). Shape (second input) could be an empty shape, which means - converting to a scalar. The input tensor's shape and the output tensor's - shape are required to have the same number of elements. - - If the attribute 'allowzero' is set, it is invalid for the specified - shape to contain both a zero value and -1, as the value of the dimension - corresponding to -1 cannot be determined uniquely. - - Parameters - ========== - data - Type T. - An input tensor. - shape - Type tensor(int64). - Specified shape for output. - allowzero - Attribute. - (Optional) By default, when any value in the 'shape' input is equal to - zero the corresponding dimension value is copied from the input tensor - dynamically. allowzero=1 indicates that if any value in the 'shape' - input is set to zero, the zero value is honored, similar to NumPy. - - Returns - ======= - reshaped : Var - Type T. - Reshaped data. - - Notes - ===== - Signature: ``ai.onnx@19::Reshape``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Reshape the input tensor similar to numpy.reshape. First input is the +data tensor, second input is a shape tensor which specifies the output +shape. It outputs the reshaped tensor. At most one dimension of the new +shape can be -1. In this case, the value is inferred from the size of +the tensor and the remaining dimensions. A dimension could also be 0, in +which case the actual dimension value is unchanged (i.e. taken from the +input tensor). If 'allowzero' is set, and the new shape includes 0, the +dimension will be set explicitly to zero (i.e. not taken from input +tensor). Shape (second input) could be an empty shape, which means +converting to a scalar. The input tensor's shape and the output tensor's +shape are required to have the same number of elements. + +If the attribute 'allowzero' is set, it is invalid for the specified +shape to contain both a zero value and -1, as the value of the dimension +corresponding to -1 cannot be determined uniquely. + +Parameters +========== +data + Type T. + An input tensor. +shape + Type tensor(int64). + Specified shape for output. +allowzero + Attribute. + (Optional) By default, when any value in the 'shape' input is equal to + zero the corresponding dimension value is copied from the input tensor + dynamically. allowzero=1 indicates that if any value in the 'shape' + input is set to zero, the zero value is honored, similar to NumPy. + +Returns +======= +reshaped : Var + Type T. + Reshaped data. + +Notes +===== +Signature: ``ai.onnx@19::Reshape``. 
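The handling of 0 and -1 entries is where this differs from ``numpy.reshape``. The illustrative helper below (``resolve_shape``, not a spox API) resolves an ONNX-style shape into a concrete one following the rules above, with ``allowzero`` left at its default of 0::

    import numpy as np

    def resolve_shape(data, shape, allowzero=0):
        # 0 copies the input dimension (unless allowzero=1); at most one -1 is inferred.
        out = [data.shape[i] if (d == 0 and not allowzero) else d
               for i, d in enumerate(shape)]
        if -1 in out:
            known = int(np.prod([d for d in out if d != -1]))
            out[out.index(-1)] = data.size // known
        return tuple(out)

    x = np.arange(24).reshape(2, 3, 4)
    print(resolve_shape(x, [0, -1]))                       # -> (2, 12)
    print(x.reshape(resolve_shape(x, [4, 0, -1])).shape)   # -> (4, 3, 2)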
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Reshape( - _Reshape.Attributes( - allowzero=AttrInt64(allowzero, name="allowzero"), - ), - _Reshape.Inputs( - data=unwrap_vars(data), - shape=unwrap_vars(shape), - ), - ) - .get_output_vars( - data=get_value(data), - shape=get_value(shape), - ) - .reshaped - ) + return _Reshape( + _Reshape.Attributes( + allowzero=AttrInt64(allowzero, name="allowzero"), + ), _Reshape.Inputs( + data=unwrap_vars(data), shape=unwrap_vars(shape), ), ).get_output_vars( + data=get_value(data), shape=get_value(shape), ).reshaped -def resize( - X: Var, - roi: Optional[Var] = None, - scales: Optional[Var] = None, - sizes: Optional[Var] = None, - *, - antialias: int = 0, - axes: Optional[Iterable[int]] = None, - coordinate_transformation_mode: str = "half_pixel", - cubic_coeff_a: float = -0.75, - exclude_outside: int = 0, - extrapolation_value: float = 0.0, - keep_aspect_ratio_policy: str = "stretch", - mode: str = "nearest", - nearest_mode: str = "round_prefer_floor", -) -> Var: +def resize(X: Var, roi: Optional[Var] = None, scales: Optional[Var] = None, sizes: Optional[Var] = None, *, antialias: int = 0, axes: Optional[Iterable[int]] = None, coordinate_transformation_mode: str = "half_pixel", cubic_coeff_a: float = -0.75, exclude_outside: int = 0, extrapolation_value: float = 0.0, keep_aspect_ratio_policy: str = "stretch", mode: str = "nearest", nearest_mode: str = "round_prefer_floor", ) -> Var: r""" - Resize the input tensor. In general, it calculates every value in the - output tensor as a weighted average of neighborhood (a.k.a. sampling - locations) in the input tensor. Each dimension value of the output - tensor is: +Resize the input tensor. In general, it calculates every value in the +output tensor as a weighted average of neighborhood (a.k.a. sampling +locations) in the input tensor. Each dimension value of the output +tensor is: + +:: + + output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) + +if input "sizes" is not specified. + +Parameters +========== +X + Type T1. + N-D tensor +roi + Type T2. + 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is + the rank of X or the length of axes, if provided. The RoIs' coordinates + are normalized in the coordinate system of the input image. It only + takes effect when coordinate_transformation_mode is "tf_crop_and_resize" +scales + Type tensor(float). + The scale array along each dimension. It takes value greater than 0. If + it's less than 1, it's sampling down, otherwise, it's upsampling. The + number of elements of 'scales' should be the same as the rank of input + 'X' or the length of 'axes', if provided. One of 'scales' and 'sizes' + MUST be specified and it is an error if both are specified. If 'sizes' + is needed, the user can use an empty string as the name of 'scales' in + this operator's input list. +sizes + Type tensor(int64). + Target size of the output tensor. Its interpretation depends on the + 'keep_aspect_ratio_policy' value.The number of elements of 'sizes' + should be the same as the rank of input 'X', or the length of 'axes', if + provided. 
Only one of 'scales' and 'sizes' can be specified. +antialias + Attribute. + If set to 1, "linear" and "cubic" interpolation modes will use an + antialiasing filter when downscaling. Antialiasing is achieved by + stretching the resampling filter by a factor max(1, 1 / scale), which + means that when downsampling, more input pixels contribute to an output + pixel. +axes + Attribute. + If provided, it specifies a subset of axes that 'roi', 'scales' and + 'sizes' refer to. If not provided, all axes are assumed [0, 1, ..., + r-1], where r = rank(data). Non-specified dimensions are interpreted as + non-resizable. Negative value means counting dimensions from the back. + Accepted range is [-r, r-1], where r = rank(data). Behavior is undefined + if an axis is repeated. +coordinate_transformation_mode + Attribute. + This attribute describes how to transform the coordinate in the resized + tensor to the coordinate in the original tensor. + + The coordinate of each dimension is transformed individually. Let's + describe a case using axis x as an example. Denote ``x_resized`` as the + coordinate of axis x in the resized tensor, ``x_original`` as the + coordinate of axis x in the original tensor, ``length_original`` as the + length of the original tensor in axis x, ``length_resized`` as the + length of the resized tensor in axis x, + ``scale = length_resized / length_original``, ``output_width`` the + target length on the axis x which can be a fractional number when it is + calculated out of a scale factor, and ``output_width_int`` the effective + output width as an integer. + + if coordinate_transformation_mode is ``"half_pixel"``, :: - output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) - - if input "sizes" is not specified. - - Parameters - ========== - X - Type T1. - N-D tensor - roi - Type T2. - 1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is - the rank of X or the length of axes, if provided. The RoIs' coordinates - are normalized in the coordinate system of the input image. It only - takes effect when coordinate_transformation_mode is "tf_crop_and_resize" - scales - Type tensor(float). - The scale array along each dimension. It takes value greater than 0. If - it's less than 1, it's sampling down, otherwise, it's upsampling. The - number of elements of 'scales' should be the same as the rank of input - 'X' or the length of 'axes', if provided. One of 'scales' and 'sizes' - MUST be specified and it is an error if both are specified. If 'sizes' - is needed, the user can use an empty string as the name of 'scales' in - this operator's input list. - sizes - Type tensor(int64). - Target size of the output tensor. Its interpretation depends on the - 'keep_aspect_ratio_policy' value.The number of elements of 'sizes' - should be the same as the rank of input 'X', or the length of 'axes', if - provided. Only one of 'scales' and 'sizes' can be specified. - antialias - Attribute. - If set to 1, "linear" and "cubic" interpolation modes will use an - antialiasing filter when downscaling. Antialiasing is achieved by - stretching the resampling filter by a factor max(1, 1 / scale), which - means that when downsampling, more input pixels contribute to an output - pixel. - axes - Attribute. - If provided, it specifies a subset of axes that 'roi', 'scales' and - 'sizes' refer to. If not provided, all axes are assumed [0, 1, ..., - r-1], where r = rank(data). Non-specified dimensions are interpreted as - non-resizable. 
Negative value means counting dimensions from the back. - Accepted range is [-r, r-1], where r = rank(data). Behavior is undefined - if an axis is repeated. - coordinate_transformation_mode - Attribute. - This attribute describes how to transform the coordinate in the resized - tensor to the coordinate in the original tensor. - - The coordinate of each dimension is transformed individually. Let's - describe a case using axis x as an example. Denote ``x_resized`` as the - coordinate of axis x in the resized tensor, ``x_original`` as the - coordinate of axis x in the original tensor, ``length_original`` as the - length of the original tensor in axis x, ``length_resized`` as the - length of the resized tensor in axis x, - ``scale = length_resized / length_original``, ``output_width`` the - target length on the axis x which can be a fractional number when it is - calculated out of a scale factor, and ``output_width_int`` the effective - output width as an integer. - - if coordinate_transformation_mode is ``"half_pixel"``, - - :: - - x_original = (x_resized + 0.5) / scale - 0.5 - - if coordinate_transformation_mode is ``"half_pixel_symmetric"``, - - :: - - adjustment = output_width_int / output_width - center = input_width / 2 - offset = center * (1 - adjustment) - x_ori = offset + (x + 0.5) / scale - 0.5 - - if coordinate_transformation_mode is ``"pytorch_half_pixel"``, - - :: - - x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0 - - if coordinate_transformation_mode is ``"align_corners"``, - - :: - - x_original = x_resized * (length_original - 1) / (length_resized - 1) - - if coordinate_transformation_mode is ``"asymmetric"``, - - :: - - x_original = x_resized / scale - - if coordinate_transformation_mode is ``"tf_crop_and_resize"``, - - :: - - x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1) - - . - cubic_coeff_a - Attribute. - The coefficient 'a' used in cubic interpolation. Two common choice are - -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out - Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the - details. This attribute is valid only if mode is "cubic". - exclude_outside - Attribute. - If set to 1, the weight of sampling locations outside the tensor will be - set to 0 and the weight will be renormalized so that their sum is 1.0. - The default value is 0. - extrapolation_value - Attribute. - When coordinate_transformation_mode is "tf_crop_and_resize" and - x_original is outside the range [0, length_original - 1], this value is - used as the corresponding output value. Default is 0.0f. - keep_aspect_ratio_policy - Attribute. - This attribute describes how to interpret the ``sizes`` input with - regard to keeping the original aspect ratio of the input, and it is not - applicable when the ``scales`` input is used. - - Given a set of ``sizes``, associated with a subset of ``axes`` - (explicitly provided or default), and assuming ``d = axes[i]``, with - ``i`` being the index of the provided ``sizes``. 
- - If ``keep_aspect_ratio_policy`` is ``"stretch"``, the original aspect - ratio is disregarded, and the input is resized to the specified size: - ``out_size[d] = sizes[i]`` - - If ``keep_aspect_ratio_policy`` is ``"not_larger"``, the sizes are - adjusted so that no extent of the output is larger than the specified - size, while keeping the original aspect ratio: - - :: - - scale = Min(sizes[i] / in_size[d]) - out_size[d] = round_int(scale * in_size[i]) - - If ``keep_aspect_ratio_policy`` is ``"not_smaller"``, the sizes are - adjusted so that no extent of the output is smaller than the specified - size, while keeping the original aspect ratio: - - :: - - scale = Max(sizes[i] / in_size[d]) - out_size[d] = round_int(scale * in_size[i]) + x_original = (x_resized + 0.5) / scale - 0.5 - For non-resizable axes (those not specified in ``axes``), the output - size will be equal to the input size. - - Note: ``round_int`` stands for computing the nearest integer value, - rounding halfway cases up. - mode - Attribute. - Three interpolation modes: "nearest" (default), "linear" and "cubic". - The "linear" mode includes linear interpolation for 1D tensor and - N-linear interpolation for N-D tensor (for example, bilinear - interpolation for 2D tensor). The "cubic" mode includes cubic - interpolation for 1D tensor and N-cubic interpolation for N-D tensor - (for example, bicubic interpolation for 2D tensor). - nearest_mode - Attribute. - Four modes: "round_prefer_floor" (default, as known as round half down), - "round_prefer_ceil" (as known as round half up), "floor", "ceil". Only - used by nearest interpolation. It indicates how to get "nearest" pixel - in input tensor from x_original, so this attribute is valid only if - "mode" is "nearest". - - Returns - ======= - Y : Var - Type T1. - N-D tensor after resizing - - Notes - ===== - Signature: ``ai.onnx@19::Resize``. 
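As a worked example of the formulas above, here is a 1-D sketch of the default ``"half_pixel"`` coordinate transform combined with ``"nearest"`` mode and ``"round_prefer_floor"``. It is illustrative only (no ``roi``, antialiasing, or the other modes)::

    import numpy as np

    def resize_nearest_1d(x, scale):
        out_len = int(np.floor(len(x) * scale))   # output_dimension = floor(input_dimension * scale)
        y = np.empty(out_len, dtype=x.dtype)
        for x_resized in range(out_len):
            x_original = (x_resized + 0.5) / scale - 0.5   # "half_pixel"
            idx = int(np.ceil(x_original - 0.5))           # "round_prefer_floor" (round half down)
            y[x_resized] = x[min(max(idx, 0), len(x) - 1)]  # clamp to a valid index
        return y

    print(resize_nearest_1d(np.array([1.0, 2.0, 3.0, 4.0]), 0.5))  # -> [1. 3.]
    print(resize_nearest_1d(np.array([1.0, 2.0]), 2.0))            # -> [1. 1. 2. 2.]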
- - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` - """ - return ( - _Resize( - _Resize.Attributes( - antialias=AttrInt64(antialias, name="antialias"), - axes=AttrInt64s.maybe(axes, name="axes"), - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, - name="coordinate_transformation_mode", - ), - cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), - exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), - extrapolation_value=AttrFloat32( - extrapolation_value, name="extrapolation_value" - ), - keep_aspect_ratio_policy=AttrString( - keep_aspect_ratio_policy, name="keep_aspect_ratio_policy" - ), - mode=AttrString(mode, name="mode"), - nearest_mode=AttrString(nearest_mode, name="nearest_mode"), - ), - _Resize.Inputs( - X=unwrap_vars(X), - roi=unwrap_vars(roi), - scales=unwrap_vars(scales), - sizes=unwrap_vars(sizes), - ), - ) - .get_output_vars( - X=get_value(X), - roi=get_value(roi), - scales=get_value(scales), - sizes=get_value(sizes), - ) - .Y - ) - - -def scan( - initial_state_and_scan_inputs: Sequence[Var], - *, - body: Callable[..., Iterable[Var]], - num_scan_inputs: int, - scan_input_axes: Optional[Iterable[int]] = None, - scan_input_directions: Optional[Iterable[int]] = None, - scan_output_axes: Optional[Iterable[int]] = None, - scan_output_directions: Optional[Iterable[int]] = None, -) -> Sequence[Var]: - r""" - Scan can be used to iterate over one or more scan_input tensors, - constructing zero or more scan_output tensors. It combines ideas from - general recurrences, functional programming constructs such as scan, - fold, map, and zip, and is intended to enable generalizations of - RNN-like constructs for sequence-to-sequence processing. Other tensors - (referred to as state_variables here) can be used to carry a state when - iterating from one element to another (similar to hidden-state in RNNs, - also referred to as loop-carried dependences in the context of loops). - Many common usages involve a single scan_input tensor (where - functionality similar to scan, fold and map can be obtained). When more - than one scan_input is used, a behavior similar to zip is obtained. - - The attribute body must be a graph, specifying the computation to be - performed in every iteration. It takes as input the current values of - the state_variables and the current iterated element of the scan_inputs. - It must return the (updated) values of the state_variables and zero or - more scan_output_element tensors. The values of the scan_output_element - tensors are concatenated over all the iterations to produce the - scan_output values of the scan construct (similar to the concatenated - intermediate hidden-state values of RNN-like constructs). All the output - tensors (state_variables as well as scan_output_element tensors) are - required to have the same shape in each iteration of the loop (a - restriction imposed to enable efficient memory allocation). - - Note that the iterated element passed to the body subgraph does not have - a sequence axis. It will have a rank one less than the rank of the - corresponding scan_input. - - The scan operation returns the final values of the state_variables as - well as the scan_outputs. 
- - The optional attribute scan_input_directions specifies the direction - (forward or backward) for each scan input. If this attribute is omitted, - all sequences are scanned in the forward direction. A bidirectional scan - may be performed by specifying the same tensor input twice in the - scan_inputs, once with a forward direction, and once with a backward - direction. - - The scan_output of the operation is produced by concatenating the - scan_output_element values produced by the body in each iteration. The - optional attribute scan_output_directions specifies the direction in - which scan_output is constructed (by appending or prepending the - scan_output_element to scan_output in each iteration) for each - scan_output. If this attribute is omitted, the scan_output_element is - appended to the scan_output in each iteration. - - The optional attribute scan_input_axes specifies the axis to be scanned - for each scan_input. If omitted, every scan_input will be scanned in - axis 0. For example, if axis 0 is the batch axis and axis 1 is the time - axis (to be scanned), specify an axis value of 1. Note that scanning a - non-zero axis may be less efficient than scanning axis zero. - - The optional attribute scan_output_axes specifies the axis along which - the scan_outputs are accumulated for each scan_output. For example, if - axis 1 is the time axis (to be scanned) for both inputs and outputs, - specify a scan_input axis and scan_output axis value of 1. - - Note that because of the ONNX restriction that only the last parameter - of an operator can be variadic, the initial-states and scan-inputs are - listed together as one input parameter. Similarly, the final-states and - scan-outputs are listed together as one output parameter. The attribute - num_scan_inputs indicates the number M of scan-inputs. - - The behavior of + if coordinate_transformation_mode is ``"half_pixel_symmetric"``, :: - Scan < - num_scan_inputs = m, - body = loop-body, - scan_input_axes = [axis_1, ..., axis_m] - > (init_1, ..., init_n, scan_1, ..., scan_m) + adjustment = output_width_int / output_width + center = input_width / 2 + offset = center * (1 - adjustment) + x_ori = offset + (x + 0.5) / scale - 0.5 - is equivalent to the following pseudo-code: + if coordinate_transformation_mode is ``"pytorch_half_pixel"``, :: - // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i - // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. - sequence_length = scan_1.shape[axis_1]; - - // initialize state-variables - st_1 = init_1; ... st_n = init_n; - // initialize scan-output variables: [] denotes an empty tensor - scan_out_1 = []; ...; scan_out_k = []; - // identify number of iterations: - - // execute loop - for (int t = 0; t < sequence_length; ++t) { - // generate the scan-input elements: the notation T[t] indicates the sub-tensor - // of rank one less than T obtained by indexing T at position t along axis k. - si_1 = scan_1[t]; - ... ; - si_m = scan_m[t]; - // execute loop-body - st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m) - // accumulate the scan-output elements - scan_out_1 = Concat(scan_out_1, so_1); ... 
; scan_out_k = Concat(scan_out_k, so_k); - } - - return st_1, ..., st_n, scan_out_1, ..., scan_out_k; - - *Sample usage: Encoding RNN using a Scan* - - The following example shows how a simple RNN over an input tensor %X, - with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi - and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. - Note that the loop-body is a nested graph, and it directly computes %Wi, - %Ri, %Wbi, and %Rbi (typically constants or initializers in the body - graph). If these values are computed in the outer graph, they need to be - passed in as extra state_variables. + x_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0 - :: + if coordinate_transformation_mode is ``"align_corners"``, - graph rnn-encoding { - %H_0 = ... - %X = ... - %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X) - return %Y, %Y_h - } - - graph rnn-cell-1 ( - %H_tminus1[FLOAT, tensor] - %X_t[FLOAT, tensor] - ) { - %Wi = ... - %Ri = ... - %Wbi = ... - %Rbi = ... - %t1 = X_t * (Wi^T) - %t2 = H_tminus1*(Ri^T) - %t3 = Add(%t1, %t2) - %t4 = Add(%t3, %Wbi) - %t5 = Add(%t4, %Rbi) - %Ht = Tanh(%t5) - %Accumulate = Identity(%Ht) - return %Ht, %Accumulate - } - - Parameters - ========== - initial_state_and_scan_inputs - Type V. - Initial values of the loop's N state variables followed by M scan_inputs - body - Attribute. - The graph run each iteration. It has N+M inputs: (loop state - variables..., scan_input_elts...). It has N+K outputs: (loop state - variables..., scan_output_elts...). Each scan_output is created by - concatenating the value of the specified scan_output_elt value at the - end of each iteration of the loop. It is an error if the dimensions of - these values change across loop iterations. - num_scan_inputs - Attribute. - An attribute specifying the number of scan_inputs M. - scan_input_axes - Attribute. - An optional list of M flags. The i-th element of the list specifies the - axis to be scanned (the sequence axis) for the i-th scan_input. If - omitted, 0 will be used as the scan axis for every scan_input. Negative - value for an axis means counting dimensions from the back. Accepted - range is [-r, r-1] where r = rank(input). - scan_input_directions - Attribute. - An optional list of M flags. The i-th element of the list specifies the - direction to be scanned for the i-th scan_input tensor: 0 indicates - forward direction and 1 indicates reverse direction. If omitted, all - scan_input tensors will be scanned in the forward direction. - scan_output_axes - Attribute. - An optional list of K flags. The i-th element of the list specifies the - axis for the i-th scan_output. The scan outputs are accumulated along - the specified axis. If omitted, 0 will be used as the scan axis for - every scan_output. Negative value for an axis means counting dimensions - from the back. Accepted range is [-r, r-1]. - scan_output_directions - Attribute. - An optional list of K flags, one for each scan_output. The i-th element - of the list specifies whether the i-th scan_output should be constructed - by appending or prepending a new value in each iteration: 0 indicates - appending and 1 indicates prepending. If omitted, all scan_output - tensors will be produced by appending a value in each iteration. - - Returns - ======= - final_state_and_scan_outputs : Sequence[Var] - Type V. - Final values of the loop's N state variables followed by K scan_outputs - - Notes - ===== - Signature: ``ai.onnx@19::Scan``. 
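Specialised to one state variable and one scan input (axis 0, forward direction), the pseudo-code above is a fold that also records every intermediate result. ``run_scan`` below is an illustrative helper, shown computing a running sum::

    import numpy as np

    def run_scan(body, init, scan_input):
        # body(state, scan_element) -> (new_state, scan_output_element)
        state, outputs = init, []
        for element in scan_input:       # iterate along axis 0, forward direction
            state, out = body(state, element)
            outputs.append(out)          # concatenated into the scan_output
        return state, np.stack(outputs)

    final, partials = run_scan(lambda s, x: (s + x, s + x),
                               np.float32(0.0), np.arange(1.0, 5.0, dtype=np.float32))
    print(final, partials)               # -> 10.0 [ 1.  3.  6. 10.]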
- - Type constraints: - - V: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - """ - _body_subgraph: Graph = subgraph( - [ - Tensor( - var.unwrap_tensor().dtype, - (lambda x: x[1:] if x is not None else None)(var.unwrap_tensor().shape), - ) - for var in initial_state_and_scan_inputs[:num_scan_inputs] - ] - + [ - Tensor(var.unwrap_tensor().dtype) - for var in initial_state_and_scan_inputs[num_scan_inputs:] - ], - body, - ) - return ( - _Scan( - _Scan.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), - scan_input_axes=AttrInt64s.maybe( - scan_input_axes, name="scan_input_axes" - ), - scan_input_directions=AttrInt64s.maybe( - scan_input_directions, name="scan_input_directions" - ), - scan_output_axes=AttrInt64s.maybe( - scan_output_axes, name="scan_output_axes" - ), - scan_output_directions=AttrInt64s.maybe( - scan_output_directions, name="scan_output_directions" - ), - ), - _Scan.Inputs( - initial_state_and_scan_inputs=unwrap_vars( - initial_state_and_scan_inputs - ), - ), - out_variadic=len(_body_subgraph.requested_results), - ) - .get_output_vars( - initial_state_and_scan_inputs=get_value(initial_state_and_scan_inputs), - ) - .final_state_and_scan_outputs - ) + :: + x_original = x_resized * (length_original - 1) / (length_resized - 1) -def shape( - data: Var, - *, - end: Optional[int] = None, - start: int = 0, -) -> Var: - r""" - Takes a tensor as input and outputs an 1D int64 tensor containing the - shape of the input tensor. Optional attributes start and end can be used - to compute a slice of the input tensor's shape. If start axis is - omitted, the slice starts from axis 0. The end axis, if specified, is - exclusive (and the returned value will not include the size of that - axis). If the end axis is omitted, the axes upto the last one will be - included. Negative axes indicate counting back from the last axis. Note - that axes will be clamped to the range [0, r-1], where r is the rank of - the input tensor if they are out-of-range (after adding r in the case of - negative axis). Thus, specifying any end value > r is equivalent to - specifying an end value of r, and specifying any start value < -r is - equivalent to specifying a start value of 0. - - Examples: + if coordinate_transformation_mode is ``"asymmetric"``, :: - Input tensor with shape: [2, 3, 4] - No attributes specified. - Output: [2, 3, 4] + x_original = x_resized / scale + + if coordinate_transformation_mode is ``"tf_crop_and_resize"``, :: - Input tensor with shape: [2, 3, 4] - start: -1 - Output: [4] + x_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1) + + . +cubic_coeff_a + Attribute. + The coefficient 'a' used in cubic interpolation. Two common choice are + -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out + Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the + details. This attribute is valid only if mode is "cubic". +exclude_outside + Attribute. 
+ If set to 1, the weight of sampling locations outside the tensor will be + set to 0 and the weight will be renormalized so that their sum is 1.0. + The default value is 0. +extrapolation_value + Attribute. + When coordinate_transformation_mode is "tf_crop_and_resize" and + x_original is outside the range [0, length_original - 1], this value is + used as the corresponding output value. Default is 0.0f. +keep_aspect_ratio_policy + Attribute. + This attribute describes how to interpret the ``sizes`` input with + regard to keeping the original aspect ratio of the input, and it is not + applicable when the ``scales`` input is used. + + Given a set of ``sizes``, associated with a subset of ``axes`` + (explicitly provided or default), and assuming ``d = axes[i]``, with + ``i`` being the index of the provided ``sizes``. + + If ``keep_aspect_ratio_policy`` is ``"stretch"``, the original aspect + ratio is disregarded, and the input is resized to the specified size: + ``out_size[d] = sizes[i]`` + + If ``keep_aspect_ratio_policy`` is ``"not_larger"``, the sizes are + adjusted so that no extent of the output is larger than the specified + size, while keeping the original aspect ratio: :: - Input tensor with shape: [2, 3, 4] - end: -1 - Output: [2, 3] + scale = Min(sizes[i] / in_size[d]) + out_size[d] = round_int(scale * in_size[i]) + + If ``keep_aspect_ratio_policy`` is ``"not_smaller"``, the sizes are + adjusted so that no extent of the output is smaller than the specified + size, while keeping the original aspect ratio: :: - Input tensor with shape: [2, 3, 4] - start: 1 - end: 2 - Output: [3] - - Parameters - ========== - data - Type T. - An input tensor. - end - Attribute. - (Optional) Ending axis for slicing the shape. Negative value means - counting dimensions from the back. If omitted, sizes of all axes upto - (including) the last one will be included. - start - Attribute. - (Optional) Starting axis for slicing the shape. Default value is - 0.Negative value means counting dimensions from the back. - - Returns - ======= - shape : Var - Type T1. - Shape of the input tensor - - Notes - ===== - Signature: ``ai.onnx@19::Shape``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(int64)` + scale = Max(sizes[i] / in_size[d]) + out_size[d] = round_int(scale * in_size[i]) + + For non-resizable axes (those not specified in ``axes``), the output + size will be equal to the input size. + + Note: ``round_int`` stands for computing the nearest integer value, + rounding halfway cases up. +mode + Attribute. + Three interpolation modes: "nearest" (default), "linear" and "cubic". + The "linear" mode includes linear interpolation for 1D tensor and + N-linear interpolation for N-D tensor (for example, bilinear + interpolation for 2D tensor). The "cubic" mode includes cubic + interpolation for 1D tensor and N-cubic interpolation for N-D tensor + (for example, bicubic interpolation for 2D tensor). +nearest_mode + Attribute. + Four modes: "round_prefer_floor" (default, as known as round half down), + "round_prefer_ceil" (as known as round half up), "floor", "ceil". Only + used by nearest interpolation. 
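As a minimal standalone NumPy sketch of the ``keep_aspect_ratio_policy`` arithmetic quoted above (the helper name and the example sizes are illustrative, not part of the generated module):

::

    import numpy as np

    def apply_keep_aspect_ratio(in_size, sizes, policy):
        # in_size: input extents along the resized axes; sizes: requested extents.
        in_size = np.asarray(in_size, dtype=np.float64)
        sizes = np.asarray(sizes, dtype=np.float64)
        if policy == "stretch":
            return sizes.astype(np.int64)
        ratios = sizes / in_size
        scale = ratios.min() if policy == "not_larger" else ratios.max()
        # round_int: nearest integer, halfway cases rounded up
        return np.floor(scale * in_size + 0.5).astype(np.int64)

    # Example: a 100x60 input, requested sizes (50, 50) on both spatial axes.
    print(apply_keep_aspect_ratio([100, 60], [50, 50], "not_larger"))   # [50 30]
    print(apply_keep_aspect_ratio([100, 60], [50, 50], "not_smaller"))  # [83 50]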
It indicates how to get "nearest" pixel + in input tensor from x_original, so this attribute is valid only if + "mode" is "nearest". + +Returns +======= +Y : Var + Type T1. + N-D tensor after resizing + +Notes +===== +Signature: ``ai.onnx@19::Resize``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` + """ + return _Resize( + _Resize.Attributes( + antialias=AttrInt64(antialias, name="antialias"), + axes=AttrInt64s.maybe(axes, name="axes"), + coordinate_transformation_mode=AttrString(coordinate_transformation_mode, name="coordinate_transformation_mode"), + cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), + exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), + extrapolation_value=AttrFloat32(extrapolation_value, name="extrapolation_value"), + keep_aspect_ratio_policy=AttrString(keep_aspect_ratio_policy, name="keep_aspect_ratio_policy"), + mode=AttrString(mode, name="mode"), + nearest_mode=AttrString(nearest_mode, name="nearest_mode"), + ), _Resize.Inputs( + X=unwrap_vars(X), roi=unwrap_vars(roi), scales=unwrap_vars(scales), sizes=unwrap_vars(sizes), ), ).get_output_vars( + X=get_value(X), roi=get_value(roi), scales=get_value(scales), sizes=get_value(sizes), ).Y + + +def scan(initial_state_and_scan_inputs: Sequence[Var], *, body: Callable[..., Iterable[Var]], num_scan_inputs: int, scan_input_axes: Optional[Iterable[int]] = None, scan_input_directions: Optional[Iterable[int]] = None, scan_output_axes: Optional[Iterable[int]] = None, scan_output_directions: Optional[Iterable[int]] = None, ) -> Sequence[Var]: + r""" +Scan can be used to iterate over one or more scan_input tensors, +constructing zero or more scan_output tensors. It combines ideas from +general recurrences, functional programming constructs such as scan, +fold, map, and zip, and is intended to enable generalizations of +RNN-like constructs for sequence-to-sequence processing. Other tensors +(referred to as state_variables here) can be used to carry a state when +iterating from one element to another (similar to hidden-state in RNNs, +also referred to as loop-carried dependences in the context of loops). +Many common usages involve a single scan_input tensor (where +functionality similar to scan, fold and map can be obtained). When more +than one scan_input is used, a behavior similar to zip is obtained. + +The attribute body must be a graph, specifying the computation to be +performed in every iteration. It takes as input the current values of +the state_variables and the current iterated element of the scan_inputs. +It must return the (updated) values of the state_variables and zero or +more scan_output_element tensors. The values of the scan_output_element +tensors are concatenated over all the iterations to produce the +scan_output values of the scan construct (similar to the concatenated +intermediate hidden-state values of RNN-like constructs). All the output +tensors (state_variables as well as scan_output_element tensors) are +required to have the same shape in each iteration of the loop (a +restriction imposed to enable efficient memory allocation). + +Note that the iterated element passed to the body subgraph does not have +a sequence axis. 
It will have a rank one less than the rank of the +corresponding scan_input. + +The scan operation returns the final values of the state_variables as +well as the scan_outputs. + +The optional attribute scan_input_directions specifies the direction +(forward or backward) for each scan input. If this attribute is omitted, +all sequences are scanned in the forward direction. A bidirectional scan +may be performed by specifying the same tensor input twice in the +scan_inputs, once with a forward direction, and once with a backward +direction. + +The scan_output of the operation is produced by concatenating the +scan_output_element values produced by the body in each iteration. The +optional attribute scan_output_directions specifies the direction in +which scan_output is constructed (by appending or prepending the +scan_output_element to scan_output in each iteration) for each +scan_output. If this attribute is omitted, the scan_output_element is +appended to the scan_output in each iteration. + +The optional attribute scan_input_axes specifies the axis to be scanned +for each scan_input. If omitted, every scan_input will be scanned in +axis 0. For example, if axis 0 is the batch axis and axis 1 is the time +axis (to be scanned), specify an axis value of 1. Note that scanning a +non-zero axis may be less efficient than scanning axis zero. + +The optional attribute scan_output_axes specifies the axis along which +the scan_outputs are accumulated for each scan_output. For example, if +axis 1 is the time axis (to be scanned) for both inputs and outputs, +specify a scan_input axis and scan_output axis value of 1. + +Note that because of the ONNX restriction that only the last parameter +of an operator can be variadic, the initial-states and scan-inputs are +listed together as one input parameter. Similarly, the final-states and +scan-outputs are listed together as one output parameter. The attribute +num_scan_inputs indicates the number M of scan-inputs. + +The behavior of + +:: + + Scan < + num_scan_inputs = m, + body = loop-body, + scan_input_axes = [axis_1, ..., axis_m] + > (init_1, ..., init_n, scan_1, ..., scan_m) + +is equivalent to the following pseudo-code: + +:: + + // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i + // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. + sequence_length = scan_1.shape[axis_1]; + + // initialize state-variables + st_1 = init_1; ... st_n = init_n; + // initialize scan-output variables: [] denotes an empty tensor + scan_out_1 = []; ...; scan_out_k = []; + // identify number of iterations: + + // execute loop + for (int t = 0; t < sequence_length; ++t) { + // generate the scan-input elements: the notation T[t] indicates the sub-tensor + // of rank one less than T obtained by indexing T at position t along axis k. + si_1 = scan_1[t]; + ... ; + si_m = scan_m[t]; + // execute loop-body + st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m) + // accumulate the scan-output elements + scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k); + } + + return st_1, ..., st_n, scan_out_1, ..., scan_out_k; + +*Sample usage: Encoding RNN using a Scan* + +The following example shows how a simple RNN over an input tensor %X, +with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi +and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. 
+Note that the loop-body is a nested graph, and it directly computes %Wi, +%Ri, %Wbi, and %Rbi (typically constants or initializers in the body +graph). If these values are computed in the outer graph, they need to be +passed in as extra state_variables. + +:: + + graph rnn-encoding { + %H_0 = ... + %X = ... + %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X) + return %Y, %Y_h + } + + graph rnn-cell-1 ( + %H_tminus1[FLOAT, tensor] + %X_t[FLOAT, tensor] + ) { + %Wi = ... + %Ri = ... + %Wbi = ... + %Rbi = ... + %t1 = X_t * (Wi^T) + %t2 = H_tminus1*(Ri^T) + %t3 = Add(%t1, %t2) + %t4 = Add(%t3, %Wbi) + %t5 = Add(%t4, %Rbi) + %Ht = Tanh(%t5) + %Accumulate = Identity(%Ht) + return %Ht, %Accumulate + } + +Parameters +========== +initial_state_and_scan_inputs + Type V. + Initial values of the loop's N state variables followed by M scan_inputs +body + Attribute. + The graph run each iteration. It has N+M inputs: (loop state + variables..., scan_input_elts...). It has N+K outputs: (loop state + variables..., scan_output_elts...). Each scan_output is created by + concatenating the value of the specified scan_output_elt value at the + end of each iteration of the loop. It is an error if the dimensions of + these values change across loop iterations. +num_scan_inputs + Attribute. + An attribute specifying the number of scan_inputs M. +scan_input_axes + Attribute. + An optional list of M flags. The i-th element of the list specifies the + axis to be scanned (the sequence axis) for the i-th scan_input. If + omitted, 0 will be used as the scan axis for every scan_input. Negative + value for an axis means counting dimensions from the back. Accepted + range is [-r, r-1] where r = rank(input). +scan_input_directions + Attribute. + An optional list of M flags. The i-th element of the list specifies the + direction to be scanned for the i-th scan_input tensor: 0 indicates + forward direction and 1 indicates reverse direction. If omitted, all + scan_input tensors will be scanned in the forward direction. +scan_output_axes + Attribute. + An optional list of K flags. The i-th element of the list specifies the + axis for the i-th scan_output. The scan outputs are accumulated along + the specified axis. If omitted, 0 will be used as the scan axis for + every scan_output. Negative value for an axis means counting dimensions + from the back. Accepted range is [-r, r-1]. +scan_output_directions + Attribute. + An optional list of K flags, one for each scan_output. The i-th element + of the list specifies whether the i-th scan_output should be constructed + by appending or prepending a new value in each iteration: 0 indicates + appending and 1 indicates prepending. If omitted, all scan_output + tensors will be produced by appending a value in each iteration. + +Returns +======= +final_state_and_scan_outputs : Sequence[Var] + Type V. + Final values of the loop's N state variables followed by K scan_outputs + +Notes +===== +Signature: ``ai.onnx@19::Scan``. 
+ +Type constraints: + - V: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Shape( - _Shape.Attributes( - end=AttrInt64.maybe(end, name="end"), - start=AttrInt64(start, name="start"), - ), - _Shape.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .shape + _body_subgraph: Graph = subgraph( + [Tensor(var.unwrap_tensor().dtype, (lambda x: x[1:] if x is not None else None)(var.unwrap_tensor().shape)) for var in initial_state_and_scan_inputs[:num_scan_inputs]] + [Tensor(var.unwrap_tensor().dtype) for var in initial_state_and_scan_inputs[num_scan_inputs:]], + body ) + return _Scan( + _Scan.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), + scan_input_axes=AttrInt64s.maybe(scan_input_axes, name="scan_input_axes"), + scan_input_directions=AttrInt64s.maybe(scan_input_directions, name="scan_input_directions"), + scan_output_axes=AttrInt64s.maybe(scan_output_axes, name="scan_output_axes"), + scan_output_directions=AttrInt64s.maybe(scan_output_directions, name="scan_output_directions"), + ), _Scan.Inputs( + initial_state_and_scan_inputs=unwrap_vars(initial_state_and_scan_inputs), ), out_variadic=len(_body_subgraph.requested_results), ).get_output_vars( + initial_state_and_scan_inputs=get_value(initial_state_and_scan_inputs), ).final_state_and_scan_outputs + + +def shape(data: Var, *, end: Optional[int] = None, start: int = 0, ) -> Var: + r""" +Takes a tensor as input and outputs an 1D int64 tensor containing the +shape of the input tensor. Optional attributes start and end can be used +to compute a slice of the input tensor's shape. If start axis is +omitted, the slice starts from axis 0. The end axis, if specified, is +exclusive (and the returned value will not include the size of that +axis). If the end axis is omitted, the axes upto the last one will be +included. Negative axes indicate counting back from the last axis. Note +that axes will be clamped to the range [0, r-1], where r is the rank of +the input tensor if they are out-of-range (after adding r in the case of +negative axis). Thus, specifying any end value > r is equivalent to +specifying an end value of r, and specifying any start value < -r is +equivalent to specifying a start value of 0. + +Examples: + +:: + + Input tensor with shape: [2, 3, 4] + No attributes specified. + Output: [2, 3, 4] + +:: + + Input tensor with shape: [2, 3, 4] + start: -1 + Output: [4] + +:: + + Input tensor with shape: [2, 3, 4] + end: -1 + Output: [2, 3] + +:: + + Input tensor with shape: [2, 3, 4] + start: 1 + end: 2 + Output: [3] + +Parameters +========== +data + Type T. + An input tensor. +end + Attribute. + (Optional) Ending axis for slicing the shape. Negative value means + counting dimensions from the back. If omitted, sizes of all axes upto + (including) the last one will be included. +start + Attribute. + (Optional) Starting axis for slicing the shape. Default value is + 0.Negative value means counting dimensions from the back. + +Returns +======= +shape : Var + Type T1. + Shape of the input tensor + +Notes +===== +Signature: ``ai.onnx@19::Shape``. 
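A minimal usage sketch of the ``scan`` constructor defined above, computing a running sum along axis 0 with one state variable and one scan output per step (this assumes spox's public ``argument``/``build`` helpers; the names ``running_sum_body``, ``init`` and ``xs`` are illustrative):

::

    import numpy as np
    from spox import Tensor, Var, argument, build
    import spox.opset.ai.onnx.v19 as op

    def running_sum_body(acc: Var, elem: Var):
        new_acc = op.add(acc, elem)
        # N=1 updated state variable followed by K=1 scan-output element.
        return [new_acc, new_acc]

    init = argument(Tensor(np.float32, ()))    # initial accumulator (scalar)
    xs = argument(Tensor(np.float32, ("N",)))  # sequence scanned along axis 0
    final_acc, partial_sums = op.scan(
        [init, xs], body=running_sum_body, num_scan_inputs=1
    )
    model = build(inputs={"init": init, "xs": xs}, outputs={"sums": partial_sums})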
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(int64)` + """ + return _Shape( + _Shape.Attributes( + end=AttrInt64.maybe(end, name="end"), + start=AttrInt64(start, name="start"), + ), _Shape.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).shape -def size( - data: Var, -) -> Var: +def size(data: Var, ) -> Var: r""" - Takes a tensor as input and outputs a int64 scalar that equals to the - total number of elements of the input tensor. - - Parameters - ========== - data - Type T. - An input tensor. - - Returns - ======= - size : Var - Type T1. - Total number of elements of the input tensor - - Notes - ===== - Signature: ``ai.onnx@19::Size``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(int64)` +Takes a tensor as input and outputs a int64 scalar that equals to the +total number of elements of the input tensor. + +Parameters +========== +data + Type T. + An input tensor. + +Returns +======= +size : Var + Type T1. + Total number of elements of the input tensor + +Notes +===== +Signature: ``ai.onnx@19::Size``. 
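A quick sketch mirroring the worked ``shape`` examples above, plus a ``size`` call, using the constructors defined in this module (variable names are illustrative; the expected values come from the docstring examples):

::

    import numpy as np
    from spox import Tensor, argument
    import spox.opset.ai.onnx.v19 as op

    x = argument(Tensor(np.float32, (2, 3, 4)))
    full = op.shape(x)                    # -> [2, 3, 4]
    last = op.shape(x, start=-1)          # -> [4]
    middle = op.shape(x, start=1, end=2)  # -> [3]
    n_elements = op.size(x)               # -> 24 (int64 scalar)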
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(int64)` """ - return ( - _Size( - _Size.Attributes(), - _Size.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .size - ) + return _Size( + _Size.Attributes( + ), _Size.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).size def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: @@ -3134,4 +2637,4 @@ def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: "Xor": xor, } -__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] + ["const"] +__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] + ["const"] diff --git a/src/spox/opset/ai/onnx/v20.py b/src/spox/opset/ai/onnx/v20.py index 0e1d406..004b613 100644 --- a/src/spox/opset/ai/onnx/v20.py +++ b/src/spox/opset/ai/onnx/v20.py @@ -1,384 +1,219 @@ -# Copyright (c) QuantCo 2023-2024 -# SPDX-License-Identifier: BSD-3-Clause - # ruff: noqa: E741 -- Allow ambiguous variable name +import typing +import warnings from dataclasses import dataclass +from collections.abc import Iterable, Sequence from typing import ( + Any, + Callable, Optional, + Union, ) +from typing import cast as typing_cast import numpy as np import numpy.typing as npt +from spox._var import Var, VarInfo, result_type, unwrap_vars, get_value +from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._attributes import ( + AttrDtype, + AttrFloat32, + AttrFloat32s, + AttrGraph, AttrInt64, + AttrInt64s, AttrString, + AttrStrings, AttrTensor, + AttrType, ) -from spox._fields import BaseAttributes, BaseInputs, BaseOutputs +from spox._graph import Graph, subgraph +from spox._internal_op import intro from spox._node import OpType -from spox._standard import StandardNode -from spox._var import Var, VarInfo, get_value, unwrap_vars -from spox.opset.ai.onnx.v19 import ( - _GRU, - _LRN, - _LSTM, - _RNN, - _STFT, - _Abs, - _Acos, - _Acosh, - _Add, - _And, - _ArgMax, - _ArgMin, - _Asin, - _Asinh, - _Atan, - _Atanh, - _AveragePool, - _BatchNormalization, - _Bernoulli, - _BitShift, - _BitwiseAnd, - _BitwiseNot, - _BitwiseOr, - _BitwiseXor, - _BlackmanWindow, - _Cast, - _CastLike, - _Ceil, - _Celu, - _CenterCropPad, - _Clip, - _Col2Im, - _Compress, - _Concat, - _ConcatFromSequence, - _Constant, - _Conv, - _ConvInteger, - _ConvTranspose, - _Cos, - _Cosh, - _CumSum, - _DeformConv, - _DepthToSpace, - _DequantizeLinear, - _Det, - _Div, - _Dropout, - _DynamicQuantizeLinear, - _Einsum, - _Elu, - _Equal, - _Erf, - _Exp, - _Expand, - _EyeLike, - _Flatten, - _Floor, - _Gather, - _GatherElements, - _GatherND, - _Gemm, - _GlobalAveragePool, - _GlobalLpPool, - _GlobalMaxPool, - _Greater, - _GreaterOrEqual, - _GroupNormalization, - _HammingWindow, - _HannWindow, - _Hardmax, - _HardSigmoid, - _HardSwish, - _Identity, - _If, - _InstanceNormalization, - _LayerNormalization, - _LeakyRelu, - _Less, - _LessOrEqual, - _Log, - _LogSoftmax, - _Loop, - _LpNormalization, - _LpPool, - _MatMul, - _MatMulInteger, - _Max, - _MaxPool, - _MaxRoiPool, - _MaxUnpool, - _Mean, - _MeanVarianceNormalization, - _MelWeightMatrix, - _Min, - _Mish, - _Mod, - _Mul, - _Multinomial, - _Neg, - 
_NegativeLogLikelihoodLoss, - _NonMaxSuppression, - _NonZero, - _Not, - _OneHot, - _Optional, - _OptionalGetElement, - _OptionalHasElement, - _Or, - _Pad, - _Pow, - _PRelu, - _QLinearConv, - _QLinearMatMul, - _QuantizeLinear, - _RandomNormal, - _RandomNormalLike, - _RandomUniform, - _RandomUniformLike, - _Range, - _Reciprocal, - _ReduceL1, - _ReduceL2, - _ReduceLogSum, - _ReduceLogSumExp, - _ReduceMean, - _ReduceProd, - _ReduceSum, - _ReduceSumSquare, - _Relu, - _Reshape, - _Resize, - _ReverseSequence, - _RoiAlign, - _Round, - _Scan, - _ScatterElements, - _ScatterND, - _Selu, - _SequenceAt, - _SequenceConstruct, - _SequenceEmpty, - _SequenceErase, - _SequenceInsert, - _SequenceLength, - _SequenceMap, - _Shape, - _Shrink, - _Sigmoid, - _Sign, - _Sin, - _Sinh, - _Size, - _Slice, - _Softmax, - _SoftmaxCrossEntropyLoss, - _Softplus, - _Softsign, - _SpaceToDepth, - _Split, - _SplitToSequence, - _Sqrt, - _Squeeze, - _StringNormalizer, - _Sub, - _Sum, - _Tan, - _Tanh, - _TfIdfVectorizer, - _ThresholdedRelu, - _Tile, - _TopK, - _Transpose, - _Trilu, - _Unique, - _Unsqueeze, - _Where, - _Xor, - abs, - acos, - acosh, - add, - and_, - arg_max, - arg_min, - asin, - asinh, - atan, - atanh, - average_pool, - batch_normalization, - bernoulli, - bit_shift, - bitwise_and, - bitwise_not, - bitwise_or, - bitwise_xor, - blackman_window, - cast, - cast_like, - ceil, - celu, - center_crop_pad, - clip, - col2_im, - compress, - concat, - concat_from_sequence, - constant, - conv, - conv_integer, - conv_transpose, - cos, - cosh, - cumsum, - deform_conv, - depth_to_space, - dequantize_linear, - det, - div, - dropout, - dynamic_quantize_linear, - einsum, - elu, - equal, - erf, - exp, - expand, - eye_like, - flatten, - floor, - gather, - gather_elements, - gather_nd, - gemm, - global_average_pool, - global_lp_pool, - global_max_pool, - greater, - greater_or_equal, - group_normalization, - gru, - hamming_window, - hann_window, - hard_sigmoid, - hard_swish, - hardmax, - identity, - if_, - instance_normalization, - layer_normalization, - leaky_relu, - less, - less_or_equal, - log, - log_softmax, - loop, - lp_normalization, - lp_pool, - lrn, - lstm, - matmul, - matmul_integer, - max, - max_pool, - max_roi_pool, - max_unpool, - mean, - mean_variance_normalization, - mel_weight_matrix, - min, - mish, - mod, - mul, - multinomial, - neg, - negative_log_likelihood_loss, - non_max_suppression, - non_zero, - not_, - one_hot, - optional, - optional_get_element, - optional_has_element, - or_, - pad, - pow, - prelu, - qlinear_conv, - qlinear_matmul, - quantize_linear, - random_normal, - random_normal_like, - random_uniform, - random_uniform_like, - range, - reciprocal, - reduce_l1, - reduce_l2, - reduce_log_sum, - reduce_log_sum_exp, - reduce_mean, - reduce_prod, - reduce_sum, - reduce_sum_square, - relu, - reshape, - resize, - reverse_sequence, - rnn, - roi_align, - round, - scan, - scatter_elements, - scatter_nd, - selu, - sequence_at, - sequence_construct, - sequence_empty, - sequence_erase, - sequence_insert, - sequence_length, - sequence_map, - shape, - shrink, - sigmoid, - sign, - sin, - sinh, - size, - slice, - softmax, - softmax_cross_entropy_loss, - softplus, - softsign, - space_to_depth, - split, - split_to_sequence, - sqrt, - squeeze, - stft, - string_normalizer, - sub, - sum, - tan, - tanh, - tf_idf_vectorizer, - thresholded_relu, - tile, - top_k, - transpose, - trilu, - unique, - unsqueeze, - where, - xor, -) - - +from spox._standard import InferenceError, StandardNode +from spox._type_system import Tensor, Type, 
Sequence as SpoxSequence +from spox._value_prop import PropValueType + + +from spox.opset.ai.onnx.v19 import _Abs, abs +from spox.opset.ai.onnx.v19 import _Acos, acos +from spox.opset.ai.onnx.v19 import _Acosh, acosh +from spox.opset.ai.onnx.v19 import _Add, add +from spox.opset.ai.onnx.v19 import _And, and_ +from spox.opset.ai.onnx.v19 import _ArgMax, arg_max +from spox.opset.ai.onnx.v19 import _ArgMin, arg_min +from spox.opset.ai.onnx.v19 import _Asin, asin +from spox.opset.ai.onnx.v19 import _Asinh, asinh +from spox.opset.ai.onnx.v19 import _Atan, atan +from spox.opset.ai.onnx.v19 import _Atanh, atanh +from spox.opset.ai.onnx.v19 import _AveragePool, average_pool +from spox.opset.ai.onnx.v19 import _BatchNormalization, batch_normalization +from spox.opset.ai.onnx.v19 import _Bernoulli, bernoulli +from spox.opset.ai.onnx.v19 import _BitShift, bit_shift +from spox.opset.ai.onnx.v19 import _BitwiseAnd, bitwise_and +from spox.opset.ai.onnx.v19 import _BitwiseNot, bitwise_not +from spox.opset.ai.onnx.v19 import _BitwiseOr, bitwise_or +from spox.opset.ai.onnx.v19 import _BitwiseXor, bitwise_xor +from spox.opset.ai.onnx.v19 import _BlackmanWindow, blackman_window +from spox.opset.ai.onnx.v19 import _Cast, cast +from spox.opset.ai.onnx.v19 import _CastLike, cast_like +from spox.opset.ai.onnx.v19 import _Ceil, ceil +from spox.opset.ai.onnx.v19 import _Celu, celu +from spox.opset.ai.onnx.v19 import _CenterCropPad, center_crop_pad +from spox.opset.ai.onnx.v19 import _Clip, clip +from spox.opset.ai.onnx.v19 import _Col2Im, col2_im +from spox.opset.ai.onnx.v19 import _Compress, compress +from spox.opset.ai.onnx.v19 import _Concat, concat +from spox.opset.ai.onnx.v19 import _ConcatFromSequence, concat_from_sequence +from spox.opset.ai.onnx.v19 import _Constant, constant +from spox.opset.ai.onnx.v19 import _Conv, conv +from spox.opset.ai.onnx.v19 import _ConvInteger, conv_integer +from spox.opset.ai.onnx.v19 import _ConvTranspose, conv_transpose +from spox.opset.ai.onnx.v19 import _Cos, cos +from spox.opset.ai.onnx.v19 import _Cosh, cosh +from spox.opset.ai.onnx.v19 import _CumSum, cumsum +from spox.opset.ai.onnx.v19 import _DeformConv, deform_conv +from spox.opset.ai.onnx.v19 import _DepthToSpace, depth_to_space +from spox.opset.ai.onnx.v19 import _DequantizeLinear, dequantize_linear +from spox.opset.ai.onnx.v19 import _Det, det +from spox.opset.ai.onnx.v19 import _Div, div +from spox.opset.ai.onnx.v19 import _Dropout, dropout +from spox.opset.ai.onnx.v19 import _DynamicQuantizeLinear, dynamic_quantize_linear +from spox.opset.ai.onnx.v19 import _Einsum, einsum +from spox.opset.ai.onnx.v19 import _Elu, elu +from spox.opset.ai.onnx.v19 import _Equal, equal +from spox.opset.ai.onnx.v19 import _Erf, erf +from spox.opset.ai.onnx.v19 import _Exp, exp +from spox.opset.ai.onnx.v19 import _Expand, expand +from spox.opset.ai.onnx.v19 import _EyeLike, eye_like +from spox.opset.ai.onnx.v19 import _Flatten, flatten +from spox.opset.ai.onnx.v19 import _Floor, floor +from spox.opset.ai.onnx.v19 import _GRU, gru +from spox.opset.ai.onnx.v19 import _Gather, gather +from spox.opset.ai.onnx.v19 import _GatherElements, gather_elements +from spox.opset.ai.onnx.v19 import _GatherND, gather_nd +from spox.opset.ai.onnx.v19 import _Gemm, gemm +from spox.opset.ai.onnx.v19 import _GlobalAveragePool, global_average_pool +from spox.opset.ai.onnx.v19 import _GlobalLpPool, global_lp_pool +from spox.opset.ai.onnx.v19 import _GlobalMaxPool, global_max_pool +from spox.opset.ai.onnx.v19 import _Greater, greater +from 
spox.opset.ai.onnx.v19 import _GreaterOrEqual, greater_or_equal +from spox.opset.ai.onnx.v19 import _GroupNormalization, group_normalization +from spox.opset.ai.onnx.v19 import _HammingWindow, hamming_window +from spox.opset.ai.onnx.v19 import _HannWindow, hann_window +from spox.opset.ai.onnx.v19 import _HardSigmoid, hard_sigmoid +from spox.opset.ai.onnx.v19 import _HardSwish, hard_swish +from spox.opset.ai.onnx.v19 import _Hardmax, hardmax +from spox.opset.ai.onnx.v19 import _Identity, identity +from spox.opset.ai.onnx.v19 import _If, if_ +from spox.opset.ai.onnx.v19 import _InstanceNormalization, instance_normalization +from spox.opset.ai.onnx.v19 import _LRN, lrn +from spox.opset.ai.onnx.v19 import _LSTM, lstm +from spox.opset.ai.onnx.v19 import _LayerNormalization, layer_normalization +from spox.opset.ai.onnx.v19 import _LeakyRelu, leaky_relu +from spox.opset.ai.onnx.v19 import _Less, less +from spox.opset.ai.onnx.v19 import _LessOrEqual, less_or_equal +from spox.opset.ai.onnx.v19 import _Log, log +from spox.opset.ai.onnx.v19 import _LogSoftmax, log_softmax +from spox.opset.ai.onnx.v19 import _Loop, loop +from spox.opset.ai.onnx.v19 import _LpNormalization, lp_normalization +from spox.opset.ai.onnx.v19 import _LpPool, lp_pool +from spox.opset.ai.onnx.v19 import _MatMul, matmul +from spox.opset.ai.onnx.v19 import _MatMulInteger, matmul_integer +from spox.opset.ai.onnx.v19 import _Max, max +from spox.opset.ai.onnx.v19 import _MaxPool, max_pool +from spox.opset.ai.onnx.v19 import _MaxRoiPool, max_roi_pool +from spox.opset.ai.onnx.v19 import _MaxUnpool, max_unpool +from spox.opset.ai.onnx.v19 import _Mean, mean +from spox.opset.ai.onnx.v19 import _MeanVarianceNormalization, mean_variance_normalization +from spox.opset.ai.onnx.v19 import _MelWeightMatrix, mel_weight_matrix +from spox.opset.ai.onnx.v19 import _Min, min +from spox.opset.ai.onnx.v19 import _Mish, mish +from spox.opset.ai.onnx.v19 import _Mod, mod +from spox.opset.ai.onnx.v19 import _Mul, mul +from spox.opset.ai.onnx.v19 import _Multinomial, multinomial +from spox.opset.ai.onnx.v19 import _Neg, neg +from spox.opset.ai.onnx.v19 import _NegativeLogLikelihoodLoss, negative_log_likelihood_loss +from spox.opset.ai.onnx.v19 import _NonMaxSuppression, non_max_suppression +from spox.opset.ai.onnx.v19 import _NonZero, non_zero +from spox.opset.ai.onnx.v19 import _Not, not_ +from spox.opset.ai.onnx.v19 import _OneHot, one_hot +from spox.opset.ai.onnx.v19 import _Optional, optional +from spox.opset.ai.onnx.v19 import _OptionalGetElement, optional_get_element +from spox.opset.ai.onnx.v19 import _OptionalHasElement, optional_has_element +from spox.opset.ai.onnx.v19 import _Or, or_ +from spox.opset.ai.onnx.v19 import _PRelu, prelu +from spox.opset.ai.onnx.v19 import _Pad, pad +from spox.opset.ai.onnx.v19 import _Pow, pow +from spox.opset.ai.onnx.v19 import _QLinearConv, qlinear_conv +from spox.opset.ai.onnx.v19 import _QLinearMatMul, qlinear_matmul +from spox.opset.ai.onnx.v19 import _QuantizeLinear, quantize_linear +from spox.opset.ai.onnx.v19 import _RNN, rnn +from spox.opset.ai.onnx.v19 import _RandomNormal, random_normal +from spox.opset.ai.onnx.v19 import _RandomNormalLike, random_normal_like +from spox.opset.ai.onnx.v19 import _RandomUniform, random_uniform +from spox.opset.ai.onnx.v19 import _RandomUniformLike, random_uniform_like +from spox.opset.ai.onnx.v19 import _Range, range +from spox.opset.ai.onnx.v19 import _Reciprocal, reciprocal +from spox.opset.ai.onnx.v19 import _ReduceL1, reduce_l1 +from spox.opset.ai.onnx.v19 import 
_ReduceL2, reduce_l2 +from spox.opset.ai.onnx.v19 import _ReduceLogSum, reduce_log_sum +from spox.opset.ai.onnx.v19 import _ReduceLogSumExp, reduce_log_sum_exp +from spox.opset.ai.onnx.v19 import _ReduceMean, reduce_mean +from spox.opset.ai.onnx.v19 import _ReduceProd, reduce_prod +from spox.opset.ai.onnx.v19 import _ReduceSum, reduce_sum +from spox.opset.ai.onnx.v19 import _ReduceSumSquare, reduce_sum_square +from spox.opset.ai.onnx.v19 import _Relu, relu +from spox.opset.ai.onnx.v19 import _Reshape, reshape +from spox.opset.ai.onnx.v19 import _Resize, resize +from spox.opset.ai.onnx.v19 import _ReverseSequence, reverse_sequence +from spox.opset.ai.onnx.v19 import _RoiAlign, roi_align +from spox.opset.ai.onnx.v19 import _Round, round +from spox.opset.ai.onnx.v19 import _STFT, stft +from spox.opset.ai.onnx.v19 import _Scan, scan +from spox.opset.ai.onnx.v19 import _ScatterElements, scatter_elements +from spox.opset.ai.onnx.v19 import _ScatterND, scatter_nd +from spox.opset.ai.onnx.v19 import _Selu, selu +from spox.opset.ai.onnx.v19 import _SequenceAt, sequence_at +from spox.opset.ai.onnx.v19 import _SequenceConstruct, sequence_construct +from spox.opset.ai.onnx.v19 import _SequenceEmpty, sequence_empty +from spox.opset.ai.onnx.v19 import _SequenceErase, sequence_erase +from spox.opset.ai.onnx.v19 import _SequenceInsert, sequence_insert +from spox.opset.ai.onnx.v19 import _SequenceLength, sequence_length +from spox.opset.ai.onnx.v19 import _SequenceMap, sequence_map +from spox.opset.ai.onnx.v19 import _Shape, shape +from spox.opset.ai.onnx.v19 import _Shrink, shrink +from spox.opset.ai.onnx.v19 import _Sigmoid, sigmoid +from spox.opset.ai.onnx.v19 import _Sign, sign +from spox.opset.ai.onnx.v19 import _Sin, sin +from spox.opset.ai.onnx.v19 import _Sinh, sinh +from spox.opset.ai.onnx.v19 import _Size, size +from spox.opset.ai.onnx.v19 import _Slice, slice +from spox.opset.ai.onnx.v19 import _Softmax, softmax +from spox.opset.ai.onnx.v19 import _SoftmaxCrossEntropyLoss, softmax_cross_entropy_loss +from spox.opset.ai.onnx.v19 import _Softplus, softplus +from spox.opset.ai.onnx.v19 import _Softsign, softsign +from spox.opset.ai.onnx.v19 import _SpaceToDepth, space_to_depth +from spox.opset.ai.onnx.v19 import _Split, split +from spox.opset.ai.onnx.v19 import _SplitToSequence, split_to_sequence +from spox.opset.ai.onnx.v19 import _Sqrt, sqrt +from spox.opset.ai.onnx.v19 import _Squeeze, squeeze +from spox.opset.ai.onnx.v19 import _StringNormalizer, string_normalizer +from spox.opset.ai.onnx.v19 import _Sub, sub +from spox.opset.ai.onnx.v19 import _Sum, sum +from spox.opset.ai.onnx.v19 import _Tan, tan +from spox.opset.ai.onnx.v19 import _Tanh, tanh +from spox.opset.ai.onnx.v19 import _TfIdfVectorizer, tf_idf_vectorizer +from spox.opset.ai.onnx.v19 import _ThresholdedRelu, thresholded_relu +from spox.opset.ai.onnx.v19 import _Tile, tile +from spox.opset.ai.onnx.v19 import _TopK, top_k +from spox.opset.ai.onnx.v19 import _Transpose, transpose +from spox.opset.ai.onnx.v19 import _Trilu, trilu +from spox.opset.ai.onnx.v19 import _Unique, unique +from spox.opset.ai.onnx.v19 import _Unsqueeze, unsqueeze +from spox.opset.ai.onnx.v19 import _Where, where +from spox.opset.ai.onnx.v19 import _Xor, xor class _AffineGrid(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -399,7 +234,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ConstantOfShape(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -419,7 +253,6 @@ class Outputs(BaseOutputs): inputs: Inputs 
outputs: Outputs - class _DFT(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -442,7 +275,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Gelu(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -462,7 +294,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GridSample(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -485,7 +316,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ImageDecoder(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -505,7 +335,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _IsInf(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -526,7 +355,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _IsNaN(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -546,7 +374,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceMax(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -568,7 +395,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _ReduceMin(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -590,7 +416,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _RegexFullMatch(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -610,7 +435,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _StringConcat(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -631,7 +455,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _StringSplit(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -653,953 +476,770 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - -def affine_grid( - theta: Var, - size: Var, - *, - align_corners: int = 0, -) -> Var: +def affine_grid(theta: Var, size: Var, *, align_corners: int = 0, ) -> Var: r""" - Generates a 2D or 3D flow field (sampling grid), given a batch of affine - matrices theta - (https://pytorch.org/docs/stable/generated/torch.nn.functional.affine_grid.html). - An affine matrix ``theta`` is applied to a position tensor represented - in its homogeneous expression. Here is an example in 3D: - - :: - - [r00, r01, r02, t0] [x] [x'] - [r10, r11, r12, t1] * [y] = [y'] - [r20, r21, r22, t2] [z] [z'] - [0, 0, 0, 1 ] [1] [1 ] - - where ``(x, y, z)`` is the position in the original space, - ``(x', y', z')`` is the position in the output space. The last row is - always ``[0, 0, 0, 1]`` and is not stored in the affine matrix. - Therefore we have ``theta`` of shape ``(N, 2, 3)`` for 2D or - ``(N, 3, 4)`` for 3D. - - Input ``size`` is used to define grid of positions evenly spaced in the - original 2D or 3D space, with dimensions ranging from ``-1`` to ``1``. - The output ``grid`` contains positions in the output space. - - When ``align_corners=1``, consider ``-1`` and ``1`` to refer to the - centers of the corner pixels (mark ``v`` in illustration). - - :: - - v v v v - |-------------------|------------------| - -1 0 1 - - When ``align_corners=0``, consider ``-1`` and ``1`` to refer to the - outer edge of the corner pixels. - - :: - - v v v v - |------------------|-------------------| - -1 0 1 - - Parameters - ========== - theta - Type T1. - input batch of affine matrices with shape (N, 2, 3) for 2D or (N, 3, 4) - for 3D - size - Type T2. - the target output image size (N, C, H, W) for 2D or (N, C, D, H, W) for - 3D - align_corners - Attribute. 
- if align_corners=1, consider -1 and 1 to refer to the centers of the - corner pixels. if align_corners=0, consider -1 and 1 to refer to the - outer edge the corner pixels. - - Returns - ======= - grid : Var - Type T1. - output tensor of shape (N, H, W, 2) of 2D sample coordinates or (N, D, - H, W, 3) of 3D sample coordinates. - - Notes - ===== - Signature: ``ai.onnx@20::AffineGrid``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(int64)` - """ - return ( - _AffineGrid( - _AffineGrid.Attributes( - align_corners=AttrInt64(align_corners, name="align_corners"), - ), - _AffineGrid.Inputs( - theta=unwrap_vars(theta), - size=unwrap_vars(size), - ), - ) - .get_output_vars( - theta=get_value(theta), - size=get_value(size), - ) - .grid - ) - - -def constant_of_shape( - input: Var, - *, - value: Optional[np.ndarray] = None, -) -> Var: - r""" - Generate a tensor with given value and shape. - - Parameters - ========== - input - Type T1. - 1D tensor. The shape of the expected output tensor. If empty tensor is - given, the output would be a scalar. All values must be >= 0. - value - Attribute. - (Optional) The value of the output elements.Should be a one-element - tensor. If not specified, it defaults to a tensor of value 0 and - datatype float32 - - Returns - ======= - output : Var - Type T2. - Output tensor of shape specified by 'input'.If attribute 'value' is - specified, the value and datatype of the output tensor is taken from - 'value'.If attribute 'value' is not specified, the value in the output - defaults to 0, and the datatype defaults to float32. - - Notes - ===== - Signature: ``ai.onnx@20::ConstantOfShape``. - - Type constraints: - - T1: `tensor(int64)` - - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Generates a 2D or 3D flow field (sampling grid), given a batch of affine +matrices theta +(https://pytorch.org/docs/stable/generated/torch.nn.functional.affine_grid.html). +An affine matrix ``theta`` is applied to a position tensor represented +in its homogeneous expression. Here is an example in 3D: + +:: + + [r00, r01, r02, t0] [x] [x'] + [r10, r11, r12, t1] * [y] = [y'] + [r20, r21, r22, t2] [z] [z'] + [0, 0, 0, 1 ] [1] [1 ] + +where ``(x, y, z)`` is the position in the original space, +``(x', y', z')`` is the position in the output space. The last row is +always ``[0, 0, 0, 1]`` and is not stored in the affine matrix. +Therefore we have ``theta`` of shape ``(N, 2, 3)`` for 2D or +``(N, 3, 4)`` for 3D. + +Input ``size`` is used to define grid of positions evenly spaced in the +original 2D or 3D space, with dimensions ranging from ``-1`` to ``1``. +The output ``grid`` contains positions in the output space. + +When ``align_corners=1``, consider ``-1`` and ``1`` to refer to the +centers of the corner pixels (mark ``v`` in illustration). + +:: + + v v v v + |-------------------|------------------| + -1 0 1 + +When ``align_corners=0``, consider ``-1`` and ``1`` to refer to the +outer edge of the corner pixels. + +:: + + v v v v + |------------------|-------------------| + -1 0 1 + +Parameters +========== +theta + Type T1. + input batch of affine matrices with shape (N, 2, 3) for 2D or (N, 3, 4) + for 3D +size + Type T2. 
+ the target output image size (N, C, H, W) for 2D or (N, C, D, H, W) for + 3D +align_corners + Attribute. + if align_corners=1, consider -1 and 1 to refer to the centers of the + corner pixels. if align_corners=0, consider -1 and 1 to refer to the + outer edge the corner pixels. + +Returns +======= +grid : Var + Type T1. + output tensor of shape (N, H, W, 2) of 2D sample coordinates or (N, D, + H, W, 3) of 3D sample coordinates. + +Notes +===== +Signature: ``ai.onnx@20::AffineGrid``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(int64)` """ - return ( - _ConstantOfShape( - _ConstantOfShape.Attributes( - value=AttrTensor.maybe(value, name="value"), - ), - _ConstantOfShape.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) - - -def dft( - input: Var, - dft_length: Optional[Var] = None, - axis: Optional[Var] = None, - *, - inverse: int = 0, - onesided: int = 0, -) -> Var: - r""" - Computes the discrete Fourier Transform (DFT) of the input. - - Assuming the input has shape ``[M, N]``, where ``N`` is the dimension - over which the DFT is computed and ``M`` denotes the conceptual "all - other dimensions," the DFT ``y[m, k]`` of shape ``[M, N]`` is defined as - - .. math:: y[m, k] = \sum_{n=0}^{N-1} e^{-2 \pi j \frac{k n}{N} } x[m, n] , + return _AffineGrid( + _AffineGrid.Attributes( + align_corners=AttrInt64(align_corners, name="align_corners"), + ), _AffineGrid.Inputs( + theta=unwrap_vars(theta), size=unwrap_vars(size), ), ).get_output_vars( + theta=get_value(theta), size=get_value(size), ).grid - and the inverse transform is defined as - .. math:: x[m, n] = \frac{1}{N} \sum_{k=0}^{N-1} e^{2 \pi j \frac{k n}{N} } y[m, k] , +def constant_of_shape(input: Var, *, value: Optional[np.ndarray] = None, ) -> Var: + r""" +Generate a tensor with given value and shape. + +Parameters +========== +input + Type T1. + 1D tensor. The shape of the expected output tensor. If empty tensor is + given, the output would be a scalar. All values must be >= 0. +value + Attribute. + (Optional) The value of the output elements.Should be a one-element + tensor. If not specified, it defaults to a tensor of value 0 and + datatype float32 + +Returns +======= +output : Var + Type T2. + Output tensor of shape specified by 'input'.If attribute 'value' is + specified, the value and datatype of the output tensor is taken from + 'value'.If attribute 'value' is not specified, the value in the output + defaults to 0, and the datatype defaults to float32. + +Notes +===== +Signature: ``ai.onnx@20::ConstantOfShape``. + +Type constraints: + - T1: `tensor(int64)` + - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + """ + return _ConstantOfShape( + _ConstantOfShape.Attributes( + value=AttrTensor.maybe(value, name="value"), + ), _ConstantOfShape.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output - where :math:`j` is the imaginary unit. - The actual shape of the output is specified in the "output" section. 
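A small NumPy illustration of the two ``align_corners`` conventions pictured in the AffineGrid docstring above, shown as the normalized sample positions along one axis of width ``W = 4`` (the closed form for ``align_corners=0`` is inferred from the diagram):

::

    import numpy as np

    W = 4
    # align_corners=1: -1 and 1 are the centers of the corner pixels.
    centers_of_corner_pixels = np.linspace(-1.0, 1.0, W)
    # align_corners=0: -1 and 1 are the outer edges, so samples sit at the
    # centers of W equal cells spanning [-1, 1].
    centers_of_cells = -1.0 + (2.0 * np.arange(W) + 1.0) / W

    print(centers_of_corner_pixels)  # [-1.  -0.3333  0.3333  1. ]
    print(centers_of_cells)          # [-0.75 -0.25  0.25  0.75]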
+def dft(input: Var, dft_length: Optional[Var] = None, axis: Optional[Var] = None, *, inverse: int = 0, onesided: int = 0, ) -> Var: + r""" +Computes the discrete Fourier Transform (DFT) of the input. + +Assuming the input has shape ``[M, N]``, where ``N`` is the dimension +over which the DFT is computed and ``M`` denotes the conceptual "all +other dimensions," the DFT ``y[m, k]`` of shape ``[M, N]`` is defined as + +.. math:: y[m, k] = \sum_{n=0}^{N-1} e^{-2 \pi j \frac{k n}{N} } x[m, n] , + +and the inverse transform is defined as + +.. math:: x[m, n] = \frac{1}{N} \sum_{k=0}^{N-1} e^{2 \pi j \frac{k n}{N} } y[m, k] , + +where :math:`j` is the imaginary unit. + +The actual shape of the output is specified in the "output" section. + +Reference: https://docs.scipy.org/doc/scipy/tutorial/fft.html + +Parameters +========== +input + Type T1. + For real input, the following shape is expected: + ``[signal_dim0][signal_dim1][signal_dim2]...[signal_dimN][1]``. For + complex input, the following shape is expected: + ``[signal_dim0][signal_dim1][signal_dim2]...[signal_dimN][2]``. The + final dimension represents the real and imaginary parts of the value in + that order. +dft_length + Type T2. + The length of the signal as a scalar. If greater than the axis + dimension, the signal will be zero-padded up to ``dft_length``. If less + than the axis dimension, only the first ``dft_length`` values will be + used as the signal. +axis + Type tensor(int64). + The axis as a scalar on which to perform the DFT. Default is ``-2`` + (last signal axis). Negative value means counting dimensions from the + back. Accepted range is :math:`[-r, -2] \cup [0, r-2]` where + ``r = rank(input)``. The last dimension is for representing complex + numbers and thus is an invalid axis. +inverse + Attribute. + Whether to perform the inverse discrete Fourier Transform. Default is 0, + which corresponds to ``false``. +onesided + Attribute. + If ``onesided`` is ``1`` and input is real, only values for ``k`` in + ``[0, 1, 2, ..., floor(n_fft/2) + 1]`` are returned because the + real-to-complex Fourier transform satisfies the conjugate symmetry, + i.e., ``X[m, k] = X[m, n_fft-k]*``, where ``m`` denotes "all other + dimensions" DFT was not applied on. If the input tensor is complex, + onesided output is not possible. Value can be ``0`` or ``1``. Default is + ``0``. + +Returns +======= +output : Var + Type T1. + The Fourier Transform of the input vector. If ``onesided`` is ``0``, the + following shape is expected: + ``[signal_dim0][signal_dim1][signal_dim2]...[signal_dimN][2]``. If + ``axis=0`` and ``onesided`` is ``1``, the following shape is expected: + ``[floor(signal_dim0/2)+1][signal_dim1][signal_dim2]...[signal_dimN][2]``. + If ``axis=1`` and ``onesided`` is ``1``, the following shape is + expected: + ``[signal_dim0][floor(signal_dim1/2)+1][signal_dim2]...[signal_dimN][2]``. + If ``axis=N`` and ``onesided`` is ``1``, the following shape is + expected: + ``[signal_dim0][signal_dim1][signal_dim2]...[floor(signal_dimN/2)+1][2]``. + The ``signal_dim`` at the specified ``axis`` is equal to the + ``dft_length``. + +Notes +===== +Signature: ``ai.onnx@20::DFT``. 
+ +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(int32)`, `tensor(int64)` + """ + return _DFT( + _DFT.Attributes( + inverse=AttrInt64(inverse, name="inverse"), + onesided=AttrInt64(onesided, name="onesided"), + ), _DFT.Inputs( + input=unwrap_vars(input), dft_length=unwrap_vars(dft_length), axis=unwrap_vars(axis), ), ).get_output_vars( + input=get_value(input), dft_length=get_value(dft_length), axis=get_value(axis), ).output - Reference: https://docs.scipy.org/doc/scipy/tutorial/fft.html - Parameters - ========== - input - Type T1. - For real input, the following shape is expected: - ``[signal_dim0][signal_dim1][signal_dim2]...[signal_dimN][1]``. For - complex input, the following shape is expected: - ``[signal_dim0][signal_dim1][signal_dim2]...[signal_dimN][2]``. The - final dimension represents the real and imaginary parts of the value in - that order. - dft_length - Type T2. - The length of the signal as a scalar. If greater than the axis - dimension, the signal will be zero-padded up to ``dft_length``. If less - than the axis dimension, only the first ``dft_length`` values will be - used as the signal. - axis - Type tensor(int64). - The axis as a scalar on which to perform the DFT. Default is ``-2`` - (last signal axis). Negative value means counting dimensions from the - back. Accepted range is :math:`[-r, -2] \cup [0, r-2]` where - ``r = rank(input)``. The last dimension is for representing complex - numbers and thus is an invalid axis. - inverse - Attribute. - Whether to perform the inverse discrete Fourier Transform. Default is 0, - which corresponds to ``false``. - onesided - Attribute. - If ``onesided`` is ``1`` and input is real, only values for ``k`` in - ``[0, 1, 2, ..., floor(n_fft/2) + 1]`` are returned because the - real-to-complex Fourier transform satisfies the conjugate symmetry, - i.e., ``X[m, k] = X[m, n_fft-k]*``, where ``m`` denotes "all other - dimensions" DFT was not applied on. If the input tensor is complex, - onesided output is not possible. Value can be ``0`` or ``1``. Default is - ``0``. - - Returns - ======= - output : Var - Type T1. - The Fourier Transform of the input vector. If ``onesided`` is ``0``, the - following shape is expected: - ``[signal_dim0][signal_dim1][signal_dim2]...[signal_dimN][2]``. If - ``axis=0`` and ``onesided`` is ``1``, the following shape is expected: - ``[floor(signal_dim0/2)+1][signal_dim1][signal_dim2]...[signal_dimN][2]``. - If ``axis=1`` and ``onesided`` is ``1``, the following shape is - expected: - ``[signal_dim0][floor(signal_dim1/2)+1][signal_dim2]...[signal_dimN][2]``. - If ``axis=N`` and ``onesided`` is ``1``, the following shape is - expected: - ``[signal_dim0][signal_dim1][signal_dim2]...[floor(signal_dimN/2)+1][2]``. - The ``signal_dim`` at the specified ``axis`` is equal to the - ``dft_length``. - - Notes - ===== - Signature: ``ai.onnx@20::DFT``. 
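A short NumPy sketch of the onesided shape rule described above for a real signal of length ``N``: only ``floor(N/2) + 1`` bins are kept, and ``numpy.fft.rfft`` is used here only as a reference point for the conjugate-symmetry argument:

::

    import numpy as np

    N = 10
    x = np.random.default_rng(0).standard_normal(N)

    full_bins = np.fft.fft(x)   # N complex bins, conjugate-symmetric for real input
    half_bins = np.fft.rfft(x)  # floor(N/2) + 1 = 6 complex bins

    assert half_bins.shape[0] == N // 2 + 1
    # Conjugate symmetry X[k] == conj(X[N - k]) is why the upper half is redundant.
    assert np.allclose(full_bins[1:N // 2], np.conj(full_bins[-1:N // 2:-1]))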
- - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(int32)`, `tensor(int64)` - """ - return ( - _DFT( - _DFT.Attributes( - inverse=AttrInt64(inverse, name="inverse"), - onesided=AttrInt64(onesided, name="onesided"), - ), - _DFT.Inputs( - input=unwrap_vars(input), - dft_length=unwrap_vars(dft_length), - axis=unwrap_vars(axis), - ), - ) - .get_output_vars( - input=get_value(input), - dft_length=get_value(dft_length), - axis=get_value(axis), - ) - .output - ) - - -def gelu( - X: Var, - *, - approximate: str = "none", -) -> Var: +def gelu(X: Var, *, approximate: str = "none", ) -> Var: r""" - Gelu takes one input data (Tensor) and produces one output data - (Tensor) where the gaussian error linear units function, - :math:`y = 0.5 * x * (1 + erf(x/sqrt(2)))` is applied to the tensor - elementwise. If the attribute "approximate" is set to "tanh", the - function estimation, - :math:`y = 0.5 * x * (1 + Tanh(sqrt(2/\pi) * (x + 0.044715 * x^3)))` is - used and applied to the tensor elementwise. - - Parameters - ========== - X - Type T. - Input tensor - approximate - Attribute. - Gelu approximation algorithm: ``"tanh"``, - ``"none"``\ (default).\ ``"none"``: do not use - approximation.\ ``"tanh"``: use tanh approximation. - - Returns - ======= - Y : Var - Type T. - Output tensor - - Notes - ===== - Signature: ``ai.onnx@20::Gelu``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` +Gelu takes one input data (Tensor) and produces one output data +(Tensor) where the gaussian error linear units function, +:math:`y = 0.5 * x * (1 + erf(x/sqrt(2)))` is applied to the tensor +elementwise. If the attribute "approximate" is set to "tanh", the +function estimation, +:math:`y = 0.5 * x * (1 + Tanh(sqrt(2/\pi) * (x + 0.044715 * x^3)))` is +used and applied to the tensor elementwise. + +Parameters +========== +X + Type T. + Input tensor +approximate + Attribute. + Gelu approximation algorithm: ``"tanh"``, + ``"none"``\ (default).\ ``"none"``: do not use + approximation.\ ``"tanh"``: use tanh approximation. + +Returns +======= +Y : Var + Type T. + Output tensor + +Notes +===== +Signature: ``ai.onnx@20::Gelu``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _Gelu( - _Gelu.Attributes( - approximate=AttrString(approximate, name="approximate"), - ), - _Gelu.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def grid_sample( - X: Var, - grid: Var, - *, - align_corners: int = 0, - mode: str = "linear", - padding_mode: str = "zeros", -) -> Var: + return _Gelu( + _Gelu.Attributes( + approximate=AttrString(approximate, name="approximate"), + ), _Gelu.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def grid_sample(X: Var, grid: Var, *, align_corners: int = 0, mode: str = "linear", padding_mode: str = "zeros", ) -> Var: r""" - Given an input ``X`` and a flow-field ``grid``, computes the output - ``Y`` using ``X`` values and pixel locations from the ``grid``. For - spatial input ``X`` with shape (N, C, H, W), the ``grid`` will have - shape (N, H_out, W_out, 2), the output ``Y`` will have shape (N, C, - H_out, W_out). For volumetric input ``X`` with shape (N, C, D, H, W), - the ``grid`` will have shape (N, D_out, H_out, W_out, 3), the output - ``Y`` will have shape (N, C, D_out, H_out, W_out). 
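The exact and ``"tanh"``-approximate Gelu formulas from the docstring above, written out in plain NumPy as a sketch (function names are illustrative):

::

    import math
    import numpy as np

    def gelu_exact(x: np.ndarray) -> np.ndarray:
        erf = np.vectorize(math.erf)
        return 0.5 * x * (1.0 + erf(x / math.sqrt(2.0)))

    def gelu_tanh(x: np.ndarray) -> np.ndarray:
        return 0.5 * x * (1.0 + np.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

    x = np.linspace(-3.0, 3.0, 7)
    # The two forms differ only by a small approximation error.
    print(np.max(np.abs(gelu_exact(x) - gelu_tanh(x))))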
More generally, for - an input ``X`` of rank r+2 with shape (N, C, d1, d2, ..., dr), the - ``grid`` will have shape (N, D1_out, D2_out, ..., Dr_out, r), the output - ``Y`` will have shape (N, C, D1_out, D2_out, ..., Dr_out). - - The tensor ``X`` contains values at centers of square pixels (voxels, - etc) locations such as (n, c, d1_in, d2_in, ..., dr_in). The (n, d1_out, - d2_out, ..., dr_out, :) values from the tensor ``grid`` are the - normalized positions for interpolating the values at the (n, c, d1_out, - d2_out, ..., dr_out) locations from the output tensor ``Y`` using a - specified interpolation method (the mode) and a padding mode (for - ``grid`` positions falling outside the 2-dimensional image). - - For example, the values in ``grid[n, h_out, w_out, :]`` are size-2 - vectors specifying normalized positions in the 2-dimensional space of - ``X``. They are used to interpolate output values of - ``Y[n, c, h_out, w_out]``. - - The GridSample operator is often used in doing grid generator and - sampler in the `Spatial Transformer - Networks `__. See also in - `torch.nn.functional.grid_sample `__. - - Parameters - ========== - X - Type T1. - Input tensor of rank r+2 that has shape (N, C, D1, D2, ..., Dr), where N - is the batch size, C is the number of channels, D1, D2, ..., Dr are the - spatial dimensions. - grid - Type T2. - Input offset of shape (N, D1_out, D2_out, ..., Dr_out, r), where D1_out, - D2_out, ..., Dr_out are the spatial dimensions of the grid and output, - and r is the number of spatial dimensions. Grid specifies the sampling - locations normalized by the input spatial dimensions. Therefore, it - should have most values in the range of [-1, 1]. If the grid has values - outside the range of [-1, 1], the corresponding outputs will be handled - as defined by padding_mode. Following computer vision convention, the - coordinates in the length-r location vector are listed from the - innermost tensor dimension to the outermost, the opposite of regular - tensor indexing. - align_corners - Attribute. - If align_corners=1, the extrema (-1 and 1) are considered as referring - to the center points of the input's corner pixels (voxels, etc.). If - align_corners=0, they are instead considered as referring to the corner - points of the input's corner pixels (voxels, etc.), making the sampling - more resolution agnostic. - mode - Attribute. - Three interpolation modes: linear (default), nearest and cubic. The - "linear" mode includes linear and N-linear interpolation modes depending - on the number of spatial dimensions of the input tensor (i.e. linear for - 1 spatial dimension, bilinear for 2 spatial dimensions, etc.). The - "cubic" mode also includes N-cubic interpolation modes following the - same rules. The "nearest" mode rounds to the nearest even index when the - sampling point falls halfway between two indices. - padding_mode - Attribute. - Support padding modes for outside grid values: ``zeros``\ (default), - ``border``, ``reflection``. zeros: use 0 for out-of-bound grid - locations, border: use border values for out-of-bound grid locations, - reflection: use values at locations reflected by the border for - out-of-bound grid locations. If index 0 represents the margin pixel, the - reflected value at index -1 will be the same as the value at index 1. - For location far away from the border, it will keep being reflected - until becoming in bound. If pixel location x = -3.5 reflects by border - -1 and becomes x' = 1.5, then reflects by border 1 and becomes x'' = - 0.5. 
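# Illustrative sketch for the `gelu` constructor defined above (assumes the public
# spox API and the v20 opset alias `op`; the shape is arbitrary).
import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v20 as op

x = argument(Tensor(np.float32, ("N", 256)))
y_exact = op.gelu(x)                     # erf-based form, approximate="none" (default)
y_tanh = op.gelu(x, approximate="tanh")  # tanh approximation described in the docstring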
- - Returns - ======= - Y : Var - Type T1. - Output tensor of rank r+2 that has shape (N, C, D1_out, D2_out, ..., - Dr_out) of the sampled values. For integer input types, intermediate - values are computed as floating point and cast to integer at the end. - - Notes - ===== - Signature: ``ai.onnx@20::GridSample``. - - Type constraints: - - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` +Given an input ``X`` and a flow-field ``grid``, computes the output +``Y`` using ``X`` values and pixel locations from the ``grid``. For +spatial input ``X`` with shape (N, C, H, W), the ``grid`` will have +shape (N, H_out, W_out, 2), the output ``Y`` will have shape (N, C, +H_out, W_out). For volumetric input ``X`` with shape (N, C, D, H, W), +the ``grid`` will have shape (N, D_out, H_out, W_out, 3), the output +``Y`` will have shape (N, C, D_out, H_out, W_out). More generally, for +an input ``X`` of rank r+2 with shape (N, C, d1, d2, ..., dr), the +``grid`` will have shape (N, D1_out, D2_out, ..., Dr_out, r), the output +``Y`` will have shape (N, C, D1_out, D2_out, ..., Dr_out). + +The tensor ``X`` contains values at centers of square pixels (voxels, +etc) locations such as (n, c, d1_in, d2_in, ..., dr_in). The (n, d1_out, +d2_out, ..., dr_out, :) values from the tensor ``grid`` are the +normalized positions for interpolating the values at the (n, c, d1_out, +d2_out, ..., dr_out) locations from the output tensor ``Y`` using a +specified interpolation method (the mode) and a padding mode (for +``grid`` positions falling outside the 2-dimensional image). + +For example, the values in ``grid[n, h_out, w_out, :]`` are size-2 +vectors specifying normalized positions in the 2-dimensional space of +``X``. They are used to interpolate output values of +``Y[n, c, h_out, w_out]``. + +The GridSample operator is often used in doing grid generator and +sampler in the `Spatial Transformer +Networks `__. See also in +`torch.nn.functional.grid_sample `__. + +Parameters +========== +X + Type T1. + Input tensor of rank r+2 that has shape (N, C, D1, D2, ..., Dr), where N + is the batch size, C is the number of channels, D1, D2, ..., Dr are the + spatial dimensions. +grid + Type T2. + Input offset of shape (N, D1_out, D2_out, ..., Dr_out, r), where D1_out, + D2_out, ..., Dr_out are the spatial dimensions of the grid and output, + and r is the number of spatial dimensions. Grid specifies the sampling + locations normalized by the input spatial dimensions. Therefore, it + should have most values in the range of [-1, 1]. If the grid has values + outside the range of [-1, 1], the corresponding outputs will be handled + as defined by padding_mode. Following computer vision convention, the + coordinates in the length-r location vector are listed from the + innermost tensor dimension to the outermost, the opposite of regular + tensor indexing. +align_corners + Attribute. + If align_corners=1, the extrema (-1 and 1) are considered as referring + to the center points of the input's corner pixels (voxels, etc.). If + align_corners=0, they are instead considered as referring to the corner + points of the input's corner pixels (voxels, etc.), making the sampling + more resolution agnostic. +mode + Attribute. 
+ Three interpolation modes: linear (default), nearest and cubic. The + "linear" mode includes linear and N-linear interpolation modes depending + on the number of spatial dimensions of the input tensor (i.e. linear for + 1 spatial dimension, bilinear for 2 spatial dimensions, etc.). The + "cubic" mode also includes N-cubic interpolation modes following the + same rules. The "nearest" mode rounds to the nearest even index when the + sampling point falls halfway between two indices. +padding_mode + Attribute. + Support padding modes for outside grid values: ``zeros``\ (default), + ``border``, ``reflection``. zeros: use 0 for out-of-bound grid + locations, border: use border values for out-of-bound grid locations, + reflection: use values at locations reflected by the border for + out-of-bound grid locations. If index 0 represents the margin pixel, the + reflected value at index -1 will be the same as the value at index 1. + For location far away from the border, it will keep being reflected + until becoming in bound. If pixel location x = -3.5 reflects by border + -1 and becomes x' = 1.5, then reflects by border 1 and becomes x'' = + 0.5. + +Returns +======= +Y : Var + Type T1. + Output tensor of rank r+2 that has shape (N, C, D1_out, D2_out, ..., + Dr_out) of the sampled values. For integer input types, intermediate + values are computed as floating point and cast to integer at the end. + +Notes +===== +Signature: ``ai.onnx@20::GridSample``. + +Type constraints: + - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _GridSample( - _GridSample.Attributes( - align_corners=AttrInt64(align_corners, name="align_corners"), - mode=AttrString(mode, name="mode"), - padding_mode=AttrString(padding_mode, name="padding_mode"), - ), - _GridSample.Inputs( - X=unwrap_vars(X), - grid=unwrap_vars(grid), - ), - ) - .get_output_vars( - X=get_value(X), - grid=get_value(grid), - ) - .Y - ) - - -def image_decoder( - encoded_stream: Var, - *, - pixel_format: str = "RGB", -) -> Var: + return _GridSample( + _GridSample.Attributes( + align_corners=AttrInt64(align_corners, name="align_corners"), + mode=AttrString(mode, name="mode"), + padding_mode=AttrString(padding_mode, name="padding_mode"), + ), _GridSample.Inputs( + X=unwrap_vars(X), grid=unwrap_vars(grid), ), ).get_output_vars( + X=get_value(X), grid=get_value(grid), ).Y + + +def image_decoder(encoded_stream: Var, *, pixel_format: str = "RGB", ) -> Var: r""" - Loads and decodes and image from a file. If it can't decode for any - reason (e.g. corrupted encoded stream, invalid format, it will return an - empty matrix). The following image formats are supported: - - - BMP - - JPEG (note: Lossless JPEG support is optional) - - JPEG2000 - - TIFF - - PNG - - WebP - - Portable image format (PBM, PGM, PPM, PXM, PNM) Decoded images follow - a channel-last layout: (Height, Width, Channels). **JPEG chroma - upsampling method:** When upsampling the chroma components by a - factor of 2, the pixels are linearly interpolated so that the centers - of the output pixels are 1/4 and 3/4 of the way between input pixel - centers. When rounding, 0.5 is rounded down and up at alternative - pixels locations to prevent bias towards larger values (ordered - dither pattern). 
Considering adjacent input pixels A, B, and C, B is - upsampled to pixels B0 and B1 so that - - :: - - B0 = round_half_down((1/4) * A + (3/4) * B) - B1 = round_half_up((3/4) * B + (1/4) * C) - - This method, is the default chroma upsampling method in the - well-established libjpeg-turbo library, also referred as "smooth" or - "fancy" upsampling. - - Parameters - ========== - encoded_stream - Type T1. - Encoded stream - pixel_format - Attribute. - Pixel format. Can be one of "RGB", "BGR", or "Grayscale". - - Returns - ======= - image : Var - Type T2. - Decoded image - - Notes - ===== - Signature: ``ai.onnx@20::ImageDecoder``. - - Type constraints: - - T1: `tensor(uint8)` - - T2: `tensor(uint8)` +Loads and decodes and image from a file. If it can't decode for any +reason (e.g. corrupted encoded stream, invalid format, it will return an +empty matrix). The following image formats are supported: + +- BMP +- JPEG (note: Lossless JPEG support is optional) +- JPEG2000 +- TIFF +- PNG +- WebP +- Portable image format (PBM, PGM, PPM, PXM, PNM) Decoded images follow + a channel-last layout: (Height, Width, Channels). **JPEG chroma + upsampling method:** When upsampling the chroma components by a + factor of 2, the pixels are linearly interpolated so that the centers + of the output pixels are 1/4 and 3/4 of the way between input pixel + centers. When rounding, 0.5 is rounded down and up at alternative + pixels locations to prevent bias towards larger values (ordered + dither pattern). Considering adjacent input pixels A, B, and C, B is + upsampled to pixels B0 and B1 so that + +:: + + B0 = round_half_down((1/4) * A + (3/4) * B) + B1 = round_half_up((3/4) * B + (1/4) * C) + +This method, is the default chroma upsampling method in the +well-established libjpeg-turbo library, also referred as "smooth" or +"fancy" upsampling. + +Parameters +========== +encoded_stream + Type T1. + Encoded stream +pixel_format + Attribute. + Pixel format. Can be one of "RGB", "BGR", or "Grayscale". + +Returns +======= +image : Var + Type T2. + Decoded image + +Notes +===== +Signature: ``ai.onnx@20::ImageDecoder``. + +Type constraints: + - T1: `tensor(uint8)` + - T2: `tensor(uint8)` """ - return ( - _ImageDecoder( - _ImageDecoder.Attributes( - pixel_format=AttrString(pixel_format, name="pixel_format"), - ), - _ImageDecoder.Inputs( - encoded_stream=unwrap_vars(encoded_stream), - ), - ) - .get_output_vars( - encoded_stream=get_value(encoded_stream), - ) - .image - ) - - -def isinf( - X: Var, - *, - detect_negative: int = 1, - detect_positive: int = 1, -) -> Var: + return _ImageDecoder( + _ImageDecoder.Attributes( + pixel_format=AttrString(pixel_format, name="pixel_format"), + ), _ImageDecoder.Inputs( + encoded_stream=unwrap_vars(encoded_stream), ), ).get_output_vars( + encoded_stream=get_value(encoded_stream), ).image + + +def isinf(X: Var, *, detect_negative: int = 1, detect_positive: int = 1, ) -> Var: r""" - Map infinity to true and other values to false. - - Parameters - ========== - X - Type T1. - input - detect_negative - Attribute. - (Optional) Whether map negative infinity to true. Default to 1 so that - negative infinity induces true. Set this attribute to 0 if negative - infinity should be mapped to false. - detect_positive - Attribute. - (Optional) Whether map positive infinity to true. Default to 1 so that - positive infinity induces true. Set this attribute to 0 if positive - infinity should be mapped to false. - - Returns - ======= - Y : Var - Type T2. 
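# Illustrative sketch of the `grid_sample` constructor above; the shapes follow the
# (N, C, H, W) / (N, H_out, W_out, 2) convention from its docstring and are otherwise
# made up (assumes the public spox API and the v20 opset alias `op`).
import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v20 as op

image = argument(Tensor(np.float32, ("N", 3, 32, 32)))  # NCHW input
grid = argument(Tensor(np.float32, ("N", 16, 16, 2)))   # normalized sampling locations
sampled = op.grid_sample(image, grid, mode="nearest", padding_mode="border")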
- output - - Notes - ===== - Signature: ``ai.onnx@20::IsInf``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)` - - T2: `tensor(bool)` +Map infinity to true and other values to false. + +Parameters +========== +X + Type T1. + input +detect_negative + Attribute. + (Optional) Whether map negative infinity to true. Default to 1 so that + negative infinity induces true. Set this attribute to 0 if negative + infinity should be mapped to false. +detect_positive + Attribute. + (Optional) Whether map positive infinity to true. Default to 1 so that + positive infinity induces true. Set this attribute to 0 if positive + infinity should be mapped to false. + +Returns +======= +Y : Var + Type T2. + output + +Notes +===== +Signature: ``ai.onnx@20::IsInf``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)` + - T2: `tensor(bool)` """ - return ( - _IsInf( - _IsInf.Attributes( - detect_negative=AttrInt64(detect_negative, name="detect_negative"), - detect_positive=AttrInt64(detect_positive, name="detect_positive"), - ), - _IsInf.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def isnan( - X: Var, -) -> Var: + return _IsInf( + _IsInf.Attributes( + detect_negative=AttrInt64(detect_negative, name="detect_negative"), + detect_positive=AttrInt64(detect_positive, name="detect_positive"), + ), _IsInf.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def isnan(X: Var, ) -> Var: r""" - Returns which elements of the input are NaN. - - Parameters - ========== - X - Type T1. - input - - Returns - ======= - Y : Var - Type T2. - output - - Notes - ===== - Signature: ``ai.onnx@20::IsNaN``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)` - - T2: `tensor(bool)` +Returns which elements of the input are NaN. + +Parameters +========== +X + Type T1. + input + +Returns +======= +Y : Var + Type T2. + output + +Notes +===== +Signature: ``ai.onnx@20::IsNaN``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)` + - T2: `tensor(bool)` """ - return ( - _IsNaN( - _IsNaN.Attributes(), - _IsNaN.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def reduce_max( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _IsNaN( + _IsNaN.Attributes( + ), _IsNaN.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def reduce_max(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the max of the input tensor's elements along the provided axes. - The resulting tensor has the same rank as the input if ``keepdims`` - equals 1. If ``keepdims`` equals 0, then the resulting tensor has the - reduced dimension pruned. Input tensors of rank zero are valid. 
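# Illustrative sketch of `image_decoder`: it consumes a 1-D uint8 byte stream (e.g. the
# raw contents of a PNG/JPEG file) and yields an HWC uint8 image (assumes the public
# spox API and the v20 opset alias `op`; the dimension name is hypothetical).
import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v20 as op

encoded = argument(Tensor(np.uint8, ("num_bytes",)))
decoded = op.image_decoder(encoded, pixel_format="BGR")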
- Reduction over an empty set of values yields minus infinity (if - supported by the datatype) or the minimum value of the data type - otherwise. - - If the input data type is Boolean, the comparison should consider - ``False < True``. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@20::ReduceMax``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Computes the max of the input tensor's elements along the provided axes. +The resulting tensor has the same rank as the input if ``keepdims`` +equals 1. If ``keepdims`` equals 0, then the resulting tensor has the +reduced dimension pruned. Input tensors of rank zero are valid. +Reduction over an empty set of values yields minus infinity (if +supported by the datatype) or the minimum value of the data type +otherwise. + +If the input data type is Boolean, the comparison should consider +``False < True``. + +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@20::ReduceMax``. 
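# Illustrative sketch of the `isinf` / `isnan` constructors above, which return boolean
# masks; the `detect_*` flags are keyword-only attributes (assumes the public spox API
# and the v20 opset alias `op`).
import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v20 as op

x = argument(Tensor(np.float32, ("N",)))
pos_inf_mask = op.isinf(x, detect_negative=0)  # flag only +inf
nan_mask = op.isnan(x)                         # elementwise NaN test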
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _ReduceMax( - _ReduceMax.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceMax.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def reduce_min( - data: Var, - axes: Optional[Var] = None, - *, - keepdims: int = 1, - noop_with_empty_axes: int = 0, -) -> Var: + return _ReduceMax( + _ReduceMax.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceMax.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def reduce_min(data: Var, axes: Optional[Var] = None, *, keepdims: int = 1, noop_with_empty_axes: int = 0, ) -> Var: r""" - Computes the min of the input tensor's elements along the provided axes. - The resulting tensor has the same rank as the input if ``keepdims`` - equals 1. If ``keepdims`` equals 0, then the resulting tensor has the - reduced dimension pruned. Input tensors of rank zero are valid. - Reduction over an empty set of values yields plus infinity (if supported - by the datatype) or the maximum value of the data type otherwise. - - If the input data type is Boolean, the comparison should consider - ``False < True``. - - The above behavior is similar to numpy, with the exception that numpy - defaults ``keepdims`` to ``False`` instead of ``True``. - - Parameters - ========== - data - Type T. - An input tensor. - axes - Type tensor(int64). - Optional input list of integers, along which to reduce. The default is - to reduce over all the dimensions of the input tensor if - 'noop_with_empty_axes' is false, else act as an Identity op when - 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = - rank(data). - keepdims - Attribute. - Keep the reduced dimension or not, default 1 means keep reduced - dimension. - noop_with_empty_axes - Attribute. - Defines behavior if 'axes' is empty. Default behavior with 'false' is to - reduce all axes. When axes is empty and this attribute is set to true, - input tensor will not be reduced,and the output tensor would be - equivalent to input tensor. - - Returns - ======= - reduced : Var - Type T. - Reduced output tensor. - - Notes - ===== - Signature: ``ai.onnx@20::ReduceMin``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` +Computes the min of the input tensor's elements along the provided axes. +The resulting tensor has the same rank as the input if ``keepdims`` +equals 1. If ``keepdims`` equals 0, then the resulting tensor has the +reduced dimension pruned. Input tensors of rank zero are valid. +Reduction over an empty set of values yields plus infinity (if supported +by the datatype) or the maximum value of the data type otherwise. + +If the input data type is Boolean, the comparison should consider +``False < True``. 
+ +The above behavior is similar to numpy, with the exception that numpy +defaults ``keepdims`` to ``False`` instead of ``True``. + +Parameters +========== +data + Type T. + An input tensor. +axes + Type tensor(int64). + Optional input list of integers, along which to reduce. The default is + to reduce over all the dimensions of the input tensor if + 'noop_with_empty_axes' is false, else act as an Identity op when + 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = + rank(data). +keepdims + Attribute. + Keep the reduced dimension or not, default 1 means keep reduced + dimension. +noop_with_empty_axes + Attribute. + Defines behavior if 'axes' is empty. Default behavior with 'false' is to + reduce all axes. When axes is empty and this attribute is set to true, + input tensor will not be reduced,and the output tensor would be + equivalent to input tensor. + +Returns +======= +reduced : Var + Type T. + Reduced output tensor. + +Notes +===== +Signature: ``ai.onnx@20::ReduceMin``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _ReduceMin( - _ReduceMin.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" - ), - ), - _ReduceMin.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .reduced - ) - - -def regex_full_match( - X: Var, - *, - pattern: Optional[str] = None, -) -> Var: + return _ReduceMin( + _ReduceMin.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64(noop_with_empty_axes, name="noop_with_empty_axes"), + ), _ReduceMin.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).reduced + + +def regex_full_match(X: Var, *, pattern: Optional[str] = None, ) -> Var: r""" - RegexFullMatch performs a full regex match on each element of the input - tensor. If an element fully matches the regex pattern specified as an - attribute, the corresponding element in the output is True and it is - False otherwise. `RE2 `__ - regex syntax is used. - - Parameters - ========== - X - Type T1. - Tensor with strings to match on. - pattern - Attribute. - Regex pattern to match on. This must be valid RE2 syntax. - - Returns - ======= - Y : Var - Type T2. - Tensor of bools indicating if each input string fully matches the regex - pattern specified. - - Notes - ===== - Signature: ``ai.onnx@20::RegexFullMatch``. - - Type constraints: - - T1: `tensor(string)` - - T2: `tensor(bool)` +RegexFullMatch performs a full regex match on each element of the input +tensor. If an element fully matches the regex pattern specified as an +attribute, the corresponding element in the output is True and it is +False otherwise. `RE2 `__ +regex syntax is used. + +Parameters +========== +X + Type T1. + Tensor with strings to match on. +pattern + Attribute. + Regex pattern to match on. This must be valid RE2 syntax. + +Returns +======= +Y : Var + Type T2. + Tensor of bools indicating if each input string fully matches the regex + pattern specified. + +Notes +===== +Signature: ``ai.onnx@20::RegexFullMatch``. 
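# Illustrative sketch of `reduce_max` / `reduce_min`: the reduction axes are passed as an
# optional int64 tensor input rather than an attribute. The `op.constant` call is assumed
# to be the opset's generated Constant constructor (assumes the v20 opset alias `op`).
import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v20 as op

data = argument(Tensor(np.float32, ("N", 10)))
overall_max = op.reduce_max(data)  # no axes given: reduce over every dimension
last_axis = op.constant(value=np.array([-1], dtype=np.int64))
row_min = op.reduce_min(data, last_axis, keepdims=0)  # per-row minimum, rank reduced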
+ +Type constraints: + - T1: `tensor(string)` + - T2: `tensor(bool)` """ - return ( - _RegexFullMatch( - _RegexFullMatch.Attributes( - pattern=AttrString.maybe(pattern, name="pattern"), - ), - _RegexFullMatch.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - .Y - ) - - -def string_concat( - X: Var, - Y: Var, -) -> Var: + return _RegexFullMatch( + _RegexFullMatch.Attributes( + pattern=AttrString.maybe(pattern, name="pattern"), + ), _RegexFullMatch.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), ).Y + + +def string_concat(X: Var, Y: Var, ) -> Var: r""" - StringConcat concatenates string tensors elementwise (with NumPy-style - broadcasting support) - - Parameters - ========== - X - Type T. - Tensor to prepend in concatenation - Y - Type T. - Tensor to append in concatenation - - Returns - ======= - Z : Var - Type T. - Concatenated string tensor - - Notes - ===== - Signature: ``ai.onnx@20::StringConcat``. - - Type constraints: - - T: `tensor(string)` +StringConcat concatenates string tensors elementwise (with NumPy-style +broadcasting support) + +Parameters +========== +X + Type T. + Tensor to prepend in concatenation +Y + Type T. + Tensor to append in concatenation + +Returns +======= +Z : Var + Type T. + Concatenated string tensor + +Notes +===== +Signature: ``ai.onnx@20::StringConcat``. + +Type constraints: + - T: `tensor(string)` """ - return ( - _StringConcat( - _StringConcat.Attributes(), - _StringConcat.Inputs( - X=unwrap_vars(X), - Y=unwrap_vars(Y), - ), - ) - .get_output_vars( - X=get_value(X), - Y=get_value(Y), - ) - .Z - ) - - -def string_split( - X: Var, - *, - delimiter: Optional[str] = None, - maxsplit: Optional[int] = None, -) -> tuple[Var, Var]: + return _StringConcat( + _StringConcat.Attributes( + ), _StringConcat.Inputs( + X=unwrap_vars(X), Y=unwrap_vars(Y), ), ).get_output_vars( + X=get_value(X), Y=get_value(Y), ).Z + + +def string_split(X: Var, *, delimiter: Optional[str] = None, maxsplit: Optional[int] = None, ) -> tuple[Var, Var]: r""" - StringSplit splits a string tensor's elements into substrings based on a - delimiter attribute and a maxsplit attribute. - - The first output of this operator is a tensor of strings representing - the substrings from splitting each input string on the ``delimiter`` - substring. This tensor has one additional rank compared to the input - tensor in order to store the substrings for each input element (where - the input tensor is not empty). Note that, in order to ensure the same - number of elements are present in the final dimension, this tensor will - pad empty strings as illustrated in the examples below. Consecutive - delimiters are not grouped together and are deemed to delimit empty - strings, except if the ``delimiter`` is unspecified or is the empty - string (""). In the case where the ``delimiter`` is unspecified or the - empty string, consecutive whitespace characters are regarded as a single - separator and leading or trailing whitespace is removed in the output. - - The second output tensor represents the number of substrings generated. - ``maxsplit`` can be used to limit the number of splits performed - after - the ``maxsplit``\ th split if the string is not fully split, the - trailing suffix of input string after the final split point is also - added. For elements where fewer splits are possible than specified in - ``maxsplit``, it has no effect. - - Parameters - ========== - X - Type T1. - Tensor of strings to split. - delimiter - Attribute. - Delimiter to split on. 
If left unset or set to the empty string (""), - the input is split on consecutive whitespace. - maxsplit - Attribute. - Maximum number of splits (from left to right). If left unset (or if the - number of possible splits are less than maxsplit), it will make as many - splits as possible. Note that the maximum possible number of substrings - returned with ``maxsplit`` specified is ``maxsplit+1`` since the - remaining suffix after the ``maxsplit``\ th split is included in the - output. - - Returns - ======= - Y : Var - Type T2. - Tensor of substrings representing the outcome of splitting the strings - in the input on the delimiter. Note that to ensure the same number of - elements are present in the final rank, this tensor will pad any - necessary empty strings. - Z : Var - Type T3. - The number of substrings generated for each input element. - - Notes - ===== - Signature: ``ai.onnx@20::StringSplit``. - - Type constraints: - - T1: `tensor(string)` - - T2: `tensor(string)` - - T3: `tensor(int64)` +StringSplit splits a string tensor's elements into substrings based on a +delimiter attribute and a maxsplit attribute. + +The first output of this operator is a tensor of strings representing +the substrings from splitting each input string on the ``delimiter`` +substring. This tensor has one additional rank compared to the input +tensor in order to store the substrings for each input element (where +the input tensor is not empty). Note that, in order to ensure the same +number of elements are present in the final dimension, this tensor will +pad empty strings as illustrated in the examples below. Consecutive +delimiters are not grouped together and are deemed to delimit empty +strings, except if the ``delimiter`` is unspecified or is the empty +string (""). In the case where the ``delimiter`` is unspecified or the +empty string, consecutive whitespace characters are regarded as a single +separator and leading or trailing whitespace is removed in the output. + +The second output tensor represents the number of substrings generated. +``maxsplit`` can be used to limit the number of splits performed - after +the ``maxsplit``\ th split if the string is not fully split, the +trailing suffix of input string after the final split point is also +added. For elements where fewer splits are possible than specified in +``maxsplit``, it has no effect. + +Parameters +========== +X + Type T1. + Tensor of strings to split. +delimiter + Attribute. + Delimiter to split on. If left unset or set to the empty string (""), + the input is split on consecutive whitespace. +maxsplit + Attribute. + Maximum number of splits (from left to right). If left unset (or if the + number of possible splits are less than maxsplit), it will make as many + splits as possible. Note that the maximum possible number of substrings + returned with ``maxsplit`` specified is ``maxsplit+1`` since the + remaining suffix after the ``maxsplit``\ th split is included in the + output. + +Returns +======= +Y : Var + Type T2. + Tensor of substrings representing the outcome of splitting the strings + in the input on the delimiter. Note that to ensure the same number of + elements are present in the final rank, this tensor will pad any + necessary empty strings. +Z : Var + Type T3. + The number of substrings generated for each input element. + +Notes +===== +Signature: ``ai.onnx@20::StringSplit``. 
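# Combined illustrative sketch of the string operators above. It assumes spox models ONNX
# string tensors as `Tensor(np.str_, ...)` and uses the v20 opset alias `op`; the pattern
# and delimiter are made up for the example.
import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v20 as op

prefix = argument(Tensor(np.str_, ("N",)))
suffix = argument(Tensor(np.str_, ("N",)))
joined = op.string_concat(prefix, suffix)                            # elementwise concat
valid = op.regex_full_match(joined, pattern=r"[a-z]+_[0-9]+")        # RE2 full match -> bool
parts, counts = op.string_split(joined, delimiter="_", maxsplit=1)   # two outputs, unpacked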
+ +Type constraints: + - T1: `tensor(string)` + - T2: `tensor(string)` + - T3: `tensor(int64)` """ - return ( - _StringSplit( - _StringSplit.Attributes( - delimiter=AttrString.maybe(delimiter, name="delimiter"), - maxsplit=AttrInt64.maybe(maxsplit, name="maxsplit"), - ), - _StringSplit.Inputs( - X=unwrap_vars(X), - ), - ) - .get_output_vars( - X=get_value(X), - ) - ._unpack_to_any() - ) + return _StringSplit( + _StringSplit.Attributes( + delimiter=AttrString.maybe(delimiter, name="delimiter"), + maxsplit=AttrInt64.maybe(maxsplit, name="maxsplit"), + ), _StringSplit.Inputs( + X=unwrap_vars(X), ), ).get_output_vars( + X=get_value(X), )._unpack_to_any() def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: @@ -2001,4 +1641,4 @@ def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: "Xor": xor, } -__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] + ["const"] +__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] + ["const"] diff --git a/src/spox/opset/ai/onnx/v21.py b/src/spox/opset/ai/onnx/v21.py index 52e3027..b51ae18 100644 --- a/src/spox/opset/ai/onnx/v21.py +++ b/src/spox/opset/ai/onnx/v21.py @@ -1,18 +1,21 @@ -# Copyright (c) QuantCo 2023-2024 -# SPDX-License-Identifier: BSD-3-Clause - # ruff: noqa: E741 -- Allow ambiguous variable name -from collections.abc import Iterable, Sequence +import typing +import warnings from dataclasses import dataclass +from collections.abc import Iterable, Sequence from typing import ( + Any, Callable, Optional, + Union, ) from typing import cast as typing_cast import numpy as np import numpy.typing as npt +from spox._var import Var, VarInfo, result_type, unwrap_vars, get_value +from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._attributes import ( AttrDtype, AttrFloat32, @@ -23,360 +26,187 @@ AttrString, AttrStrings, AttrTensor, + AttrType, ) -from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._graph import Graph, subgraph +from spox._internal_op import intro from spox._node import OpType -from spox._standard import StandardNode -from spox._type_system import Tensor, Type +from spox._standard import InferenceError, StandardNode +from spox._type_system import Tensor, Type, Sequence as SpoxSequence from spox._value_prop import PropValueType -from spox._var import Var, VarInfo, get_value, unwrap_vars -from spox.opset.ai.onnx.v20 import ( - _DFT, - _GRU, - _LRN, - _LSTM, - _RNN, - _STFT, - _Abs, - _Acos, - _Acosh, - _Add, - _AffineGrid, - _And, - _ArgMax, - _ArgMin, - _Asin, - _Asinh, - _Atan, - _Atanh, - _AveragePool, - _BatchNormalization, - _Bernoulli, - _BitShift, - _BitwiseAnd, - _BitwiseNot, - _BitwiseOr, - _BitwiseXor, - _BlackmanWindow, - _Ceil, - _Celu, - _CenterCropPad, - _Clip, - _Col2Im, - _Compress, - _Concat, - _ConcatFromSequence, - _Conv, - _ConvInteger, - _ConvTranspose, - _Cos, - _Cosh, - _CumSum, - _DeformConv, - _DepthToSpace, - _Det, - _Div, - _Dropout, - _DynamicQuantizeLinear, - _Einsum, - _Elu, - _Equal, - _Erf, - _Exp, - _Expand, - _EyeLike, - _Floor, - _Gather, - _GatherElements, - _GatherND, - _Gelu, - _Gemm, - _GlobalAveragePool, - _GlobalLpPool, - _GlobalMaxPool, - _Greater, - _GreaterOrEqual, - _GridSample, - _HammingWindow, - _HannWindow, - _Hardmax, - _HardSigmoid, - _HardSwish, - _ImageDecoder, - _InstanceNormalization, - _IsInf, - _IsNaN, - _LayerNormalization, - _LeakyRelu, - _Less, - _LessOrEqual, - _Log, - _LogSoftmax, - _LpNormalization, - _LpPool, - _MatMul, - _MatMulInteger, - _Max, - _MaxPool, - _MaxRoiPool, - _MaxUnpool, - 
_Mean, - _MeanVarianceNormalization, - _MelWeightMatrix, - _Min, - _Mish, - _Mod, - _Mul, - _Multinomial, - _Neg, - _NegativeLogLikelihoodLoss, - _NonMaxSuppression, - _NonZero, - _Not, - _OneHot, - _Optional, - _OptionalGetElement, - _OptionalHasElement, - _Or, - _Pow, - _PRelu, - _QLinearConv, - _RandomNormal, - _RandomNormalLike, - _RandomUniform, - _RandomUniformLike, - _Range, - _Reciprocal, - _ReduceL1, - _ReduceL2, - _ReduceLogSum, - _ReduceLogSumExp, - _ReduceMax, - _ReduceMean, - _ReduceMin, - _ReduceProd, - _ReduceSum, - _ReduceSumSquare, - _RegexFullMatch, - _Relu, - _Resize, - _ReverseSequence, - _RoiAlign, - _Round, - _ScatterElements, - _ScatterND, - _Selu, - _SequenceAt, - _SequenceConstruct, - _SequenceEmpty, - _SequenceErase, - _SequenceInsert, - _SequenceLength, - _SequenceMap, - _Shrink, - _Sigmoid, - _Sign, - _Sin, - _Sinh, - _Slice, - _Softmax, - _SoftmaxCrossEntropyLoss, - _Softplus, - _Softsign, - _SpaceToDepth, - _Split, - _SplitToSequence, - _Sqrt, - _StringConcat, - _StringNormalizer, - _StringSplit, - _Sub, - _Sum, - _Tan, - _Tanh, - _TfIdfVectorizer, - _ThresholdedRelu, - _Tile, - _TopK, - _Trilu, - _Unique, - _Where, - _Xor, - abs, - acos, - acosh, - add, - affine_grid, - and_, - arg_max, - arg_min, - asin, - asinh, - atan, - atanh, - average_pool, - batch_normalization, - bernoulli, - bit_shift, - bitwise_and, - bitwise_not, - bitwise_or, - bitwise_xor, - blackman_window, - ceil, - celu, - center_crop_pad, - clip, - col2_im, - compress, - concat, - concat_from_sequence, - conv, - conv_integer, - conv_transpose, - cos, - cosh, - cumsum, - deform_conv, - depth_to_space, - det, - dft, - div, - dropout, - dynamic_quantize_linear, - einsum, - elu, - equal, - erf, - exp, - expand, - eye_like, - floor, - gather, - gather_elements, - gather_nd, - gelu, - gemm, - global_average_pool, - global_lp_pool, - global_max_pool, - greater, - greater_or_equal, - grid_sample, - gru, - hamming_window, - hann_window, - hard_sigmoid, - hard_swish, - hardmax, - image_decoder, - instance_normalization, - isinf, - isnan, - layer_normalization, - leaky_relu, - less, - less_or_equal, - log, - log_softmax, - lp_normalization, - lp_pool, - lrn, - lstm, - matmul, - matmul_integer, - max, - max_pool, - max_roi_pool, - max_unpool, - mean, - mean_variance_normalization, - mel_weight_matrix, - min, - mish, - mod, - mul, - multinomial, - neg, - negative_log_likelihood_loss, - non_max_suppression, - non_zero, - not_, - one_hot, - optional, - optional_get_element, - optional_has_element, - or_, - pow, - prelu, - qlinear_conv, - random_normal, - random_normal_like, - random_uniform, - random_uniform_like, - range, - reciprocal, - reduce_l1, - reduce_l2, - reduce_log_sum, - reduce_log_sum_exp, - reduce_max, - reduce_mean, - reduce_min, - reduce_prod, - reduce_sum, - reduce_sum_square, - regex_full_match, - relu, - resize, - reverse_sequence, - rnn, - roi_align, - round, - scatter_elements, - scatter_nd, - selu, - sequence_at, - sequence_construct, - sequence_empty, - sequence_erase, - sequence_insert, - sequence_length, - sequence_map, - shrink, - sigmoid, - sign, - sin, - sinh, - slice, - softmax, - softmax_cross_entropy_loss, - softplus, - softsign, - space_to_depth, - split, - split_to_sequence, - sqrt, - stft, - string_concat, - string_normalizer, - string_split, - sub, - sum, - tan, - tanh, - tf_idf_vectorizer, - thresholded_relu, - tile, - top_k, - trilu, - unique, - where, - xor, -) +from spox.opset.ai.onnx.v20 import _Abs, abs +from spox.opset.ai.onnx.v20 import _Acos, acos +from 
spox.opset.ai.onnx.v20 import _Acosh, acosh +from spox.opset.ai.onnx.v20 import _Add, add +from spox.opset.ai.onnx.v20 import _AffineGrid, affine_grid +from spox.opset.ai.onnx.v20 import _And, and_ +from spox.opset.ai.onnx.v20 import _ArgMax, arg_max +from spox.opset.ai.onnx.v20 import _ArgMin, arg_min +from spox.opset.ai.onnx.v20 import _Asin, asin +from spox.opset.ai.onnx.v20 import _Asinh, asinh +from spox.opset.ai.onnx.v20 import _Atan, atan +from spox.opset.ai.onnx.v20 import _Atanh, atanh +from spox.opset.ai.onnx.v20 import _AveragePool, average_pool +from spox.opset.ai.onnx.v20 import _BatchNormalization, batch_normalization +from spox.opset.ai.onnx.v20 import _Bernoulli, bernoulli +from spox.opset.ai.onnx.v20 import _BitShift, bit_shift +from spox.opset.ai.onnx.v20 import _BitwiseAnd, bitwise_and +from spox.opset.ai.onnx.v20 import _BitwiseNot, bitwise_not +from spox.opset.ai.onnx.v20 import _BitwiseOr, bitwise_or +from spox.opset.ai.onnx.v20 import _BitwiseXor, bitwise_xor +from spox.opset.ai.onnx.v20 import _BlackmanWindow, blackman_window +from spox.opset.ai.onnx.v20 import _Ceil, ceil +from spox.opset.ai.onnx.v20 import _Celu, celu +from spox.opset.ai.onnx.v20 import _CenterCropPad, center_crop_pad +from spox.opset.ai.onnx.v20 import _Clip, clip +from spox.opset.ai.onnx.v20 import _Col2Im, col2_im +from spox.opset.ai.onnx.v20 import _Compress, compress +from spox.opset.ai.onnx.v20 import _Concat, concat +from spox.opset.ai.onnx.v20 import _ConcatFromSequence, concat_from_sequence +from spox.opset.ai.onnx.v20 import _Conv, conv +from spox.opset.ai.onnx.v20 import _ConvInteger, conv_integer +from spox.opset.ai.onnx.v20 import _ConvTranspose, conv_transpose +from spox.opset.ai.onnx.v20 import _Cos, cos +from spox.opset.ai.onnx.v20 import _Cosh, cosh +from spox.opset.ai.onnx.v20 import _CumSum, cumsum +from spox.opset.ai.onnx.v20 import _DFT, dft +from spox.opset.ai.onnx.v20 import _DeformConv, deform_conv +from spox.opset.ai.onnx.v20 import _DepthToSpace, depth_to_space +from spox.opset.ai.onnx.v20 import _Det, det +from spox.opset.ai.onnx.v20 import _Div, div +from spox.opset.ai.onnx.v20 import _Dropout, dropout +from spox.opset.ai.onnx.v20 import _DynamicQuantizeLinear, dynamic_quantize_linear +from spox.opset.ai.onnx.v20 import _Einsum, einsum +from spox.opset.ai.onnx.v20 import _Elu, elu +from spox.opset.ai.onnx.v20 import _Equal, equal +from spox.opset.ai.onnx.v20 import _Erf, erf +from spox.opset.ai.onnx.v20 import _Exp, exp +from spox.opset.ai.onnx.v20 import _Expand, expand +from spox.opset.ai.onnx.v20 import _EyeLike, eye_like +from spox.opset.ai.onnx.v20 import _Floor, floor +from spox.opset.ai.onnx.v20 import _GRU, gru +from spox.opset.ai.onnx.v20 import _Gather, gather +from spox.opset.ai.onnx.v20 import _GatherElements, gather_elements +from spox.opset.ai.onnx.v20 import _GatherND, gather_nd +from spox.opset.ai.onnx.v20 import _Gelu, gelu +from spox.opset.ai.onnx.v20 import _Gemm, gemm +from spox.opset.ai.onnx.v20 import _GlobalAveragePool, global_average_pool +from spox.opset.ai.onnx.v20 import _GlobalLpPool, global_lp_pool +from spox.opset.ai.onnx.v20 import _GlobalMaxPool, global_max_pool +from spox.opset.ai.onnx.v20 import _Greater, greater +from spox.opset.ai.onnx.v20 import _GreaterOrEqual, greater_or_equal +from spox.opset.ai.onnx.v20 import _GridSample, grid_sample +from spox.opset.ai.onnx.v20 import _HammingWindow, hamming_window +from spox.opset.ai.onnx.v20 import _HannWindow, hann_window +from spox.opset.ai.onnx.v20 import _HardSigmoid, hard_sigmoid +from 
spox.opset.ai.onnx.v20 import _HardSwish, hard_swish +from spox.opset.ai.onnx.v20 import _Hardmax, hardmax +from spox.opset.ai.onnx.v20 import _ImageDecoder, image_decoder +from spox.opset.ai.onnx.v20 import _InstanceNormalization, instance_normalization +from spox.opset.ai.onnx.v20 import _IsInf, isinf +from spox.opset.ai.onnx.v20 import _IsNaN, isnan +from spox.opset.ai.onnx.v20 import _LRN, lrn +from spox.opset.ai.onnx.v20 import _LSTM, lstm +from spox.opset.ai.onnx.v20 import _LayerNormalization, layer_normalization +from spox.opset.ai.onnx.v20 import _LeakyRelu, leaky_relu +from spox.opset.ai.onnx.v20 import _Less, less +from spox.opset.ai.onnx.v20 import _LessOrEqual, less_or_equal +from spox.opset.ai.onnx.v20 import _Log, log +from spox.opset.ai.onnx.v20 import _LogSoftmax, log_softmax +from spox.opset.ai.onnx.v20 import _LpNormalization, lp_normalization +from spox.opset.ai.onnx.v20 import _LpPool, lp_pool +from spox.opset.ai.onnx.v20 import _MatMul, matmul +from spox.opset.ai.onnx.v20 import _MatMulInteger, matmul_integer +from spox.opset.ai.onnx.v20 import _Max, max +from spox.opset.ai.onnx.v20 import _MaxPool, max_pool +from spox.opset.ai.onnx.v20 import _MaxRoiPool, max_roi_pool +from spox.opset.ai.onnx.v20 import _MaxUnpool, max_unpool +from spox.opset.ai.onnx.v20 import _Mean, mean +from spox.opset.ai.onnx.v20 import _MeanVarianceNormalization, mean_variance_normalization +from spox.opset.ai.onnx.v20 import _MelWeightMatrix, mel_weight_matrix +from spox.opset.ai.onnx.v20 import _Min, min +from spox.opset.ai.onnx.v20 import _Mish, mish +from spox.opset.ai.onnx.v20 import _Mod, mod +from spox.opset.ai.onnx.v20 import _Mul, mul +from spox.opset.ai.onnx.v20 import _Multinomial, multinomial +from spox.opset.ai.onnx.v20 import _Neg, neg +from spox.opset.ai.onnx.v20 import _NegativeLogLikelihoodLoss, negative_log_likelihood_loss +from spox.opset.ai.onnx.v20 import _NonMaxSuppression, non_max_suppression +from spox.opset.ai.onnx.v20 import _NonZero, non_zero +from spox.opset.ai.onnx.v20 import _Not, not_ +from spox.opset.ai.onnx.v20 import _OneHot, one_hot +from spox.opset.ai.onnx.v20 import _Optional, optional +from spox.opset.ai.onnx.v20 import _OptionalGetElement, optional_get_element +from spox.opset.ai.onnx.v20 import _OptionalHasElement, optional_has_element +from spox.opset.ai.onnx.v20 import _Or, or_ +from spox.opset.ai.onnx.v20 import _PRelu, prelu +from spox.opset.ai.onnx.v20 import _Pow, pow +from spox.opset.ai.onnx.v20 import _QLinearConv, qlinear_conv +from spox.opset.ai.onnx.v20 import _RNN, rnn +from spox.opset.ai.onnx.v20 import _RandomNormal, random_normal +from spox.opset.ai.onnx.v20 import _RandomNormalLike, random_normal_like +from spox.opset.ai.onnx.v20 import _RandomUniform, random_uniform +from spox.opset.ai.onnx.v20 import _RandomUniformLike, random_uniform_like +from spox.opset.ai.onnx.v20 import _Range, range +from spox.opset.ai.onnx.v20 import _Reciprocal, reciprocal +from spox.opset.ai.onnx.v20 import _ReduceL1, reduce_l1 +from spox.opset.ai.onnx.v20 import _ReduceL2, reduce_l2 +from spox.opset.ai.onnx.v20 import _ReduceLogSum, reduce_log_sum +from spox.opset.ai.onnx.v20 import _ReduceLogSumExp, reduce_log_sum_exp +from spox.opset.ai.onnx.v20 import _ReduceMax, reduce_max +from spox.opset.ai.onnx.v20 import _ReduceMean, reduce_mean +from spox.opset.ai.onnx.v20 import _ReduceMin, reduce_min +from spox.opset.ai.onnx.v20 import _ReduceProd, reduce_prod +from spox.opset.ai.onnx.v20 import _ReduceSum, reduce_sum +from spox.opset.ai.onnx.v20 import 
_ReduceSumSquare, reduce_sum_square +from spox.opset.ai.onnx.v20 import _RegexFullMatch, regex_full_match +from spox.opset.ai.onnx.v20 import _Relu, relu +from spox.opset.ai.onnx.v20 import _Resize, resize +from spox.opset.ai.onnx.v20 import _ReverseSequence, reverse_sequence +from spox.opset.ai.onnx.v20 import _RoiAlign, roi_align +from spox.opset.ai.onnx.v20 import _Round, round +from spox.opset.ai.onnx.v20 import _STFT, stft +from spox.opset.ai.onnx.v20 import _ScatterElements, scatter_elements +from spox.opset.ai.onnx.v20 import _ScatterND, scatter_nd +from spox.opset.ai.onnx.v20 import _Selu, selu +from spox.opset.ai.onnx.v20 import _SequenceAt, sequence_at +from spox.opset.ai.onnx.v20 import _SequenceConstruct, sequence_construct +from spox.opset.ai.onnx.v20 import _SequenceEmpty, sequence_empty +from spox.opset.ai.onnx.v20 import _SequenceErase, sequence_erase +from spox.opset.ai.onnx.v20 import _SequenceInsert, sequence_insert +from spox.opset.ai.onnx.v20 import _SequenceLength, sequence_length +from spox.opset.ai.onnx.v20 import _SequenceMap, sequence_map +from spox.opset.ai.onnx.v20 import _Shrink, shrink +from spox.opset.ai.onnx.v20 import _Sigmoid, sigmoid +from spox.opset.ai.onnx.v20 import _Sign, sign +from spox.opset.ai.onnx.v20 import _Sin, sin +from spox.opset.ai.onnx.v20 import _Sinh, sinh +from spox.opset.ai.onnx.v20 import _Slice, slice +from spox.opset.ai.onnx.v20 import _Softmax, softmax +from spox.opset.ai.onnx.v20 import _SoftmaxCrossEntropyLoss, softmax_cross_entropy_loss +from spox.opset.ai.onnx.v20 import _Softplus, softplus +from spox.opset.ai.onnx.v20 import _Softsign, softsign +from spox.opset.ai.onnx.v20 import _SpaceToDepth, space_to_depth +from spox.opset.ai.onnx.v20 import _Split, split +from spox.opset.ai.onnx.v20 import _SplitToSequence, split_to_sequence +from spox.opset.ai.onnx.v20 import _Sqrt, sqrt +from spox.opset.ai.onnx.v20 import _StringConcat, string_concat +from spox.opset.ai.onnx.v20 import _StringNormalizer, string_normalizer +from spox.opset.ai.onnx.v20 import _StringSplit, string_split +from spox.opset.ai.onnx.v20 import _Sub, sub +from spox.opset.ai.onnx.v20 import _Sum, sum +from spox.opset.ai.onnx.v20 import _Tan, tan +from spox.opset.ai.onnx.v20 import _Tanh, tanh +from spox.opset.ai.onnx.v20 import _TfIdfVectorizer, tf_idf_vectorizer +from spox.opset.ai.onnx.v20 import _ThresholdedRelu, thresholded_relu +from spox.opset.ai.onnx.v20 import _Tile, tile +from spox.opset.ai.onnx.v20 import _TopK, top_k +from spox.opset.ai.onnx.v20 import _Trilu, trilu +from spox.opset.ai.onnx.v20 import _Unique, unique +from spox.opset.ai.onnx.v20 import _Where, where +from spox.opset.ai.onnx.v20 import _Xor, xor class _Cast(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -397,7 +227,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _CastLike(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -418,7 +247,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Constant(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -437,9 +265,7 @@ class Outputs(BaseOutputs): output: VarInfo def propagate_values(self, initializers) -> dict[str, PropValueType]: - ((key, raw),) = ( - (k, v.value) for k, v in self.attrs.get_fields().items() if v is not None - ) + ((key, raw),) = ((k, v.value) for k, v in self.attrs.get_fields().items() if v is not None) if key == "value": value = raw elif key == "value_float": @@ -457,18 +283,14 @@ def propagate_values(self, initializers) -> dict[str, 
PropValueType]: elif key == "sparse_value": return {} else: - raise RuntimeError( - f"Could not extract the set Constant value attribute, got: {key}" - ) + raise RuntimeError(f"Could not extract the set Constant value attribute, got: {key}") return {"output": value} - op_type = OpType("Constant", "", 21) attrs: Attributes inputs: BaseInputs outputs: Outputs - class _ConstantOfShape(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -488,7 +310,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _DequantizeLinear(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -511,7 +332,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Flatten(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -531,7 +351,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _GroupNormalization(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -555,7 +374,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Identity(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -575,7 +393,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _If(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -596,7 +413,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Loop(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -618,7 +434,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Pad(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -641,7 +456,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _QLinearMatMul(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -668,7 +482,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _QuantizeLinear(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -693,7 +506,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Reshape(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -714,7 +526,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Scan(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -739,7 +550,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Shape(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -760,7 +570,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Size(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -780,7 +589,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Squeeze(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -801,7 +609,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Transpose(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -821,7 +628,6 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - class _Unsqueeze(StandardNode): @dataclass class Attributes(BaseAttributes): @@ -842,1917 +648,1592 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - -def cast( - input: Var, - *, - saturate: int = 1, - to: npt.DTypeLike, -) -> Var: +def cast(input: Var, *, saturate: int = 1, to: npt.DTypeLike, ) -> Var: r""" - The operator casts the elements of a given input tensor to a data type - specified by the 'to' argument and returns an output tensor of the same - size in the converted type. The 'to' argument must be one of the data - types specified in the 'DataType' enum field in the TensorProto message. 
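# Illustrative check of the re-export pattern used here (assumes both opset modules import
# cleanly): operators unchanged in opset 21 are re-exported from v20 one import per line,
# so the constructor objects are shared, while operators such as Cast are redefined below.
import spox.opset.ai.onnx.v20 as op20
import spox.opset.ai.onnx.v21 as op21

assert op21.gelu is op20.gelu      # same function object, re-exported from v20
assert op21.cast is not op20.cast  # redefined in this module for ai.onnx@21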
- - Casting from string tensor in plain (e.g., "3.14" and "1000") and - scientific numeric representations (e.g., "1e-5" and "1E8") to float - types is supported. For example, converting string "100.5" to an integer - may yield result 100. There are some string literals reserved for - special floating-point values; "+INF" (and "INF"), "-INF", and "NaN" are - positive infinity, negative infinity, and not-a-number, respectively. - Any string which can exactly match "+INF" in a case-insensitive way - would be mapped to positive infinite. Similarly, this case-insensitive - rule is applied to "INF" and "NaN". When casting from numeric tensors to - string tensors, plain floating-point representation (such as - "314.15926") would be used. Converting non-numerical-literal string such - as "Hello World!" is an undefined behavior. Cases of converting string - representing floating-point arithmetic value, such as "2.718", to INT is - an undefined behavior. - - Conversion from a numerical type to any numerical type is always - allowed. User must be aware of precision loss and value change caused by - range difference between two types. For example, a 64-bit float - 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, - converting an integer 36 to Boolean may produce 1 because we truncate - bits which can't be stored in the targeted type. - - In more detail, the conversion among numerical types should follow these - rules if the destination type is not a float 8 type. - - - Casting from floating point to: - - - floating point: +/- infinity if OOR (out of range). - - fixed point: undefined if OOR. - - bool: +/- 0.0 to False; all else to True. - - - Casting from fixed point to: - - - floating point: +/- infinity if OOR. (+ infinity in the case of - uint) - - fixed point: when OOR, discard higher bits and reinterpret (with - respect to two's complement representation for signed types). For - example, 200 (int16) -> -56 (int8). - - bool: zero to False; nonzero to True. - - - Casting from bool to: - - - floating point: ``{1.0, 0.0}``. - - fixed point: ``{1, 0}``. - - bool: no change. - - Float 8 type were introduced to speed up the training of deep models. By - default the conversion of a float *x* obeys to the following rules. - ``[x]`` means the value rounded to the target mantissa width. - - ============== =========== ======== ======== ======== - x E4M3FN E4M3FNUZ E5M2 E5M2FNUZ - ============== =========== ======== ======== ======== - 0 0 0 0 0 - -0 -0 0 -0 0 - NaN NaN NaN NaN NaN - +/- Inf +/- FLT_MAX NaN FLT_MAX NaN - [x] > FLT_MAX FLT_MAX FLT_MAX FLT_MAX FLT_MAX - [x] < -FLT_MAX -FLT_MAX -FLT_MAX -FLT_MAX -FLT_MAX - else RNE RNE RNE RNE - ============== =========== ======== ======== ======== - - The behavior changes if the parameter 'saturate' is set to False. The - rules then become: - - ============== ====== ======== ======= ======== - x E4M3FN E4M3FNUZ E5M2 E5M2FNUZ - ============== ====== ======== ======= ======== - 0 0 0 0 0 - -0 -0 0 -0 0 - NaN NaN NaN NaN NaN - +/- Inf NaN NaN +/- Inf NaN - [x] > FLT_MAX NaN NaN Inf NaN - [x] < -FLT_MAX NaN NaN -Inf NaN - else RNE RNE RNE RNE - ============== ====== ======== ======= ======== - - Parameters - ========== - input - Type T1. - Input tensor to be cast. - saturate - Attribute. - The parameter defines how the conversion behaves if an input value is - out of range of the destination type. It only applies for float 8 - conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). - It is true by default. 
All cases are fully described in two tables - inserted in the operator description. - to - Attribute. - The data type to which the elements of the input tensor are cast. - Strictly must be one of the types from DataType enum in TensorProto - - Returns - ======= - output : Var - Type T2. - Output tensor with the same shape as input with type specified by the - 'to' argument - - Notes - ===== - Signature: ``ai.onnx@21::Cast``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +The operator casts the elements of a given input tensor to a data type +specified by the 'to' argument and returns an output tensor of the same +size in the converted type. The 'to' argument must be one of the data +types specified in the 'DataType' enum field in the TensorProto message. + +Casting from string tensor in plain (e.g., "3.14" and "1000") and +scientific numeric representations (e.g., "1e-5" and "1E8") to float +types is supported. For example, converting string "100.5" to an integer +may yield result 100. There are some string literals reserved for +special floating-point values; "+INF" (and "INF"), "-INF", and "NaN" are +positive infinity, negative infinity, and not-a-number, respectively. +Any string which can exactly match "+INF" in a case-insensitive way +would be mapped to positive infinite. Similarly, this case-insensitive +rule is applied to "INF" and "NaN". When casting from numeric tensors to +string tensors, plain floating-point representation (such as +"314.15926") would be used. Converting non-numerical-literal string such +as "Hello World!" is an undefined behavior. Cases of converting string +representing floating-point arithmetic value, such as "2.718", to INT is +an undefined behavior. + +Conversion from a numerical type to any numerical type is always +allowed. User must be aware of precision loss and value change caused by +range difference between two types. For example, a 64-bit float +3.1415926459 may be round to a 32-bit float 3.141592. Similarly, +converting an integer 36 to Boolean may produce 1 because we truncate +bits which can't be stored in the targeted type. + +In more detail, the conversion among numerical types should follow these +rules if the destination type is not a float 8 type. + +- Casting from floating point to: + + - floating point: +/- infinity if OOR (out of range). + - fixed point: undefined if OOR. + - bool: +/- 0.0 to False; all else to True. + +- Casting from fixed point to: + + - floating point: +/- infinity if OOR. (+ infinity in the case of + uint) + - fixed point: when OOR, discard higher bits and reinterpret (with + respect to two's complement representation for signed types). For + example, 200 (int16) -> -56 (int8). + - bool: zero to False; nonzero to True. + +- Casting from bool to: + + - floating point: ``{1.0, 0.0}``. 
+ - fixed point: ``{1, 0}``. + - bool: no change. + +Float 8 type were introduced to speed up the training of deep models. By +default the conversion of a float *x* obeys to the following rules. +``[x]`` means the value rounded to the target mantissa width. + +============== =========== ======== ======== ======== +x E4M3FN E4M3FNUZ E5M2 E5M2FNUZ +============== =========== ======== ======== ======== +0 0 0 0 0 +-0 -0 0 -0 0 +NaN NaN NaN NaN NaN ++/- Inf +/- FLT_MAX NaN FLT_MAX NaN +[x] > FLT_MAX FLT_MAX FLT_MAX FLT_MAX FLT_MAX +[x] < -FLT_MAX -FLT_MAX -FLT_MAX -FLT_MAX -FLT_MAX +else RNE RNE RNE RNE +============== =========== ======== ======== ======== + +The behavior changes if the parameter 'saturate' is set to False. The +rules then become: + +============== ====== ======== ======= ======== +x E4M3FN E4M3FNUZ E5M2 E5M2FNUZ +============== ====== ======== ======= ======== +0 0 0 0 0 +-0 -0 0 -0 0 +NaN NaN NaN NaN NaN ++/- Inf NaN NaN +/- Inf NaN +[x] > FLT_MAX NaN NaN Inf NaN +[x] < -FLT_MAX NaN NaN -Inf NaN +else RNE RNE RNE RNE +============== ====== ======== ======= ======== + +Parameters +========== +input + Type T1. + Input tensor to be cast. +saturate + Attribute. + The parameter defines how the conversion behaves if an input value is + out of range of the destination type. It only applies for float 8 + conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). + It is true by default. All cases are fully described in two tables + inserted in the operator description. +to + Attribute. + The data type to which the elements of the input tensor are cast. + Strictly must be one of the types from DataType enum in TensorProto + +Returns +======= +output : Var + Type T2. + Output tensor with the same shape as input with type specified by the + 'to' argument + +Notes +===== +Signature: ``ai.onnx@21::Cast``. + +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Cast( - _Cast.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - to=AttrDtype(to, name="to"), - ), - _Cast.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _Cast( + _Cast.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + to=AttrDtype(to, name="to"), + ), _Cast.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def cast_like( - input: Var, - target_type: Var, - *, - saturate: int = 1, -) -> Var: +def cast_like(input: Var, target_type: Var, *, saturate: int = 1, ) -> Var: r""" - The operator casts the elements of a given input tensor (the first - input) to the same data type as the elements of the second input tensor. - See documentation of the Cast operator for further details. - - Parameters - ========== - input - Type T1. 
- Input tensor to be cast. - target_type - Type T2. - The (first) input tensor will be cast to produce a tensor of the same - type as this (second input) tensor. - saturate - Attribute. - The parameter defines how the conversion behaves if an input value is - out of range of the destination type. It only applies for float 8 - conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). - It is true by default. Please refer to operator Cast description for - further details. - - Returns - ======= - output : Var - Type T2. - Output tensor produced by casting the first input tensor to have the - same type as the second input tensor. - - Notes - ===== - Signature: ``ai.onnx@21::CastLike``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +The operator casts the elements of a given input tensor (the first +input) to the same data type as the elements of the second input tensor. +See documentation of the Cast operator for further details. + +Parameters +========== +input + Type T1. + Input tensor to be cast. +target_type + Type T2. + The (first) input tensor will be cast to produce a tensor of the same + type as this (second input) tensor. +saturate + Attribute. + The parameter defines how the conversion behaves if an input value is + out of range of the destination type. It only applies for float 8 + conversion (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). + It is true by default. Please refer to operator Cast description for + further details. + +Returns +======= +output : Var + Type T2. + Output tensor produced by casting the first input tensor to have the + same type as the second input tensor. + +Notes +===== +Signature: ``ai.onnx@21::CastLike``. 
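As a quick orientation for readers of this hunk, here is a minimal, hedged usage sketch for the `cast` and `cast_like` constructors above. It assumes the generated module shown in this diff is importable as `spox.opset.ai.onnx.v21` (adjust the import to wherever the file lives in your checkout); it is illustrative only and not part of the generated file.

import numpy as np
from spox import Tensor, argument, build
import spox.opset.ai.onnx.v21 as op

x = argument(Tensor(np.float32, ("N",)))   # symbolic float32 input
as_int = op.cast(x, to=np.int64)           # Cast: float32 -> int64
back = op.cast_like(as_int, x)             # CastLike: follow x's element type
model = build(inputs={"x": x}, outputs={"y": back})  # onnx.ModelProto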
+ +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` + - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _CastLike( - _CastLike.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - ), - _CastLike.Inputs( - input=unwrap_vars(input), - target_type=unwrap_vars(target_type), - ), - ) - .get_output_vars( - input=get_value(input), - target_type=get_value(target_type), - ) - .output - ) + return _CastLike( + _CastLike.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + ), _CastLike.Inputs( + input=unwrap_vars(input), target_type=unwrap_vars(target_type), ), ).get_output_vars( + input=get_value(input), target_type=get_value(target_type), ).output -def constant( - *, - value: Optional[np.ndarray] = None, - value_float: Optional[float] = None, - value_floats: Optional[Iterable[float]] = None, - value_int: Optional[int] = None, - value_ints: Optional[Iterable[int]] = None, - value_string: Optional[str] = None, - value_strings: Optional[Iterable[str]] = None, -) -> Var: +def constant(*, value: Optional[np.ndarray] = None, value_float: Optional[float] = None, value_floats: Optional[Iterable[float]] = None, value_int: Optional[int] = None, value_ints: Optional[Iterable[int]] = None, value_string: Optional[str] = None, value_strings: Optional[Iterable[str]] = None, ) -> Var: r""" - This operator produces a constant tensor. Exactly one of the provided - attributes, either value, sparse_value, or value\_\* must be specified. - - Parameters - ========== - sparse_value - Attribute. - The value for the elements of the output tensor in sparse format. - value - Attribute. - The value for the elements of the output tensor. - value_float - Attribute. - The value for the sole element for the scalar, float32, output tensor. - value_floats - Attribute. - The values for the elements for the 1D, float32, output tensor. - value_int - Attribute. - The value for the sole element for the scalar, int64, output tensor. - value_ints - Attribute. - The values for the elements for the 1D, int64, output tensor. - value_string - Attribute. - The value for the sole element for the scalar, UTF-8 string, output - tensor. - value_strings - Attribute. - The values for the elements for the 1D, UTF-8 string, output tensor. - - Returns - ======= - output : Var - Type T. - Output tensor containing the same value of the provided tensor. - - Notes - ===== - Signature: ``ai.onnx@21::Constant``. 
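A hedged sketch of calling the `constant` constructor above: per its docstring, exactly one of the value_* attributes may be set per call. It assumes the same `spox.opset.ai.onnx.v21` import as the previous sketch.

import numpy as np
import spox.opset.ai.onnx.v21 as op

c_ints = op.constant(value_ints=[1, 2, 3])                 # 1-D int64 constant
c_tensor = op.constant(value=np.eye(2, dtype=np.float32))  # constant from an ndarray
# Setting several value_* attributes in one call violates the operator contract.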
- - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +This operator produces a constant tensor. Exactly one of the provided +attributes, either value, sparse_value, or value\_\* must be specified. + +Parameters +========== +sparse_value + Attribute. + The value for the elements of the output tensor in sparse format. +value + Attribute. + The value for the elements of the output tensor. +value_float + Attribute. + The value for the sole element for the scalar, float32, output tensor. +value_floats + Attribute. + The values for the elements for the 1D, float32, output tensor. +value_int + Attribute. + The value for the sole element for the scalar, int64, output tensor. +value_ints + Attribute. + The values for the elements for the 1D, int64, output tensor. +value_string + Attribute. + The value for the sole element for the scalar, UTF-8 string, output + tensor. +value_strings + Attribute. + The values for the elements for the 1D, UTF-8 string, output tensor. + +Returns +======= +output : Var + Type T. + Output tensor containing the same value of the provided tensor. + +Notes +===== +Signature: ``ai.onnx@21::Constant``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Constant( - _Constant.Attributes( - value=AttrTensor.maybe(value, name="value"), - value_float=AttrFloat32.maybe(value_float, name="value_float"), - value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), - value_int=AttrInt64.maybe(value_int, name="value_int"), - value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), - value_string=AttrString.maybe(value_string, name="value_string"), - value_strings=AttrStrings.maybe(value_strings, name="value_strings"), - ), - _Constant.Inputs(), - ) - .get_output_vars() - .output - ) - - -def constant_of_shape( - input: Var, - *, - value: Optional[np.ndarray] = None, -) -> Var: + return _Constant( + _Constant.Attributes( + value=AttrTensor.maybe(value, name="value"), + value_float=AttrFloat32.maybe(value_float, name="value_float"), + value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), + value_int=AttrInt64.maybe(value_int, name="value_int"), + value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), + value_string=AttrString.maybe(value_string, name="value_string"), + value_strings=AttrStrings.maybe(value_strings, name="value_strings"), + ), _Constant.Inputs( + ), ).get_output_vars( + ).output + + +def constant_of_shape(input: Var, *, value: Optional[np.ndarray] = None, ) -> Var: r""" - Generate a tensor with given value and shape. - - Parameters - ========== - input - Type T1. - 1D tensor. The shape of the expected output tensor. If empty tensor is - given, the output would be a scalar. All values must be >= 0. - value - Attribute. 
- (Optional) The value of the output elements.Should be a one-element - tensor. If not specified, it defaults to a tensor of value 0 and - datatype float32 - - Returns - ======= - output : Var - Type T2. - Output tensor of shape specified by 'input'.If attribute 'value' is - specified, the value and datatype of the output tensor is taken from - 'value'.If attribute 'value' is not specified, the value in the output - defaults to 0, and the datatype defaults to float32. - - Notes - ===== - Signature: ``ai.onnx@21::ConstantOfShape``. - - Type constraints: - - T1: `tensor(int64)` - - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +Generate a tensor with given value and shape. + +Parameters +========== +input + Type T1. + 1D tensor. The shape of the expected output tensor. If empty tensor is + given, the output would be a scalar. All values must be >= 0. +value + Attribute. + (Optional) The value of the output elements.Should be a one-element + tensor. If not specified, it defaults to a tensor of value 0 and + datatype float32 + +Returns +======= +output : Var + Type T2. + Output tensor of shape specified by 'input'.If attribute 'value' is + specified, the value and datatype of the output tensor is taken from + 'value'.If attribute 'value' is not specified, the value in the output + defaults to 0, and the datatype defaults to float32. + +Notes +===== +Signature: ``ai.onnx@21::ConstantOfShape``. + +Type constraints: + - T1: `tensor(int64)` + - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _ConstantOfShape( - _ConstantOfShape.Attributes( - value=AttrTensor.maybe(value, name="value"), - ), - _ConstantOfShape.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _ConstantOfShape( + _ConstantOfShape.Attributes( + value=AttrTensor.maybe(value, name="value"), + ), _ConstantOfShape.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def dequantize_linear( - x: Var, - x_scale: Var, - x_zero_point: Optional[Var] = None, - *, - axis: int = 1, - block_size: int = 0, -) -> Var: +def dequantize_linear(x: Var, x_scale: Var, x_zero_point: Optional[Var] = None, *, axis: int = 1, block_size: int = 0, ) -> Var: r""" - The linear dequantization operator. It consumes a quantized tensor, a - scale, and a zero point to compute the full-precision tensor. The - dequantization formula is ``y = (x - x_zero_point) * x_scale``. - ``x_scale`` and ``x_zero_point`` must have the same shape, determining - the quantization's granularity: a scalar for per-tensor/per-layer - quantization, a 1-D tensor for per-axis quantization, or have a rank - identical to the input for blocked quantization. See QuantizeLinear for - details on quantization granularity. - - ``x_zero_point`` and ``x`` must have the same type. ``x`` and ``y`` must - have the same shape. 
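The per-tensor case of the dequantization formula quoted above, ``y = (x - x_zero_point) * x_scale``, can be checked with plain numpy; this is a hedged numeric sketch, independent of spox.

import numpy as np

x = np.array([0, 128, 255], dtype=np.uint8)
x_scale = np.float32(0.5)
x_zero_point = np.uint8(128)
y = (x.astype(np.float32) - np.float32(x_zero_point)) * x_scale
# y -> array([-64. ,   0. ,  63.5], dtype=float32)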
In the case of dequantizing ``int32``, there's no - zero point (zero point is supposed to be 0). ``zero-point`` is usually - not used in the case of float8 types quantization, but the - dequantization formula remains the same for consistency, and ``x_scale`` - still determines the output type. - - Parameters - ========== - x - Type T1. - N-D quantized input tensor to be de-quantized. - x_scale - Type T2. - Scale for input ``x``. For per-tensor/layer dequantization the scale is - a scalar, for per per-axis dequantization it is a 1-D Tensor and for - blocked dequantization it has the same shape as the input, except for - one dimension in which blocking is performed. - x_zero_point - Type T1. - Zero point for input ``x``. Shape must match x_scale. It's optional. - Zero point is 0 when it's not specified. - axis - Attribute. - (Optional) The axis of the dequantizing dimension of the input tensor. - Used for per-axis and blocked quantization. Negative value means - counting dimensions from the back. Accepted range is ``[-r, r-1]`` where - ``r = rank(input)``. - block_size - Attribute. - (Optional) The size of the quantization block (number of times every - scale is replicated). Used only for blocked quantization. The block size - is a positive integer. Given ``x`` shape ``(D0, ..., Di, ..., Dn)``, - ``y_scale`` shape ``(S0, ... Si, ...Sn)`` and ``axis=i``, the accepted - range is ``[ceil(Di/Si), ceil(Di/(Si-1))-1]`` - - Returns - ======= - y : Var - Type T2. - N-D full precision output tensor. It has same shape as input ``x``. - - Notes - ===== - Signature: ``ai.onnx@21::DequantizeLinear``. - - Type constraints: - - T1: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint4)`, `tensor(uint8)` - - T2: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)` +The linear dequantization operator. It consumes a quantized tensor, a +scale, and a zero point to compute the full-precision tensor. The +dequantization formula is ``y = (x - x_zero_point) * x_scale``. +``x_scale`` and ``x_zero_point`` must have the same shape, determining +the quantization's granularity: a scalar for per-tensor/per-layer +quantization, a 1-D tensor for per-axis quantization, or have a rank +identical to the input for blocked quantization. See QuantizeLinear for +details on quantization granularity. + +``x_zero_point`` and ``x`` must have the same type. ``x`` and ``y`` must +have the same shape. In the case of dequantizing ``int32``, there's no +zero point (zero point is supposed to be 0). ``zero-point`` is usually +not used in the case of float8 types quantization, but the +dequantization formula remains the same for consistency, and ``x_scale`` +still determines the output type. + +Parameters +========== +x + Type T1. + N-D quantized input tensor to be de-quantized. +x_scale + Type T2. + Scale for input ``x``. For per-tensor/layer dequantization the scale is + a scalar, for per per-axis dequantization it is a 1-D Tensor and for + blocked dequantization it has the same shape as the input, except for + one dimension in which blocking is performed. +x_zero_point + Type T1. + Zero point for input ``x``. Shape must match x_scale. It's optional. + Zero point is 0 when it's not specified. +axis + Attribute. + (Optional) The axis of the dequantizing dimension of the input tensor. + Used for per-axis and blocked quantization. Negative value means + counting dimensions from the back. 
Accepted range is ``[-r, r-1]`` where + ``r = rank(input)``. +block_size + Attribute. + (Optional) The size of the quantization block (number of times every + scale is replicated). Used only for blocked quantization. The block size + is a positive integer. Given ``x`` shape ``(D0, ..., Di, ..., Dn)``, + ``y_scale`` shape ``(S0, ... Si, ...Sn)`` and ``axis=i``, the accepted + range is ``[ceil(Di/Si), ceil(Di/(Si-1))-1]`` + +Returns +======= +y : Var + Type T2. + N-D full precision output tensor. It has same shape as input ``x``. + +Notes +===== +Signature: ``ai.onnx@21::DequantizeLinear``. + +Type constraints: + - T1: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint4)`, `tensor(uint8)` + - T2: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)` """ - return ( - _DequantizeLinear( - _DequantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - block_size=AttrInt64(block_size, name="block_size"), - ), - _DequantizeLinear.Inputs( - x=unwrap_vars(x), - x_scale=unwrap_vars(x_scale), - x_zero_point=unwrap_vars(x_zero_point), - ), - ) - .get_output_vars( - x=get_value(x), - x_scale=get_value(x_scale), - x_zero_point=get_value(x_zero_point), - ) - .y - ) + return _DequantizeLinear( + _DequantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + block_size=AttrInt64(block_size, name="block_size"), + ), _DequantizeLinear.Inputs( + x=unwrap_vars(x), x_scale=unwrap_vars(x_scale), x_zero_point=unwrap_vars(x_zero_point), ), ).get_output_vars( + x=get_value(x), x_scale=get_value(x_scale), x_zero_point=get_value(x_zero_point), ).y -def flatten( - input: Var, - *, - axis: int = 1, -) -> Var: +def flatten(input: Var, *, axis: int = 1, ) -> Var: r""" - Flattens the input tensor into a 2D matrix. If input tensor has shape - (d_0, d_1, ... d_n) then the output will have shape (d_0 X d_1 ... - d\_(axis-1), d_axis X d\_(axis+1) ... X dn). - - Parameters - ========== - input - Type T. - A tensor of rank >= axis. - axis - Attribute. - Indicate up to which input dimensions (exclusive) should be flattened to - the outer dimension of the output. The value for axis must be in the - range [-r, r], where r is the rank of the input tensor. Negative value - means counting dimensions from the back. When axis = 0, the shape of the - output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input - tensor is (d_0, d_1, ... d_n). - - Returns - ======= - output : Var - Type T. - A 2D tensor with the contents of the input tensor, with input dimensions - up to axis flattened to the outer dimension of the output and remaining - input dimensions flattened into the inner dimension of the output. - - Notes - ===== - Signature: ``ai.onnx@21::Flatten``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +Flattens the input tensor into a 2D matrix. If input tensor has shape +(d_0, d_1, ... d_n) then the output will have shape (d_0 X d_1 ... +d\_(axis-1), d_axis X d\_(axis+1) ... X dn). + +Parameters +========== +input + Type T. + A tensor of rank >= axis. +axis + Attribute. 
+ Indicate up to which input dimensions (exclusive) should be flattened to + the outer dimension of the output. The value for axis must be in the + range [-r, r], where r is the rank of the input tensor. Negative value + means counting dimensions from the back. When axis = 0, the shape of the + output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input + tensor is (d_0, d_1, ... d_n). + +Returns +======= +output : Var + Type T. + A 2D tensor with the contents of the input tensor, with input dimensions + up to axis flattened to the outer dimension of the output and remaining + input dimensions flattened into the inner dimension of the output. + +Notes +===== +Signature: ``ai.onnx@21::Flatten``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Flatten( - _Flatten.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Flatten.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _Flatten( + _Flatten.Attributes( + axis=AttrInt64(axis, name="axis"), + ), _Flatten.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def group_normalization( - X: Var, - scale: Var, - bias: Var, - *, - epsilon: float = 9.999999747378752e-06, - num_groups: int, - stash_type: int = 1, -) -> Var: +def group_normalization(X: Var, scale: Var, bias: Var, *, epsilon: float = 9.999999747378752e-06, num_groups: int, stash_type: int = 1, ) -> Var: r""" - A GroupNormalization function. Carries out group normalization as - described in the paper https://arxiv.org/abs/1803.08494 - - This operator transforms input according to - - :: - - y = scale * (x - mean) / sqrt(variance + epsilon) + bias, - - where the mean and variance are computed per instance per group of - channels, and ``scale`` and ``bias`` should be specified for each group - of channels. The number of groups ``num_groups`` should be divisible by - the number of channels so that there are an equal number of channels per - group. - - The overall computation has two stages: the first stage normalizes the - elements to have zero mean and unit variance for each instance in each - group, and the second stage scales and shifts the results of the first - stage. The floating-point precision used in the first stage is - determined by the ``stash_type`` attribute. For example, if - ``stash_type`` is 1, the operator casts all input variables to 32-bit - float, performs the computation, and finally casts the normalized - results back to the original type of ``X``. The second stage does not - depend on ``stash_type``. - - When the number of groups is the same as the number of channels, this - operator is equivalent to InstanceNormalization. When there is only one - group, this operator is equivalent to LayerNormalization. - - Parameters - ========== - X - Type T. - Input data tensor. Dimensions for image cases are ``(N x C x H x W)``, - where ``N`` is the batch size, ``C`` is the number of channels, and - ``H`` and ``W`` are the height and width of the data. Statistics are - computed for every group of channels over ``C``, ``H``, and ``W``. 
For - non-image cases, the dimensions are in the form of - ``(N x C x D1 x D2 ... Dn)``. - scale - Type T. - Scale tensor of shape ``(C)``. - bias - Type T. - Bias tensor of shape ``(C)``. - epsilon - Attribute. - The epsilon value to use to avoid division by zero. - num_groups - Attribute. - The number of groups of channels. It should be a divisor of the number - of channels ``C``. - stash_type - Attribute. - The floating-point precision used in stage one of the computation. - - Returns - ======= - Y : Var - Type T. - The output tensor of the same shape as ``X``. - - Notes - ===== - Signature: ``ai.onnx@21::GroupNormalization``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` +A GroupNormalization function. Carries out group normalization as +described in the paper https://arxiv.org/abs/1803.08494 + +This operator transforms input according to + +:: + + y = scale * (x - mean) / sqrt(variance + epsilon) + bias, + +where the mean and variance are computed per instance per group of +channels, and ``scale`` and ``bias`` should be specified for each group +of channels. The number of groups ``num_groups`` should be divisible by +the number of channels so that there are an equal number of channels per +group. + +The overall computation has two stages: the first stage normalizes the +elements to have zero mean and unit variance for each instance in each +group, and the second stage scales and shifts the results of the first +stage. The floating-point precision used in the first stage is +determined by the ``stash_type`` attribute. For example, if +``stash_type`` is 1, the operator casts all input variables to 32-bit +float, performs the computation, and finally casts the normalized +results back to the original type of ``X``. The second stage does not +depend on ``stash_type``. + +When the number of groups is the same as the number of channels, this +operator is equivalent to InstanceNormalization. When there is only one +group, this operator is equivalent to LayerNormalization. + +Parameters +========== +X + Type T. + Input data tensor. Dimensions for image cases are ``(N x C x H x W)``, + where ``N`` is the batch size, ``C`` is the number of channels, and + ``H`` and ``W`` are the height and width of the data. Statistics are + computed for every group of channels over ``C``, ``H``, and ``W``. For + non-image cases, the dimensions are in the form of + ``(N x C x D1 x D2 ... Dn)``. +scale + Type T. + Scale tensor of shape ``(C)``. +bias + Type T. + Bias tensor of shape ``(C)``. +epsilon + Attribute. + The epsilon value to use to avoid division by zero. +num_groups + Attribute. + The number of groups of channels. It should be a divisor of the number + of channels ``C``. +stash_type + Attribute. + The floating-point precision used in stage one of the computation. + +Returns +======= +Y : Var + Type T. + The output tensor of the same shape as ``X``. + +Notes +===== +Signature: ``ai.onnx@21::GroupNormalization``. 
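A hedged numpy sketch of the GroupNormalization formula quoted above, y = scale * (x - mean) / sqrt(variance + epsilon) + bias, with statistics taken per instance and per group over the grouped channels and spatial dimensions, and per-channel scale and bias of shape (C) as in this opset 21 signature.

import numpy as np

N, C, H, W, num_groups, eps = 2, 4, 3, 3, 2, 1e-5
x = np.random.rand(N, C, H, W).astype(np.float32)
scale = np.ones(C, dtype=np.float32)
bias = np.zeros(C, dtype=np.float32)

g = x.reshape(N, num_groups, C // num_groups, H, W)        # split channels into groups
mean = g.mean(axis=(2, 3, 4), keepdims=True)               # per (instance, group) statistics
var = g.var(axis=(2, 3, 4), keepdims=True)
y = ((g - mean) / np.sqrt(var + eps)).reshape(N, C, H, W)  # stage one: normalize
y = scale[None, :, None, None] * y + bias[None, :, None, None]  # stage two: scale and shift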
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return ( - _GroupNormalization( - _GroupNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - num_groups=AttrInt64(num_groups, name="num_groups"), - stash_type=AttrInt64(stash_type, name="stash_type"), - ), - _GroupNormalization.Inputs( - X=unwrap_vars(X), - scale=unwrap_vars(scale), - bias=unwrap_vars(bias), - ), - ) - .get_output_vars( - X=get_value(X), - scale=get_value(scale), - bias=get_value(bias), - ) - .Y - ) + return _GroupNormalization( + _GroupNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + num_groups=AttrInt64(num_groups, name="num_groups"), + stash_type=AttrInt64(stash_type, name="stash_type"), + ), _GroupNormalization.Inputs( + X=unwrap_vars(X), scale=unwrap_vars(scale), bias=unwrap_vars(bias), ), ).get_output_vars( + X=get_value(X), scale=get_value(scale), bias=get_value(bias), ).Y -def identity( - input: Var, -) -> Var: +def identity(input: Var, ) -> Var: r""" - Identity operator - - Parameters - ========== - input - Type V. - Input tensor - - Returns - ======= - output : Var - Type V. - Tensor to copy input into. - - Notes - ===== - Signature: ``ai.onnx@21::Identity``. - - Type constraints: - - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +Identity operator + +Parameters +========== +input + Type V. + Input tensor + +Returns +======= +output : Var + Type V. + Tensor to copy input into. + +Notes +===== +Signature: ``ai.onnx@21::Identity``. 
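For the `flatten` constructor defined earlier in this hunk, the shape rule from its docstring (an input of shape (d_0, ..., d_n) flattened at ``axis`` becomes a 2-D tensor of shape (d_0 x ... x d_(axis-1), d_axis x ... x d_n)) can be mimicked with numpy; a hedged sketch:

import numpy as np

x = np.zeros((2, 3, 4, 5), dtype=np.float32)
axis = 2
flat = x.reshape(int(np.prod(x.shape[:axis], dtype=np.int64)), -1)
# flat.shape == (6, 20); axis = 0 yields the degenerate outer dimension 1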
+ +Type constraints: + - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Identity( - _Identity.Attributes(), - _Identity.Inputs( - input=unwrap_vars(input), - ), - ) - .get_output_vars( - input=get_value(input), - ) - .output - ) + return _Identity( + _Identity.Attributes( + ), _Identity.Inputs( + input=unwrap_vars(input), ), ).get_output_vars( + input=get_value(input), ).output -def if_( - cond: Var, - *, - else_branch: Callable[[], Iterable[Var]], - then_branch: Callable[[], Iterable[Var]], -) -> Sequence[Var]: +def if_(cond: Var, *, else_branch: Callable[[], Iterable[Var]], then_branch: Callable[[], Iterable[Var]], ) -> Sequence[Var]: r""" - If conditional - - Parameters - ========== - cond - Type B. - Condition for the if. The tensor must contain a single element. - else_branch - Attribute. - Graph to run if condition is false. Has N outputs: values you wish to be - live-out to the enclosing scope. The number of outputs must match the - number of outputs in the then_branch. - then_branch - Attribute. - Graph to run if condition is true. Has N outputs: values you wish to be - live-out to the enclosing scope. The number of outputs must match the - number of outputs in the else_branch. - - Returns - ======= - outputs : Sequence[Var] - Type V. - Values that are live-out to the enclosing scope. The return values in - the ``then_branch`` and ``else_branch`` must be of the same data type. - The ``then_branch`` and ``else_branch`` may produce tensors with the - same element type and different shapes. 
If corresponding outputs from - the then-branch and the else-branch have static shapes S1 and S2, then - the shape of the corresponding output variable of the if-node (if - present) must be compatible with both S1 and S2 as it represents the - union of both possible shapes.For example, if in a model file, the first - output of ``then_branch`` is typed float tensor with shape [2] and the - first output of ``else_branch`` is another float tensor with shape [3], - If's first output should have (a) no shape set, or (b) a shape of rank 1 - with neither ``dim_value`` nor ``dim_param`` set, or (c) a shape of rank - 1 with a unique ``dim_param``. In contrast, the first output cannot have - the shape [2] since [2] and [3] are not compatible. - - Notes - ===== - Signature: ``ai.onnx@21::If``. - - Type constraints: - - B: `tensor(bool)` - - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(float8e4m3fn))`, `optional(tensor(float8e4m3fnuz))`, `optional(tensor(float8e5m2))`, `optional(tensor(float8e5m2fnuz))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int4))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint4))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(float8e4m3fn))`, `seq(tensor(float8e4m3fnuz))`, `seq(tensor(float8e5m2))`, `seq(tensor(float8e5m2fnuz))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int4))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint4))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +If conditional + +Parameters +========== +cond + Type B. + Condition for the if. The tensor must contain a single element. +else_branch + Attribute. + Graph to run if condition is false. Has N outputs: values you wish to be + live-out to the enclosing scope. The number of outputs must match the + number of outputs in the then_branch. +then_branch + Attribute. + Graph to run if condition is true. Has N outputs: values you wish to be + live-out to the enclosing scope. The number of outputs must match the + number of outputs in the else_branch. 
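A hedged usage sketch for the `if_` constructor above: both branches are passed as zero-argument callables returning the branch outputs, and the two output lists must agree in count and element type. It assumes the `spox.opset.ai.onnx.v21` import used in the earlier sketches.

import numpy as np
from spox import Tensor, argument
import spox.opset.ai.onnx.v21 as op

cond = argument(Tensor(np.bool_, ()))   # single-element boolean condition
(result,) = op.if_(
    cond,
    then_branch=lambda: [op.constant(value_floats=[1.0])],
    else_branch=lambda: [op.constant(value_floats=[0.0])],
)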
+ +Returns +======= +outputs : Sequence[Var] + Type V. + Values that are live-out to the enclosing scope. The return values in + the ``then_branch`` and ``else_branch`` must be of the same data type. + The ``then_branch`` and ``else_branch`` may produce tensors with the + same element type and different shapes. If corresponding outputs from + the then-branch and the else-branch have static shapes S1 and S2, then + the shape of the corresponding output variable of the if-node (if + present) must be compatible with both S1 and S2 as it represents the + union of both possible shapes.For example, if in a model file, the first + output of ``then_branch`` is typed float tensor with shape [2] and the + first output of ``else_branch`` is another float tensor with shape [3], + If's first output should have (a) no shape set, or (b) a shape of rank 1 + with neither ``dim_value`` nor ``dim_param`` set, or (c) a shape of rank + 1 with a unique ``dim_param``. In contrast, the first output cannot have + the shape [2] since [2] and [3] are not compatible. + +Notes +===== +Signature: ``ai.onnx@21::If``. + +Type constraints: + - B: `tensor(bool)` + - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(float8e4m3fn))`, `optional(tensor(float8e4m3fnuz))`, `optional(tensor(float8e5m2))`, `optional(tensor(float8e5m2fnuz))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int4))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint4))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(float8e4m3fn))`, `seq(tensor(float8e4m3fnuz))`, `seq(tensor(float8e5m2))`, `seq(tensor(float8e5m2fnuz))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int4))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint4))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - _else_branch_subgraph: Graph = subgraph((), else_branch) - _then_branch_subgraph: Graph = subgraph((), then_branch) - return ( - _If( - _If.Attributes( - else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), - then_branch=AttrGraph(_then_branch_subgraph, 
name="then_branch"), - ), - _If.Inputs( - cond=unwrap_vars(cond), - ), - out_variadic=len(_else_branch_subgraph.requested_results), - ) - .get_output_vars( - cond=get_value(cond), - ) - .outputs + _else_branch_subgraph: Graph = subgraph( + (), + else_branch ) + _then_branch_subgraph: Graph = subgraph( + (), + then_branch + ) + return _If( + _If.Attributes( + else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), + then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), + ), _If.Inputs( + cond=unwrap_vars(cond), ), out_variadic=len(_else_branch_subgraph.requested_results), ).get_output_vars( + cond=get_value(cond), ).outputs -def loop( - M: Optional[Var] = None, - cond: Optional[Var] = None, - v_initial: Sequence[Var] = (), - *, - body: Callable[..., Iterable[Var]], -) -> Sequence[Var]: +def loop(M: Optional[Var] = None, cond: Optional[Var] = None, v_initial: Sequence[Var] = (), *, body: Callable[..., Iterable[Var]], ) -> Sequence[Var]: r""" - Generic Looping construct. This loop has multiple termination - conditions: - - 1) Trip count. Iteration count specified at runtime. Set by specifying - the input M. Optional. Set to empty string to omit. Note that a - static trip count (specified at graph construction time) can be - specified by passing in a constant node for input M. - 2) Loop termination condition. This is an input to the op that - determines whether to run the first iteration and also a loop-carried - dependency for the body graph. The body graph must yield a value for - the condition variable, whether this input is provided or not. - - This table summarizes the operating modes of this operator with - equivalent C-style code: - - Operator inputs defined as (max_trip_count, condition_var). - - - input ("", ""): for (int i=0; ; ++i) { cond = ... // Note this value - is ignored, but is required in the body } - - - input ("", cond) // Note this is analogous to a while loop bool cond - = ...; for (int i=0; cond; ++i) { cond = ...; } - - - input ("", 1) // Note this is analogous to a do-while loop bool cond - = true for (int i=0; cond; ++i) { cond = ...; } - - - input (trip_count, "") // Note this is analogous to a for loop int - trip_count = ... 
for (int i=0; i < trip_count; ++i) { cond = ...; // - ignored } - - - input (trip_count, cond) int trip_count = ...; bool cond = ...; for - (int i=0; i < trip_count && cond; ++i) { cond = ...; } - - *Sample usage - cond as well as trip count* - - :: - - graph predict-net { - %a = Constant[value = ]() - %b = Constant[value = ]() - %keepgoing = Constant[value = ]() - %max_trip_count = Constant[value = ]() - %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b) - return - } - - graph body-net ( - %i[INT32, scalar] // iteration number - %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used - %b_in[INT32, scalar] // incoming value of loop-carried-dependency b - ) { - %my_local = Add(%a, %b_in) - %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b - %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition - %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated - return %keepgoing_out, %b_out, %user_defined_val - } - - *Sample equivalent C code* - - :: - - { - /* User-defined code (enclosing scope) */ - int a = 3, b = 6; - bool keepgoing = true; // Analogous to input cond - /* End user-defined code */ - - /* Implicitly-defined code */ - const int max_trip_count = 10; // Analogous to input M - int user_defined_vals[]; // Imagine this is resizable - /* End implicitly-defined code */ - /* initialize loop-carried variables and scan-output variables */ - bool keepgoing_out = keepgoing - int b_out = b - - for (int i=0; i < max_trip_count && keepgoing_out; ++i) { - /* Implicitly-defined code: bind actual parameter values - to formal parameter variables of loop-body */ - bool keepgoing_in = keepgoing_out; - bool b_in = b_out; - - /* User-defined code (loop body) */ - int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine - b_out = a - b_in; - keepgoing_out = my_local > b_out; - user_defined_val = b_in + b_in; // b_in and b_out are different variables - /* End user-defined code */ - - /* Implicitly defined-code */ - user_defined_vals[i] = user_defined_val // accumulate scan-output values - } - // int t = my_local; // Can't do this. my_local is not accessible here. - - // The values below are bound to the output variables of the loop and therefore accessible - // b_out; user_defined_vals; keepgoing_out; - } - - There are several things of note in this code snippet: - - 1) Values from the enclosing scope (i.e. variable "a" here) are in scope - and can be referenced in the inputs of the loop. - 2) Any values computed in the loop body that needs to be used in a - subsequent iteration or after the loop are modelled using a pair of - variables in the loop-body, consisting of an input variable (eg., - b_in) and an output variable (eg., b_out). These are referred to as - loop-carried dependences. The loop operation node supplies the input - value of the input variable for the first iteration, and returns the - output value of the output variable produced by the final iteration. - 3) Scan_output variables are used to implicitly concatenate values - computed across all the iterations. In the above example, the value - of user_defined_val computed over all iterations are concatenated and - returned as the value of user_defined_vals after the loop. - 4) Values created in the body cannot be accessed in the enclosing scope, - except using the mechanism described above. - - Note that the semantics of this op support "diagonal" or "wavefront" - execution. 
(See Step 3 here for an example: - https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). - Frontends should emit multi-layer RNNs as a series of While operators - (with time being the inner looping dimension), with each successive - layer consuming the scan_outputs from the previous layer, possibly going - through several point-wise operators (e.g. dropout, residual - connections, linear layer). - - The input/output of subgraph (produced by loop node) matching is based - on order instead of name. The implementation will figure out the names - based on this order. - - Parameters - ========== - M - Type I. - A maximum trip-count for the loop specified at runtime. Optional. Pass - empty string to skip. - cond - Type B. - A boolean termination condition. Optional. Pass empty string to skip. - v_initial - Type V. - The initial values of any loop-carried dependencies (values that change - across loop iterations) - body - Attribute. - The graph run each iteration. It has 2+N inputs: (iteration_num, - condition, loop carried dependencies...). It has 1+N+K outputs: - (condition, loop carried dependencies..., scan_outputs...). Each - scan_output is created by concatenating the value of the specified - output value at the end of each iteration of the loop. It is an error if - the dimensions or data type of these scan_outputs change across loop - iterations. - - Returns - ======= - v_final_and_scan_outputs : Sequence[Var] - Type V. - Final N loop carried dependency values then K scan_outputs. Scan outputs - must be Tensors. - - Notes - ===== - Signature: ``ai.onnx@21::Loop``. - - Type constraints: - - I: `tensor(int64)` - - B: `tensor(bool)` - - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(float8e4m3fn))`, `optional(tensor(float8e4m3fnuz))`, `optional(tensor(float8e5m2))`, `optional(tensor(float8e5m2fnuz))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int4))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint4))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(float8e4m3fn))`, `seq(tensor(float8e4m3fnuz))`, `seq(tensor(float8e5m2))`, `seq(tensor(float8e5m2fnuz))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int4))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint4))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, 
`tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +Generic Looping construct. This loop has multiple termination +conditions: + +1) Trip count. Iteration count specified at runtime. Set by specifying + the input M. Optional. Set to empty string to omit. Note that a + static trip count (specified at graph construction time) can be + specified by passing in a constant node for input M. +2) Loop termination condition. This is an input to the op that + determines whether to run the first iteration and also a loop-carried + dependency for the body graph. The body graph must yield a value for + the condition variable, whether this input is provided or not. + +This table summarizes the operating modes of this operator with +equivalent C-style code: + +Operator inputs defined as (max_trip_count, condition_var). + +- input ("", ""): for (int i=0; ; ++i) { cond = ... // Note this value + is ignored, but is required in the body } + +- input ("", cond) // Note this is analogous to a while loop bool cond + = ...; for (int i=0; cond; ++i) { cond = ...; } + +- input ("", 1) // Note this is analogous to a do-while loop bool cond + = true for (int i=0; cond; ++i) { cond = ...; } + +- input (trip_count, "") // Note this is analogous to a for loop int + trip_count = ... for (int i=0; i < trip_count; ++i) { cond = ...; // + ignored } + +- input (trip_count, cond) int trip_count = ...; bool cond = ...; for + (int i=0; i < trip_count && cond; ++i) { cond = ...; } + +*Sample usage - cond as well as trip count* + +:: + + graph predict-net { + %a = Constant[value = ]() + %b = Constant[value = ]() + %keepgoing = Constant[value = ]() + %max_trip_count = Constant[value = ]() + %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b) + return + } + + graph body-net ( + %i[INT32, scalar] // iteration number + %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used + %b_in[INT32, scalar] // incoming value of loop-carried-dependency b + ) { + %my_local = Add(%a, %b_in) + %b_out = Sub(%a, %b_in) // outgoing value of loop-carried-dependency b + %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition + %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated + return %keepgoing_out, %b_out, %user_defined_val + } + +*Sample equivalent C code* + +:: + + { + /* User-defined code (enclosing scope) */ + int a = 3, b = 6; + bool keepgoing = true; // Analogous to input cond + /* End user-defined code */ + + /* Implicitly-defined code */ + const int max_trip_count = 10; // Analogous to input M + int user_defined_vals[]; // Imagine this is resizable + /* End implicitly-defined code */ + /* initialize loop-carried variables and scan-output variables */ + bool keepgoing_out = keepgoing + int b_out = b + + for (int i=0; i < max_trip_count && keepgoing_out; ++i) { + /* Implicitly-defined code: bind actual parameter values + to formal parameter variables of loop-body */ + bool keepgoing_in = keepgoing_out; + bool b_in = b_out; + + /* User-defined code (loop body) */ + int my_local = a + b_in; // Reading value "a" from the enclosing scope is fine + b_out = a - b_in; + keepgoing_out = my_local > b_out; + user_defined_val = b_in + b_in; // b_in and b_out are different variables + /* End user-defined code */ + + /* Implicitly 
defined-code */ + user_defined_vals[i] = user_defined_val // accumulate scan-output values + } + // int t = my_local; // Can't do this. my_local is not accessible here. + + // The values below are bound to the output variables of the loop and therefore accessible + // b_out; user_defined_vals; keepgoing_out; + } + +There are several things of note in this code snippet: + +1) Values from the enclosing scope (i.e. variable "a" here) are in scope + and can be referenced in the inputs of the loop. +2) Any values computed in the loop body that needs to be used in a + subsequent iteration or after the loop are modelled using a pair of + variables in the loop-body, consisting of an input variable (eg., + b_in) and an output variable (eg., b_out). These are referred to as + loop-carried dependences. The loop operation node supplies the input + value of the input variable for the first iteration, and returns the + output value of the output variable produced by the final iteration. +3) Scan_output variables are used to implicitly concatenate values + computed across all the iterations. In the above example, the value + of user_defined_val computed over all iterations are concatenated and + returned as the value of user_defined_vals after the loop. +4) Values created in the body cannot be accessed in the enclosing scope, + except using the mechanism described above. + +Note that the semantics of this op support "diagonal" or "wavefront" +execution. (See Step 3 here for an example: +https://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/). +Frontends should emit multi-layer RNNs as a series of While operators +(with time being the inner looping dimension), with each successive +layer consuming the scan_outputs from the previous layer, possibly going +through several point-wise operators (e.g. dropout, residual +connections, linear layer). + +The input/output of subgraph (produced by loop node) matching is based +on order instead of name. The implementation will figure out the names +based on this order. + +Parameters +========== +M + Type I. + A maximum trip-count for the loop specified at runtime. Optional. Pass + empty string to skip. +cond + Type B. + A boolean termination condition. Optional. Pass empty string to skip. +v_initial + Type V. + The initial values of any loop-carried dependencies (values that change + across loop iterations) +body + Attribute. + The graph run each iteration. It has 2+N inputs: (iteration_num, + condition, loop carried dependencies...). It has 1+N+K outputs: + (condition, loop carried dependencies..., scan_outputs...). Each + scan_output is created by concatenating the value of the specified + output value at the end of each iteration of the loop. It is an error if + the dimensions or data type of these scan_outputs change across loop + iterations. + +Returns +======= +v_final_and_scan_outputs : Sequence[Var] + Type V. + Final N loop carried dependency values then K scan_outputs. Scan outputs + must be Tensors. + +Notes +===== +Signature: ``ai.onnx@21::Loop``. 
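# Editor's illustrative sketch (not part of the generated opset module): the Loop
# semantics described above, mirroring the "Sample equivalent C code" in plain
# Python -- a runtime trip count, a loop-carried dependency `b`, and a scan output
# accumulated on every iteration. The helper name is hypothetical.
def loop_reference(max_trip_count, keepgoing, b, a=3):
    user_defined_vals = []               # scan output, concatenated per iteration
    keepgoing_out, b_out = keepgoing, b  # initialize loop-carried variables
    for i in range(max_trip_count):
        if not keepgoing_out:
            break
        keepgoing_in, b_in = keepgoing_out, b_out   # bind body inputs for this iteration
        my_local = a + b_in              # reading `a` from the enclosing scope is fine
        b_out = a - b_in                 # outgoing value of the loop-carried dependency
        keepgoing_out = my_local > b_out # outgoing loop-termination condition
        user_defined_vals.append(b_in + b_in)       # scan-output element
    return keepgoing_out, b_out, user_defined_vals

# loop_reference(10, True, 6) -> (False, 6, [12, -6])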
+ +Type constraints: + - I: `tensor(int64)` + - B: `tensor(bool)` + - V: `optional(seq(tensor(bfloat16)))`, `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bfloat16))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(float8e4m3fn))`, `optional(tensor(float8e4m3fnuz))`, `optional(tensor(float8e5m2))`, `optional(tensor(float8e5m2fnuz))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int4))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint4))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bfloat16))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(float8e4m3fn))`, `seq(tensor(float8e4m3fnuz))`, `seq(tensor(float8e5m2))`, `seq(tensor(float8e5m2fnuz))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int4))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint4))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ _body_subgraph: Graph = subgraph( - typing_cast(list[Type], [Tensor(np.int64, (1,)), Tensor(np.bool_, (1,))]) - + [var.unwrap_type() for var in v_initial], - body, - ) - return ( - _Loop( - _Loop.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _Loop.Inputs( - M=unwrap_vars(M), - cond=unwrap_vars(cond), - v_initial=unwrap_vars(v_initial), - ), - out_variadic=len(_body_subgraph.requested_results) - 1, - ) - .get_output_vars( - M=get_value(M), - cond=get_value(cond), - v_initial=get_value(v_initial), - ) - .v_final_and_scan_outputs + typing_cast(list[Type], [Tensor(np.int64, (1,)), Tensor(np.bool_, (1,))])+ [var.unwrap_type() for var in v_initial], + body ) + return _Loop( + _Loop.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), _Loop.Inputs( + M=unwrap_vars(M), cond=unwrap_vars(cond), v_initial=unwrap_vars(v_initial), ), out_variadic=len(_body_subgraph.requested_results) - 1, ).get_output_vars( + M=get_value(M), cond=get_value(cond), v_initial=get_value(v_initial), ).v_final_and_scan_outputs -def pad( - data: Var, - pads: Var, - constant_value: Optional[Var] = None, - axes: Optional[Var] = None, - *, - mode: str = "constant", -) -> Var: +def pad(data: Var, pads: Var, constant_value: Optional[Var] = None, axes: Optional[Var] = None, *, mode: str = "constant", ) -> Var: r""" - Given a tensor containing the 
data to be padded (``data``), a tensor - containing the number of start and end pad values for axis (``pads``), - (optionally) a ``mode``, and (optionally) ``constant_value``, a padded - tensor (``output``) is generated. +Given a tensor containing the data to be padded (``data``), a tensor +containing the number of start and end pad values for axis (``pads``), +(optionally) a ``mode``, and (optionally) ``constant_value``, a padded +tensor (``output``) is generated. - The three supported ``modes`` are (similar to corresponding modes - supported by ``numpy.pad``): +The three supported ``modes`` are (similar to corresponding modes +supported by ``numpy.pad``): - 1) ``constant``\ (default) - pads with a given constant value as - specified by ``constant_value`` (which defaults to 0, empty string, - or False) +1) ``constant``\ (default) - pads with a given constant value as + specified by ``constant_value`` (which defaults to 0, empty string, + or False) - 2) ``reflect`` - pads with the reflection of the vector mirrored on the - first and last values of the vector along each axis +2) ``reflect`` - pads with the reflection of the vector mirrored on the + first and last values of the vector along each axis - 3) ``edge`` - pads with the edge values of array +3) ``edge`` - pads with the edge values of array - 4) ``wrap`` - wrap-around padding as if the data tensor forms a torus +4) ``wrap`` - wrap-around padding as if the data tensor forms a torus - Example 1 (``constant`` mode): +Example 1 (``constant`` mode): - Insert 0 pads to the beginning of the second dimension. +Insert 0 pads to the beginning of the second dimension. - :: +:: - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] - pads = [0, 2, 0, 0] - - mode = 'constant' - - constant_value = 0.0 - - output = [ - [0.0, 0.0, 1.0, 1.2], - [0.0, 0.0, 2.3, 3.4], - [0.0, 0.0, 4.5, 5.7], - ] - - Example 2 (``reflect`` mode): - - :: - - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - - pads = [0, 2, 0, 0] - - mode = 'reflect' - - output = [ - [1.0, 1.2, 1.0, 1.2], - [2.3, 3.4, 2.3, 3.4], - [4.5, 5.7, 4.5, 5.7], - ] - - Example 3 (``edge`` mode): - - :: - - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - - pads = [0, 2, 0, 0] - - mode = 'edge' - - output = [ - [1.0, 1.0, 1.0, 1.2], - [2.3, 2.3, 2.3, 3.4], - [4.5, 4.5, 4.5, 5.7], - ] - - Example 4 (``wrap`` mode): - - :: - - data = [ - [1.0, 1.2], - [2.3, 3.4], - [4.5, 5.7], - ] - - pads = [2, 1, 1, 1] - - mode = 'wrap' - - output = [ - [3.4, 2.3, 3.4, 2.3], - [5.7, 4.5, 5.7, 4.5], - [1.2, 1.0, 1.2, 1.0], - [3.4, 2.3, 3.4, 2.3], - [5.7, 4.5, 5.7, 4.5], - [1.2, 1.0, 1.2, 1.0], - ] - - Parameters - ========== - data - Type T. - Input tensor. - pads - Type tensor(int64). - Tensor of integers indicating the number of padding elements to add or - remove (if negative) at the beginning and end of each axis. For 2D input - tensor, it is the number of pixels. ``pads`` should be a 1D tensor of - shape [2 \* num_axes] where ``num_axes`` refers to the number of - elements in the ``axes`` input or the input rank if ``axes`` are not - provided explicitly. ``pads`` format should be: [x1_begin, x2_begin, - ..., x1_end, x2_end,...], where xi_begin is the number of pad values - added at the beginning of axis ``axes[i]`` and xi_end, the number of pad - values added at the end of axis ``axes[i]``. - constant_value - Type T. - (Optional) A scalar value to be used if the mode chosen is ``constant`` - (by default it is 0, empty string or False). 
- axes - Type Tind. - 1-D tensor of axes that ``pads`` apply to. Negative value means counting - dimensions from the back. Accepted range is [-r, r-1] where r = - rank(data). Behavior is undefined if an axis is repeated. If not - provided, all axes are assumed (``[0, 1, ..., input_rank-1]``). - mode - Attribute. - Supported modes: ``constant``\ (default), ``reflect``, ``edge``, - ``wrap`` - - Returns - ======= - output : Var - Type T. - Tensor after padding. - - Notes - ===== - Signature: ``ai.onnx@21::Pad``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - - Tind: `tensor(int32)`, `tensor(int64)` + pads = [0, 2, 0, 0] + + mode = 'constant' + + constant_value = 0.0 + + output = [ + [0.0, 0.0, 1.0, 1.2], + [0.0, 0.0, 2.3, 3.4], + [0.0, 0.0, 4.5, 5.7], + ] + +Example 2 (``reflect`` mode): + +:: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'reflect' + + output = [ + [1.0, 1.2, 1.0, 1.2], + [2.3, 3.4, 2.3, 3.4], + [4.5, 5.7, 4.5, 5.7], + ] + +Example 3 (``edge`` mode): + +:: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [0, 2, 0, 0] + + mode = 'edge' + + output = [ + [1.0, 1.0, 1.0, 1.2], + [2.3, 2.3, 2.3, 3.4], + [4.5, 4.5, 4.5, 5.7], + ] + +Example 4 (``wrap`` mode): + +:: + + data = [ + [1.0, 1.2], + [2.3, 3.4], + [4.5, 5.7], + ] + + pads = [2, 1, 1, 1] + + mode = 'wrap' + + output = [ + [3.4, 2.3, 3.4, 2.3], + [5.7, 4.5, 5.7, 4.5], + [1.2, 1.0, 1.2, 1.0], + [3.4, 2.3, 3.4, 2.3], + [5.7, 4.5, 5.7, 4.5], + [1.2, 1.0, 1.2, 1.0], + ] + +Parameters +========== +data + Type T. + Input tensor. +pads + Type tensor(int64). + Tensor of integers indicating the number of padding elements to add or + remove (if negative) at the beginning and end of each axis. For 2D input + tensor, it is the number of pixels. ``pads`` should be a 1D tensor of + shape [2 \* num_axes] where ``num_axes`` refers to the number of + elements in the ``axes`` input or the input rank if ``axes`` are not + provided explicitly. ``pads`` format should be: [x1_begin, x2_begin, + ..., x1_end, x2_end,...], where xi_begin is the number of pad values + added at the beginning of axis ``axes[i]`` and xi_end, the number of pad + values added at the end of axis ``axes[i]``. +constant_value + Type T. + (Optional) A scalar value to be used if the mode chosen is ``constant`` + (by default it is 0, empty string or False). +axes + Type Tind. + 1-D tensor of axes that ``pads`` apply to. Negative value means counting + dimensions from the back. Accepted range is [-r, r-1] where r = + rank(data). Behavior is undefined if an axis is repeated. If not + provided, all axes are assumed (``[0, 1, ..., input_rank-1]``). +mode + Attribute. + Supported modes: ``constant``\ (default), ``reflect``, ``edge``, + ``wrap`` + +Returns +======= +output : Var + Type T. + Tensor after padding. + +Notes +===== +Signature: ``ai.onnx@21::Pad``. 
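# Editor's illustrative sketch (not part of the generated opset module): the Pad
# semantics above expressed with numpy.pad. ONNX packs `pads` as
# [x1_begin, x2_begin, ..., x1_end, x2_end, ...], while numpy.pad expects per-axis
# (begin, end) pairs, so the list is re-paired first. Negative (cropping) pad
# values and the `axes` input are not covered by this sketch.
import numpy as np

def pad_reference(data, pads, mode="constant", constant_value=0.0):
    rank = data.ndim
    pad_width = [(pads[i], pads[i + rank]) for i in range(rank)]
    if mode == "constant":
        return np.pad(data, pad_width, mode="constant", constant_values=constant_value)
    return np.pad(data, pad_width, mode=mode)   # "reflect", "edge", "wrap" map directly

data = np.array([[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]])
assert np.allclose(
    pad_reference(data, [0, 2, 0, 0], mode="reflect"),     # Example 2 above
    [[1.0, 1.2, 1.0, 1.2], [2.3, 3.4, 2.3, 3.4], [4.5, 5.7, 4.5, 5.7]],
)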
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` + - Tind: `tensor(int32)`, `tensor(int64)` """ - return ( - _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=unwrap_vars(data), - pads=unwrap_vars(pads), - constant_value=unwrap_vars(constant_value), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - pads=get_value(pads), - constant_value=get_value(constant_value), - axes=get_value(axes), - ) - .output - ) + return _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), _Pad.Inputs( + data=unwrap_vars(data), pads=unwrap_vars(pads), constant_value=unwrap_vars(constant_value), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), pads=get_value(pads), constant_value=get_value(constant_value), axes=get_value(axes), ).output -def qlinear_matmul( - a: Var, - a_scale: Var, - a_zero_point: Var, - b: Var, - b_scale: Var, - b_zero_point: Var, - y_scale: Var, - y_zero_point: Var, -) -> Var: +def qlinear_matmul(a: Var, a_scale: Var, a_zero_point: Var, b: Var, b_scale: Var, b_zero_point: Var, y_scale: Var, y_zero_point: Var, ) -> Var: r""" - Matrix product that behaves like numpy.matmul: - https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. - It consumes two quantized input tensors, their scales and zero points, - scale and zero point of output, and computes the quantized output. The - quantization formula is y = saturate((x / y_scale) + y_zero_point). For - (x / y_scale), it is rounding to nearest ties to even. Refer to - https://en.wikipedia.org/wiki/Rounding for details. Scale and zero point - must have same shape. They must be either scalar (per tensor) or N-D - tensor (per row for 'a' and per column for 'b'). Scalar refers to per - tensor quantization whereas N-D refers to per row or per column - quantization. If the input is 2D of shape [M, K] then zero point and - scale tensor may be an M element vector [v_1, v_2, ..., v_M] for per row - quantization and K element vector of shape [v_1, v_2, ..., v_K] for per - column quantization. If the input is N-D tensor with shape [D1, D2, M, - K] then zero point and scale tensor may have shape [D1, D2, M, 1] for - per row quantization and shape [D1, D2, 1, K] for per column - quantization. Production must never overflow, and accumulation may - overflow if and only if in 32 bits. - - Parameters - ========== - a - Type T1. - N-dimensional quantized matrix a - a_scale - Type TS. - scale of quantized input a - a_zero_point - Type T1. - zero point of quantized input a - b - Type T2. - N-dimensional quantized matrix b - b_scale - Type TS. - scale of quantized input b - b_zero_point - Type T2. - zero point of quantized input b - y_scale - Type TS. - scale of quantized output y - y_zero_point - Type T3. - zero point of quantized output y - - Returns - ======= - y : Var - Type T3. - Quantized matrix multiply results from a \* b - - Notes - ===== - Signature: ``ai.onnx@21::QLinearMatMul``. 
- - Type constraints: - - T1: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` - - TS: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)` - - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` - - T3: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` +Matrix product that behaves like numpy.matmul: +https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. +It consumes two quantized input tensors, their scales and zero points, +scale and zero point of output, and computes the quantized output. The +quantization formula is y = saturate((x / y_scale) + y_zero_point). For +(x / y_scale), it is rounding to nearest ties to even. Refer to +https://en.wikipedia.org/wiki/Rounding for details. Scale and zero point +must have same shape. They must be either scalar (per tensor) or N-D +tensor (per row for 'a' and per column for 'b'). Scalar refers to per +tensor quantization whereas N-D refers to per row or per column +quantization. If the input is 2D of shape [M, K] then zero point and +scale tensor may be an M element vector [v_1, v_2, ..., v_M] for per row +quantization and K element vector of shape [v_1, v_2, ..., v_K] for per +column quantization. If the input is N-D tensor with shape [D1, D2, M, +K] then zero point and scale tensor may have shape [D1, D2, M, 1] for +per row quantization and shape [D1, D2, 1, K] for per column +quantization. Production must never overflow, and accumulation may +overflow if and only if in 32 bits. + +Parameters +========== +a + Type T1. + N-dimensional quantized matrix a +a_scale + Type TS. + scale of quantized input a +a_zero_point + Type T1. + zero point of quantized input a +b + Type T2. + N-dimensional quantized matrix b +b_scale + Type TS. + scale of quantized input b +b_zero_point + Type T2. + zero point of quantized input b +y_scale + Type TS. + scale of quantized output y +y_zero_point + Type T3. + zero point of quantized output y + +Returns +======= +y : Var + Type T3. + Quantized matrix multiply results from a \* b + +Notes +===== +Signature: ``ai.onnx@21::QLinearMatMul``. 
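# Editor's illustrative sketch (not part of the generated opset module): the
# QLinearMatMul arithmetic described above as a dequantize -> matmul -> requantize
# round trip in NumPy, assuming uint8 inputs and output; np.rint provides the
# round-half-to-even behaviour mentioned above.
import numpy as np

def qlinear_matmul_reference(a, a_scale, a_zp, b, b_scale, b_zp, y_scale, y_zp):
    real_a = (a.astype(np.int32) - a_zp) * a_scale      # dequantize a
    real_b = (b.astype(np.int32) - b_zp) * b_scale      # dequantize b
    y = np.rint((real_a @ real_b) / y_scale) + y_zp     # requantize the product
    return np.clip(y, 0, 255).astype(np.uint8)          # saturate to the uint8 range

a = np.array([[2, 4]], dtype=np.uint8)
b = np.array([[3], [5]], dtype=np.uint8)
print(qlinear_matmul_reference(a, 0.5, 0, b, 0.25, 0, 0.1, 0))   # [[32]]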
+ +Type constraints: + - T1: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` + - TS: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)` + - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` + - T3: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` """ - return ( - _QLinearMatMul( - _QLinearMatMul.Attributes(), - _QLinearMatMul.Inputs( - a=unwrap_vars(a), - a_scale=unwrap_vars(a_scale), - a_zero_point=unwrap_vars(a_zero_point), - b=unwrap_vars(b), - b_scale=unwrap_vars(b_scale), - b_zero_point=unwrap_vars(b_zero_point), - y_scale=unwrap_vars(y_scale), - y_zero_point=unwrap_vars(y_zero_point), - ), - ) - .get_output_vars( - a=get_value(a), - a_scale=get_value(a_scale), - a_zero_point=get_value(a_zero_point), - b=get_value(b), - b_scale=get_value(b_scale), - b_zero_point=get_value(b_zero_point), - y_scale=get_value(y_scale), - y_zero_point=get_value(y_zero_point), - ) - .y - ) + return _QLinearMatMul( + _QLinearMatMul.Attributes( + ), _QLinearMatMul.Inputs( + a=unwrap_vars(a), a_scale=unwrap_vars(a_scale), a_zero_point=unwrap_vars(a_zero_point), b=unwrap_vars(b), b_scale=unwrap_vars(b_scale), b_zero_point=unwrap_vars(b_zero_point), y_scale=unwrap_vars(y_scale), y_zero_point=unwrap_vars(y_zero_point), ), ).get_output_vars( + a=get_value(a), a_scale=get_value(a_scale), a_zero_point=get_value(a_zero_point), b=get_value(b), b_scale=get_value(b_scale), b_zero_point=get_value(b_zero_point), y_scale=get_value(y_scale), y_zero_point=get_value(y_zero_point), ).y -def quantize_linear( - x: Var, - y_scale: Var, - y_zero_point: Optional[Var] = None, - *, - axis: int = 1, - block_size: int = 0, - output_dtype: int = 0, - saturate: int = 1, -) -> Var: +def quantize_linear(x: Var, y_scale: Var, y_zero_point: Optional[Var] = None, *, axis: int = 1, block_size: int = 0, output_dtype: int = 0, saturate: int = 1, ) -> Var: r""" - The linear quantization operator consumes a high-precision tensor, a - scale, and a zero point to compute the low-precision/quantized tensor. - The scale factor and zero point must have the same shape, determining - the quantization granularity. The quantization formula is - ``y = saturate((x / y_scale) + y_zero_point)``. - - Saturation is done according to: - - - uint16: [0, 65535] - - int16: [-32768, 32767] - - uint8: [0, 255] - - int8: [-128, 127] - - uint4: [0, 15] - - int4: [-8, 7] - - For ``(x / y_scale)``, it rounds to the nearest even. Refer to - https://en.wikipedia.org/wiki/Rounding for details. - - ``y_zero_point`` and ``y`` must have the same type. ``y_zero_point`` is - usually not used for quantization to float8 types, but the quantization - formula remains the same for consistency, and the type of the attribute - ``y_zero_point`` still determines the quantization type. - - There are three supported quantization granularities, determined by the - shape of ``y_scale``. In all cases, ``y_zero_point`` must have the same - shape as ``y_scale``. - - - Per-tensor (per-layer) quantization: ``y_scale`` is a scalar. - - Per-axis quantization: The scale must be a 1-D tensor, with the - length of the quantization axis. For an input shape - ``(D0, ..., Di, ..., Dn)`` and ``axis=i``, ``y_scale`` is a 1-D - tensor of length ``Di``. 
- - Blocked quantization: The scale's shape is identical to the input's - shape, except for one dimension, in which blocking is performed. - Given ``x`` shape ``(D0, ..., Di, ..., Dn)``, ``axis=i``, and block - size ``B``: ``y_scale`` shape is ``(D0, ..., ceil(Di/B), ..., Dn)``. - - Parameters - ========== - x - Type T1. - N-D full precision Input tensor to be quantized. - y_scale - Type T1. - Scale for doing quantization to get ``y``. For per-tensor/layer - quantization the scale is a scalar, for per-axis quantization it is a - 1-D Tensor and for blocked quantization it has the same shape as the - input, except for one dimension in which blocking is performed. - y_zero_point - Type T2. - Zero point for doing quantization to get ``y``. Shape must match - ``y_scale``.Default is uint8 with zero point of 0 if it's not specified. - axis - Attribute. - (Optional) The axis of the dequantizing dimension of the input tensor. - Used for per-axis and blocked quantization. Negative value means - counting dimensions from the back. Accepted range is ``[-r, r-1]`` where - ``r = rank(input)``. - block_size - Attribute. - (Optional) The size of the quantization block (number of times every - scale is replicated). Used only for blocked quantization. The block size - is a positive integer. Given ``x`` shape ``(D0, ..., Di, ..., Dn)``, - ``y_scale`` shape ``(S0, ... Si, ...Sn)`` and ``axis=i``, the accepted - range is ``[ceil(Di/Si), ceil(Di/(Si-1))-1]`` - output_dtype - Attribute. - (Optional) The output data type. If not supplied, the output data type - is inferred from ``y_zero_point`` data type (``T2``). If neither - ``output_dtype`` nor ``y_zero_point`` are supplied, output data type is - uint8. If both ``output_dtype`` and ``y_zero_point`` are specified, - ``output_dtype`` must be ``T2``. - saturate - Attribute. - The parameter defines how the conversion behaves if an input value is - out of range of the destination type. It only applies for float 8 - quantization (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). - It is true by default. All cases are fully described in two tables - inserted in the operator description. - - Returns - ======= - y : Var - Type T2. - N-D quantized output tensor. It has same shape as input ``x``. - - Notes - ===== - Signature: ``ai.onnx@21::QuantizeLinear``. - - Type constraints: - - T1: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)` - - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int4)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint4)`, `tensor(uint8)` +The linear quantization operator consumes a high-precision tensor, a +scale, and a zero point to compute the low-precision/quantized tensor. +The scale factor and zero point must have the same shape, determining +the quantization granularity. The quantization formula is +``y = saturate((x / y_scale) + y_zero_point)``. + +Saturation is done according to: + +- uint16: [0, 65535] +- int16: [-32768, 32767] +- uint8: [0, 255] +- int8: [-128, 127] +- uint4: [0, 15] +- int4: [-8, 7] + +For ``(x / y_scale)``, it rounds to the nearest even. Refer to +https://en.wikipedia.org/wiki/Rounding for details. + +``y_zero_point`` and ``y`` must have the same type. ``y_zero_point`` is +usually not used for quantization to float8 types, but the quantization +formula remains the same for consistency, and the type of the attribute +``y_zero_point`` still determines the quantization type. 
+ +There are three supported quantization granularities, determined by the +shape of ``y_scale``. In all cases, ``y_zero_point`` must have the same +shape as ``y_scale``. + +- Per-tensor (per-layer) quantization: ``y_scale`` is a scalar. +- Per-axis quantization: The scale must be a 1-D tensor, with the + length of the quantization axis. For an input shape + ``(D0, ..., Di, ..., Dn)`` and ``axis=i``, ``y_scale`` is a 1-D + tensor of length ``Di``. +- Blocked quantization: The scale's shape is identical to the input's + shape, except for one dimension, in which blocking is performed. + Given ``x`` shape ``(D0, ..., Di, ..., Dn)``, ``axis=i``, and block + size ``B``: ``y_scale`` shape is ``(D0, ..., ceil(Di/B), ..., Dn)``. + +Parameters +========== +x + Type T1. + N-D full precision Input tensor to be quantized. +y_scale + Type T1. + Scale for doing quantization to get ``y``. For per-tensor/layer + quantization the scale is a scalar, for per-axis quantization it is a + 1-D Tensor and for blocked quantization it has the same shape as the + input, except for one dimension in which blocking is performed. +y_zero_point + Type T2. + Zero point for doing quantization to get ``y``. Shape must match + ``y_scale``.Default is uint8 with zero point of 0 if it's not specified. +axis + Attribute. + (Optional) The axis of the dequantizing dimension of the input tensor. + Used for per-axis and blocked quantization. Negative value means + counting dimensions from the back. Accepted range is ``[-r, r-1]`` where + ``r = rank(input)``. +block_size + Attribute. + (Optional) The size of the quantization block (number of times every + scale is replicated). Used only for blocked quantization. The block size + is a positive integer. Given ``x`` shape ``(D0, ..., Di, ..., Dn)``, + ``y_scale`` shape ``(S0, ... Si, ...Sn)`` and ``axis=i``, the accepted + range is ``[ceil(Di/Si), ceil(Di/(Si-1))-1]`` +output_dtype + Attribute. + (Optional) The output data type. If not supplied, the output data type + is inferred from ``y_zero_point`` data type (``T2``). If neither + ``output_dtype`` nor ``y_zero_point`` are supplied, output data type is + uint8. If both ``output_dtype`` and ``y_zero_point`` are specified, + ``output_dtype`` must be ``T2``. +saturate + Attribute. + The parameter defines how the conversion behaves if an input value is + out of range of the destination type. It only applies for float 8 + quantization (float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz). + It is true by default. All cases are fully described in two tables + inserted in the operator description. + +Returns +======= +y : Var + Type T2. + N-D quantized output tensor. It has same shape as input ``x``. + +Notes +===== +Signature: ``ai.onnx@21::QuantizeLinear``. 
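# Editor's illustrative sketch (not part of the generated opset module): per-tensor
# and per-axis QuantizeLinear as described above, assuming an int8 target --
# y = saturate(rint(x / y_scale) + y_zero_point). Blocked quantization
# (block_size > 0) is not covered by this sketch.
import numpy as np

def quantize_linear_reference(x, y_scale, y_zero_point=0, axis=1):
    y_scale = np.asarray(y_scale, dtype=np.float64)
    y_zero_point = np.asarray(y_zero_point)
    if y_scale.ndim == 1:                             # per-axis: broadcast along `axis`
        shape = [1] * x.ndim
        shape[axis] = -1
        y_scale = y_scale.reshape(shape)
        if y_zero_point.ndim == 1:
            y_zero_point = y_zero_point.reshape(shape)
    y = np.rint(x / y_scale) + y_zero_point           # round-half-to-even, then shift
    return np.clip(y, -128, 127).astype(np.int8)      # saturate to the int8 range

x = np.array([[1.0, -2.5], [3.2, 0.4]], dtype=np.float32)
print(quantize_linear_reference(x, y_scale=0.02))                  # per-tensor
print(quantize_linear_reference(x, y_scale=[0.02, 0.1], axis=1))   # per-axis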
+ +Type constraints: + - T1: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)` + - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int4)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint4)`, `tensor(uint8)` """ - return ( - _QuantizeLinear( - _QuantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - block_size=AttrInt64(block_size, name="block_size"), - output_dtype=AttrInt64(output_dtype, name="output_dtype"), - saturate=AttrInt64(saturate, name="saturate"), - ), - _QuantizeLinear.Inputs( - x=unwrap_vars(x), - y_scale=unwrap_vars(y_scale), - y_zero_point=unwrap_vars(y_zero_point), - ), - ) - .get_output_vars( - x=get_value(x), - y_scale=get_value(y_scale), - y_zero_point=get_value(y_zero_point), - ) - .y - ) - - -def reshape( - data: Var, - shape: Var, - *, - allowzero: int = 0, -) -> Var: + return _QuantizeLinear( + _QuantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + block_size=AttrInt64(block_size, name="block_size"), + output_dtype=AttrInt64(output_dtype, name="output_dtype"), + saturate=AttrInt64(saturate, name="saturate"), + ), _QuantizeLinear.Inputs( + x=unwrap_vars(x), y_scale=unwrap_vars(y_scale), y_zero_point=unwrap_vars(y_zero_point), ), ).get_output_vars( + x=get_value(x), y_scale=get_value(y_scale), y_zero_point=get_value(y_zero_point), ).y + + +def reshape(data: Var, shape: Var, *, allowzero: int = 0, ) -> Var: r""" - Reshape the input tensor similar to numpy.reshape. First input is the - data tensor, second input is a shape tensor which specifies the output - shape. It outputs the reshaped tensor. At most one dimension of the new - shape can be -1. In this case, the value is inferred from the size of - the tensor and the remaining dimensions. A dimension could also be 0, in - which case the actual dimension value is unchanged (i.e. taken from the - input tensor). If 'allowzero' is set, and the new shape includes 0, the - dimension will be set explicitly to zero (i.e. not taken from input - tensor). Shape (second input) could be an empty shape, which means - converting to a scalar. The input tensor's shape and the output tensor's - shape are required to have the same number of elements. - - If the attribute 'allowzero' is set, it is invalid for the specified - shape to contain both a zero value and -1, as the value of the dimension - corresponding to -1 cannot be determined uniquely. - - Parameters - ========== - data - Type T. - An input tensor. - shape - Type tensor(int64). - Specified shape for output. - allowzero - Attribute. - (Optional) By default, when any value in the 'shape' input is equal to - zero the corresponding dimension value is copied from the input tensor - dynamically. allowzero=1 indicates that if any value in the 'shape' - input is set to zero, the zero value is honored, similar to NumPy. - - Returns - ======= - reshaped : Var - Type T. - Reshaped data. - - Notes - ===== - Signature: ``ai.onnx@21::Reshape``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +Reshape the input tensor similar to numpy.reshape. 
First input is the +data tensor, second input is a shape tensor which specifies the output +shape. It outputs the reshaped tensor. At most one dimension of the new +shape can be -1. In this case, the value is inferred from the size of +the tensor and the remaining dimensions. A dimension could also be 0, in +which case the actual dimension value is unchanged (i.e. taken from the +input tensor). If 'allowzero' is set, and the new shape includes 0, the +dimension will be set explicitly to zero (i.e. not taken from input +tensor). Shape (second input) could be an empty shape, which means +converting to a scalar. The input tensor's shape and the output tensor's +shape are required to have the same number of elements. + +If the attribute 'allowzero' is set, it is invalid for the specified +shape to contain both a zero value and -1, as the value of the dimension +corresponding to -1 cannot be determined uniquely. + +Parameters +========== +data + Type T. + An input tensor. +shape + Type tensor(int64). + Specified shape for output. +allowzero + Attribute. + (Optional) By default, when any value in the 'shape' input is equal to + zero the corresponding dimension value is copied from the input tensor + dynamically. allowzero=1 indicates that if any value in the 'shape' + input is set to zero, the zero value is honored, similar to NumPy. + +Returns +======= +reshaped : Var + Type T. + Reshaped data. + +Notes +===== +Signature: ``ai.onnx@21::Reshape``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Reshape( - _Reshape.Attributes( - allowzero=AttrInt64(allowzero, name="allowzero"), - ), - _Reshape.Inputs( - data=unwrap_vars(data), - shape=unwrap_vars(shape), - ), - ) - .get_output_vars( - data=get_value(data), - shape=get_value(shape), - ) - .reshaped - ) + return _Reshape( + _Reshape.Attributes( + allowzero=AttrInt64(allowzero, name="allowzero"), + ), _Reshape.Inputs( + data=unwrap_vars(data), shape=unwrap_vars(shape), ), ).get_output_vars( + data=get_value(data), shape=get_value(shape), ).reshaped -def scan( - initial_state_and_scan_inputs: Sequence[Var], - *, - body: Callable[..., Iterable[Var]], - num_scan_inputs: int, - scan_input_axes: Optional[Iterable[int]] = None, - scan_input_directions: Optional[Iterable[int]] = None, - scan_output_axes: Optional[Iterable[int]] = None, - scan_output_directions: Optional[Iterable[int]] = None, -) -> Sequence[Var]: +def scan(initial_state_and_scan_inputs: Sequence[Var], *, body: Callable[..., Iterable[Var]], num_scan_inputs: int, scan_input_axes: Optional[Iterable[int]] = None, scan_input_directions: Optional[Iterable[int]] = None, scan_output_axes: Optional[Iterable[int]] = None, scan_output_directions: Optional[Iterable[int]] = None, ) -> Sequence[Var]: r""" - Scan can be used to iterate over one or more scan_input tensors, - constructing zero or more scan_output tensors. It combines ideas from - general recurrences, functional programming constructs such as scan, - fold, map, and zip, and is intended to enable generalizations of - RNN-like constructs for sequence-to-sequence processing. 
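# Editor's illustrative sketch (not part of the generated opset module): the Reshape
# shape-resolution rules documented above -- a 0 copies the corresponding input
# dimension unless allowzero=1, and a single -1 is inferred from the element count --
# resolved in plain Python before handing off to numpy.reshape.
import numpy as np

def reshape_reference(data, shape, allowzero=0):
    resolved = [
        d if (d != 0 or allowzero) else data.shape[i]   # 0 -> copy input dim when allowzero=0
        for i, d in enumerate(shape)
    ]
    return data.reshape(resolved)                       # numpy infers the -1 dimension itself

x = np.arange(12).reshape(3, 4)
print(reshape_reference(x, [0, -1]).shape)   # (3, 4): 0 copies dim 0, -1 infers 4
print(reshape_reference(x, [2, -1]).shape)   # (2, 6)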
Other tensors - (referred to as state_variables here) can be used to carry a state when - iterating from one element to another (similar to hidden-state in RNNs, - also referred to as loop-carried dependences in the context of loops). - Many common usages involve a single scan_input tensor (where - functionality similar to scan, fold and map can be obtained). When more - than one scan_input is used, a behavior similar to zip is obtained. - - The attribute body must be a graph, specifying the computation to be - performed in every iteration. It takes as input the current values of - the state_variables and the current iterated element of the scan_inputs. - It must return the (updated) values of the state_variables and zero or - more scan_output_element tensors. The values of the scan_output_element - tensors are concatenated over all the iterations to produce the - scan_output values of the scan construct (similar to the concatenated - intermediate hidden-state values of RNN-like constructs). All the output - tensors (state_variables as well as scan_output_element tensors) are - required to have the same shape in each iteration of the loop (a - restriction imposed to enable efficient memory allocation). - - Note that the iterated element passed to the body subgraph does not have - a sequence axis. It will have a rank one less than the rank of the - corresponding scan_input. - - The scan operation returns the final values of the state_variables as - well as the scan_outputs. - - The optional attribute scan_input_directions specifies the direction - (forward or backward) for each scan input. If this attribute is omitted, - all sequences are scanned in the forward direction. A bidirectional scan - may be performed by specifying the same tensor input twice in the - scan_inputs, once with a forward direction, and once with a backward - direction. - - The scan_output of the operation is produced by concatenating the - scan_output_element values produced by the body in each iteration. The - optional attribute scan_output_directions specifies the direction in - which scan_output is constructed (by appending or prepending the - scan_output_element to scan_output in each iteration) for each - scan_output. If this attribute is omitted, the scan_output_element is - appended to the scan_output in each iteration. - - The optional attribute scan_input_axes specifies the axis to be scanned - for each scan_input. If omitted, every scan_input will be scanned in - axis 0. For example, if axis 0 is the batch axis and axis 1 is the time - axis (to be scanned), specify an axis value of 1. Note that scanning a - non-zero axis may be less efficient than scanning axis zero. - - The optional attribute scan_output_axes specifies the axis along which - the scan_outputs are accumulated for each scan_output. For example, if - axis 1 is the time axis (to be scanned) for both inputs and outputs, - specify a scan_input axis and scan_output axis value of 1. - - Note that because of the ONNX restriction that only the last parameter - of an operator can be variadic, the initial-states and scan-inputs are - listed together as one input parameter. Similarly, the final-states and - scan-outputs are listed together as one output parameter. The attribute - num_scan_inputs indicates the number M of scan-inputs. 
- - The behavior of - - :: - - Scan < - num_scan_inputs = m, - body = loop-body, - scan_input_axes = [axis_1, ..., axis_m] - > (init_1, ..., init_n, scan_1, ..., scan_m) - - is equivalent to the following pseudo-code: - - :: - - // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i - // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. - sequence_length = scan_1.shape[axis_1]; - - // initialize state-variables - st_1 = init_1; ... st_n = init_n; - // initialize scan-output variables: [] denotes an empty tensor - scan_out_1 = []; ...; scan_out_k = []; - // identify number of iterations: - - // execute loop - for (int t = 0; t < sequence_length; ++t) { - // generate the scan-input elements: the notation T[t] indicates the sub-tensor - // of rank one less than T obtained by indexing T at position t along axis k. - si_1 = scan_1[t]; - ... ; - si_m = scan_m[t]; - // execute loop-body - st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m) - // accumulate the scan-output elements - scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k); - } - - return st_1, ..., st_n, scan_out_1, ..., scan_out_k; - - *Sample usage: Encoding RNN using a Scan* - - The following example shows how a simple RNN over an input tensor %X, - with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi - and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. - Note that the loop-body is a nested graph, and it directly computes %Wi, - %Ri, %Wbi, and %Rbi (typically constants or initializers in the body - graph). If these values are computed in the outer graph, they need to be - passed in as extra state_variables. - - :: - - graph rnn-encoding { - %H_0 = ... - %X = ... - %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X) - return %Y, %Y_h - } - - graph rnn-cell-1 ( - %H_tminus1[FLOAT, tensor] - %X_t[FLOAT, tensor] - ) { - %Wi = ... - %Ri = ... - %Wbi = ... - %Rbi = ... - %t1 = X_t * (Wi^T) - %t2 = H_tminus1*(Ri^T) - %t3 = Add(%t1, %t2) - %t4 = Add(%t3, %Wbi) - %t5 = Add(%t4, %Rbi) - %Ht = Tanh(%t5) - %Accumulate = Identity(%Ht) - return %Ht, %Accumulate - } - - Parameters - ========== - initial_state_and_scan_inputs - Type V. - Initial values of the loop's N state variables followed by M scan_inputs - body - Attribute. - The graph run each iteration. It has N+M inputs: (loop state - variables..., scan_input_elts...). It has N+K outputs: (loop state - variables..., scan_output_elts...). Each scan_output is created by - concatenating the value of the specified scan_output_elt value at the - end of each iteration of the loop. It is an error if the dimensions of - these values change across loop iterations. - num_scan_inputs - Attribute. - An attribute specifying the number of scan_inputs M. - scan_input_axes - Attribute. - An optional list of M flags. The i-th element of the list specifies the - axis to be scanned (the sequence axis) for the i-th scan_input. If - omitted, 0 will be used as the scan axis for every scan_input. Negative - value for an axis means counting dimensions from the back. Accepted - range is [-r, r-1] where r = rank(input). - scan_input_directions - Attribute. - An optional list of M flags. The i-th element of the list specifies the - direction to be scanned for the i-th scan_input tensor: 0 indicates - forward direction and 1 indicates reverse direction. If omitted, all - scan_input tensors will be scanned in the forward direction. - scan_output_axes - Attribute. 
- An optional list of K flags. The i-th element of the list specifies the - axis for the i-th scan_output. The scan outputs are accumulated along - the specified axis. If omitted, 0 will be used as the scan axis for - every scan_output. Negative value for an axis means counting dimensions - from the back. Accepted range is [-r, r-1]. - scan_output_directions - Attribute. - An optional list of K flags, one for each scan_output. The i-th element - of the list specifies whether the i-th scan_output should be constructed - by appending or prepending a new value in each iteration: 0 indicates - appending and 1 indicates prepending. If omitted, all scan_output - tensors will be produced by appending a value in each iteration. - - Returns - ======= - final_state_and_scan_outputs : Sequence[Var] - Type V. - Final values of the loop's N state variables followed by K scan_outputs - - Notes - ===== - Signature: ``ai.onnx@21::Scan``. - - Type constraints: - - V: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +Scan can be used to iterate over one or more scan_input tensors, +constructing zero or more scan_output tensors. It combines ideas from +general recurrences, functional programming constructs such as scan, +fold, map, and zip, and is intended to enable generalizations of +RNN-like constructs for sequence-to-sequence processing. Other tensors +(referred to as state_variables here) can be used to carry a state when +iterating from one element to another (similar to hidden-state in RNNs, +also referred to as loop-carried dependences in the context of loops). +Many common usages involve a single scan_input tensor (where +functionality similar to scan, fold and map can be obtained). When more +than one scan_input is used, a behavior similar to zip is obtained. + +The attribute body must be a graph, specifying the computation to be +performed in every iteration. It takes as input the current values of +the state_variables and the current iterated element of the scan_inputs. +It must return the (updated) values of the state_variables and zero or +more scan_output_element tensors. The values of the scan_output_element +tensors are concatenated over all the iterations to produce the +scan_output values of the scan construct (similar to the concatenated +intermediate hidden-state values of RNN-like constructs). All the output +tensors (state_variables as well as scan_output_element tensors) are +required to have the same shape in each iteration of the loop (a +restriction imposed to enable efficient memory allocation). + +Note that the iterated element passed to the body subgraph does not have +a sequence axis. It will have a rank one less than the rank of the +corresponding scan_input. + +The scan operation returns the final values of the state_variables as +well as the scan_outputs. + +The optional attribute scan_input_directions specifies the direction +(forward or backward) for each scan input. If this attribute is omitted, +all sequences are scanned in the forward direction. 
A bidirectional scan +may be performed by specifying the same tensor input twice in the +scan_inputs, once with a forward direction, and once with a backward +direction. + +The scan_output of the operation is produced by concatenating the +scan_output_element values produced by the body in each iteration. The +optional attribute scan_output_directions specifies the direction in +which scan_output is constructed (by appending or prepending the +scan_output_element to scan_output in each iteration) for each +scan_output. If this attribute is omitted, the scan_output_element is +appended to the scan_output in each iteration. + +The optional attribute scan_input_axes specifies the axis to be scanned +for each scan_input. If omitted, every scan_input will be scanned in +axis 0. For example, if axis 0 is the batch axis and axis 1 is the time +axis (to be scanned), specify an axis value of 1. Note that scanning a +non-zero axis may be less efficient than scanning axis zero. + +The optional attribute scan_output_axes specifies the axis along which +the scan_outputs are accumulated for each scan_output. For example, if +axis 1 is the time axis (to be scanned) for both inputs and outputs, +specify a scan_input axis and scan_output axis value of 1. + +Note that because of the ONNX restriction that only the last parameter +of an operator can be variadic, the initial-states and scan-inputs are +listed together as one input parameter. Similarly, the final-states and +scan-outputs are listed together as one output parameter. The attribute +num_scan_inputs indicates the number M of scan-inputs. + +The behavior of + +:: + + Scan < + num_scan_inputs = m, + body = loop-body, + scan_input_axes = [axis_1, ..., axis_m] + > (init_1, ..., init_n, scan_1, ..., scan_m) + +is equivalent to the following pseudo-code: + +:: + + // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i + // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j. + sequence_length = scan_1.shape[axis_1]; + + // initialize state-variables + st_1 = init_1; ... st_n = init_n; + // initialize scan-output variables: [] denotes an empty tensor + scan_out_1 = []; ...; scan_out_k = []; + // identify number of iterations: + + // execute loop + for (int t = 0; t < sequence_length; ++t) { + // generate the scan-input elements: the notation T[t] indicates the sub-tensor + // of rank one less than T obtained by indexing T at position t along axis k. + si_1 = scan_1[t]; + ... ; + si_m = scan_m[t]; + // execute loop-body + st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m) + // accumulate the scan-output elements + scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k); + } + + return st_1, ..., st_n, scan_out_1, ..., scan_out_k; + +*Sample usage: Encoding RNN using a Scan* + +The following example shows how a simple RNN over an input tensor %X, +with weight tensor %Wi, recurrence weight tensor %Ri, bias tensors %Wbi +and %Rbi, and initial hidden-state %H_0 can be encoded as a ScanLoop. +Note that the loop-body is a nested graph, and it directly computes %Wi, +%Ri, %Wbi, and %Rbi (typically constants or initializers in the body +graph). If these values are computed in the outer graph, they need to be +passed in as extra state_variables. + +:: + + graph rnn-encoding { + %H_0 = ... + %X = ... + %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X) + return %Y, %Y_h + } + + graph rnn-cell-1 ( + %H_tminus1[FLOAT, tensor] + %X_t[FLOAT, tensor] + ) { + %Wi = ... 
+ %Ri = ... + %Wbi = ... + %Rbi = ... + %t1 = X_t * (Wi^T) + %t2 = H_tminus1*(Ri^T) + %t3 = Add(%t1, %t2) + %t4 = Add(%t3, %Wbi) + %t5 = Add(%t4, %Rbi) + %Ht = Tanh(%t5) + %Accumulate = Identity(%Ht) + return %Ht, %Accumulate + } + +Parameters +========== +initial_state_and_scan_inputs + Type V. + Initial values of the loop's N state variables followed by M scan_inputs +body + Attribute. + The graph run each iteration. It has N+M inputs: (loop state + variables..., scan_input_elts...). It has N+K outputs: (loop state + variables..., scan_output_elts...). Each scan_output is created by + concatenating the value of the specified scan_output_elt value at the + end of each iteration of the loop. It is an error if the dimensions of + these values change across loop iterations. +num_scan_inputs + Attribute. + An attribute specifying the number of scan_inputs M. +scan_input_axes + Attribute. + An optional list of M flags. The i-th element of the list specifies the + axis to be scanned (the sequence axis) for the i-th scan_input. If + omitted, 0 will be used as the scan axis for every scan_input. Negative + value for an axis means counting dimensions from the back. Accepted + range is [-r, r-1] where r = rank(input). +scan_input_directions + Attribute. + An optional list of M flags. The i-th element of the list specifies the + direction to be scanned for the i-th scan_input tensor: 0 indicates + forward direction and 1 indicates reverse direction. If omitted, all + scan_input tensors will be scanned in the forward direction. +scan_output_axes + Attribute. + An optional list of K flags. The i-th element of the list specifies the + axis for the i-th scan_output. The scan outputs are accumulated along + the specified axis. If omitted, 0 will be used as the scan axis for + every scan_output. Negative value for an axis means counting dimensions + from the back. Accepted range is [-r, r-1]. +scan_output_directions + Attribute. + An optional list of K flags, one for each scan_output. The i-th element + of the list specifies whether the i-th scan_output should be constructed + by appending or prepending a new value in each iteration: 0 indicates + appending and 1 indicates prepending. If omitted, all scan_output + tensors will be produced by appending a value in each iteration. + +Returns +======= +final_state_and_scan_outputs : Sequence[Var] + Type V. + Final values of the loop's N state variables followed by K scan_outputs + +Notes +===== +Signature: ``ai.onnx@21::Scan``. 
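# Editor's illustrative sketch (not part of the generated opset module): the Scan
# pseudo-code above for the common case -- forward direction, scan axis 0 -- as a
# plain-Python reference. `body` takes (state..., scan input elements...) and
# returns (state..., scan-output elements...).
import numpy as np

def scan_reference(body, initial_state, scan_inputs):
    n = len(initial_state)                        # number of state variables
    state = list(initial_state)
    scan_outputs = None
    for t in range(scan_inputs[0].shape[0]):      # sequence length along axis 0
        elements = [s[t] for s in scan_inputs]    # iterated element drops the scan axis
        results = body(*state, *elements)
        state, outs = list(results[:n]), results[n:]
        if scan_outputs is None:
            scan_outputs = [[] for _ in outs]
        for acc, out in zip(scan_outputs, outs):
            acc.append(out)                       # scan outputs concatenate over iterations
    return state + [np.stack(acc) for acc in (scan_outputs or [])]

# Running sum: one state variable, one scan input, one scan-output element per iteration.
step = lambda h, x: (h + x, h + x)
print(scan_reference(step, [np.array(0)], [np.arange(1, 5)]))
# final state 10, scan output [1, 3, 6, 10]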
+ +Type constraints: + - V: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ _body_subgraph: Graph = subgraph( - [ - Tensor( - var.unwrap_tensor().dtype, - (lambda x: x[1:] if x is not None else None)(var.unwrap_tensor().shape), - ) - for var in initial_state_and_scan_inputs[:num_scan_inputs] - ] - + [ - Tensor(var.unwrap_tensor().dtype) - for var in initial_state_and_scan_inputs[num_scan_inputs:] - ], - body, - ) - return ( - _Scan( - _Scan.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), - scan_input_axes=AttrInt64s.maybe( - scan_input_axes, name="scan_input_axes" - ), - scan_input_directions=AttrInt64s.maybe( - scan_input_directions, name="scan_input_directions" - ), - scan_output_axes=AttrInt64s.maybe( - scan_output_axes, name="scan_output_axes" - ), - scan_output_directions=AttrInt64s.maybe( - scan_output_directions, name="scan_output_directions" - ), - ), - _Scan.Inputs( - initial_state_and_scan_inputs=unwrap_vars( - initial_state_and_scan_inputs - ), - ), - out_variadic=len(_body_subgraph.requested_results), - ) - .get_output_vars( - initial_state_and_scan_inputs=get_value(initial_state_and_scan_inputs), - ) - .final_state_and_scan_outputs + [Tensor(var.unwrap_tensor().dtype, (lambda x: x[1:] if x is not None else None)(var.unwrap_tensor().shape)) for var in initial_state_and_scan_inputs[:num_scan_inputs]] + [Tensor(var.unwrap_tensor().dtype) for var in initial_state_and_scan_inputs[num_scan_inputs:]], + body ) - - -def shape( - data: Var, - *, - end: Optional[int] = None, - start: int = 0, -) -> Var: + return _Scan( + _Scan.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), + scan_input_axes=AttrInt64s.maybe(scan_input_axes, name="scan_input_axes"), + scan_input_directions=AttrInt64s.maybe(scan_input_directions, name="scan_input_directions"), + scan_output_axes=AttrInt64s.maybe(scan_output_axes, name="scan_output_axes"), + scan_output_directions=AttrInt64s.maybe(scan_output_directions, name="scan_output_directions"), + ), _Scan.Inputs( + initial_state_and_scan_inputs=unwrap_vars(initial_state_and_scan_inputs), ), out_variadic=len(_body_subgraph.requested_results), ).get_output_vars( + initial_state_and_scan_inputs=get_value(initial_state_and_scan_inputs), ).final_state_and_scan_outputs + + +def shape(data: Var, *, end: Optional[int] = None, start: int = 0, ) -> Var: r""" - Takes a tensor as input and outputs an 1D int64 tensor containing the - shape of the input tensor. Optional attributes start and end can be used - to compute a slice of the input tensor's shape. If start axis is - omitted, the slice starts from axis 0. The end axis, if specified, is - exclusive (and the returned value will not include the size of that - axis). If the end axis is omitted, the axes upto the last one will be - included. Negative axes indicate counting back from the last axis. Note - that axes will be clamped to the range [0, r-1], where r is the rank of - the input tensor if they are out-of-range (after adding r in the case of - negative axis). 
Thus, specifying any end value > r is equivalent to - specifying an end value of r, and specifying any start value < -r is - equivalent to specifying a start value of 0. - - Examples: - - :: - - Input tensor with shape: [2, 3, 4] - No attributes specified. - Output: [2, 3, 4] - - :: - - Input tensor with shape: [2, 3, 4] - start: -1 - Output: [4] - - :: - - Input tensor with shape: [2, 3, 4] - end: -1 - Output: [2, 3] - - :: - - Input tensor with shape: [2, 3, 4] - start: 1 - end: 2 - Output: [3] - - Parameters - ========== - data - Type T. - An input tensor. - end - Attribute. - (Optional) Ending axis for slicing the shape. Negative value means - counting dimensions from the back. If omitted, sizes of all axes upto - (including) the last one will be included. - start - Attribute. - (Optional) Starting axis for slicing the shape. Default value is - 0.Negative value means counting dimensions from the back. - - Returns - ======= - shape : Var - Type T1. - Shape of the input tensor - - Notes - ===== - Signature: ``ai.onnx@21::Shape``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(int64)` +Takes a tensor as input and outputs an 1D int64 tensor containing the +shape of the input tensor. Optional attributes start and end can be used +to compute a slice of the input tensor's shape. If start axis is +omitted, the slice starts from axis 0. The end axis, if specified, is +exclusive (and the returned value will not include the size of that +axis). If the end axis is omitted, the axes upto the last one will be +included. Negative axes indicate counting back from the last axis. Note +that axes will be clamped to the range [0, r-1], where r is the rank of +the input tensor if they are out-of-range (after adding r in the case of +negative axis). Thus, specifying any end value > r is equivalent to +specifying an end value of r, and specifying any start value < -r is +equivalent to specifying a start value of 0. + +Examples: + +:: + + Input tensor with shape: [2, 3, 4] + No attributes specified. + Output: [2, 3, 4] + +:: + + Input tensor with shape: [2, 3, 4] + start: -1 + Output: [4] + +:: + + Input tensor with shape: [2, 3, 4] + end: -1 + Output: [2, 3] + +:: + + Input tensor with shape: [2, 3, 4] + start: 1 + end: 2 + Output: [3] + +Parameters +========== +data + Type T. + An input tensor. +end + Attribute. + (Optional) Ending axis for slicing the shape. Negative value means + counting dimensions from the back. If omitted, sizes of all axes upto + (including) the last one will be included. +start + Attribute. + (Optional) Starting axis for slicing the shape. Default value is + 0.Negative value means counting dimensions from the back. + +Returns +======= +shape : Var + Type T1. + Shape of the input tensor + +Notes +===== +Signature: ``ai.onnx@21::Shape``. 
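# Editor's illustrative sketch (not part of the generated opset module): the
# start/end slicing and clamping rules for Shape described above, applied to a
# plain Python shape tuple; the results match the examples in the docstring.
import numpy as np

def shape_reference(data, start=0, end=None):
    r = data.ndim
    def clamp(axis):
        return max(0, min(r, axis + r if axis < 0 else axis))
    hi = r if end is None else clamp(end)
    return np.array(data.shape, dtype=np.int64)[clamp(start):hi]

x = np.zeros((2, 3, 4))
print(shape_reference(x))                   # [2 3 4]
print(shape_reference(x, start=-1))         # [4]
print(shape_reference(x, end=-1))           # [2 3]
print(shape_reference(x, start=1, end=2))   # [3]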
+ +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(int64)` """ - return ( - _Shape( - _Shape.Attributes( - end=AttrInt64.maybe(end, name="end"), - start=AttrInt64(start, name="start"), - ), - _Shape.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .shape - ) + return _Shape( + _Shape.Attributes( + end=AttrInt64.maybe(end, name="end"), + start=AttrInt64(start, name="start"), + ), _Shape.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).shape -def size( - data: Var, -) -> Var: +def size(data: Var, ) -> Var: r""" - Takes a tensor as input and outputs a int64 scalar that equals to the - total number of elements of the input tensor. - - Parameters - ========== - data - Type T. - An input tensor. - - Returns - ======= - size : Var - Type T1. - Total number of elements of the input tensor - - Notes - ===== - Signature: ``ai.onnx@21::Size``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - - T1: `tensor(int64)` +Takes a tensor as input and outputs a int64 scalar that equals to the +total number of elements of the input tensor. + +Parameters +========== +data + Type T. + An input tensor. + +Returns +======= +size : Var + Type T1. + Total number of elements of the input tensor + +Notes +===== +Signature: ``ai.onnx@21::Size``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` + - T1: `tensor(int64)` """ - return ( - _Size( - _Size.Attributes(), - _Size.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .size - ) + return _Size( + _Size.Attributes( + ), _Size.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).size -def squeeze( - data: Var, - axes: Optional[Var] = None, -) -> Var: +def squeeze(data: Var, axes: Optional[Var] = None, ) -> Var: r""" - Remove single-dimensional entries from the shape of a tensor. Takes an - input ``axes`` with a list of axes to squeeze. If ``axes`` is not - provided, all the single dimensions will be removed from the shape. If - an axis is selected with shape entry not equal to one, an error is - raised. - - Parameters - ========== - data - Type T. - Tensors with at least max(dims) dimensions. - axes - Type tensor(int64). - List of integers indicating the dimensions to squeeze. Negative value - means counting dimensions from the back. 
Accepted range is [-r, r-1] - where r = rank(data). - - Returns - ======= - squeezed : Var - Type T. - Reshaped tensor with same data as input. - - Notes - ===== - Signature: ``ai.onnx@21::Squeeze``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +Remove single-dimensional entries from the shape of a tensor. Takes an +input ``axes`` with a list of axes to squeeze. If ``axes`` is not +provided, all the single dimensions will be removed from the shape. If +an axis is selected with shape entry not equal to one, an error is +raised. + +Parameters +========== +data + Type T. + Tensors with at least max(dims) dimensions. +axes + Type tensor(int64). + List of integers indicating the dimensions to squeeze. Negative value + means counting dimensions from the back. Accepted range is [-r, r-1] + where r = rank(data). + +Returns +======= +squeezed : Var + Type T. + Reshaped tensor with same data as input. + +Notes +===== +Signature: ``ai.onnx@21::Squeeze``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Squeeze( - _Squeeze.Attributes(), - _Squeeze.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .squeezed - ) + return _Squeeze( + _Squeeze.Attributes( + ), _Squeeze.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).squeezed -def transpose( - data: Var, - *, - perm: Optional[Iterable[int]] = None, -) -> Var: +def transpose(data: Var, *, perm: Optional[Iterable[int]] = None, ) -> Var: r""" - Transpose the input tensor similar to numpy.transpose. For example, when - perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output - shape will be (2, 1, 3). - - Parameters - ========== - data - Type T. - An input tensor. - perm - Attribute. - A list of integers. By default, reverse the dimensions, otherwise - permute the axes according to the values given. Its length must be equal - to the rank of the input. - - Returns - ======= - transposed : Var - Type T. - Transposed output. - - Notes - ===== - Signature: ``ai.onnx@21::Transpose``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +Transpose the input tensor similar to numpy.transpose. 
For example, when +perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output +shape will be (2, 1, 3). + +Parameters +========== +data + Type T. + An input tensor. +perm + Attribute. + A list of integers. By default, reverse the dimensions, otherwise + permute the axes according to the values given. Its length must be equal + to the rank of the input. + +Returns +======= +transposed : Var + Type T. + Transposed output. + +Notes +===== +Signature: ``ai.onnx@21::Transpose``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Transpose( - _Transpose.Attributes( - perm=AttrInt64s.maybe(perm, name="perm"), - ), - _Transpose.Inputs( - data=unwrap_vars(data), - ), - ) - .get_output_vars( - data=get_value(data), - ) - .transposed - ) + return _Transpose( + _Transpose.Attributes( + perm=AttrInt64s.maybe(perm, name="perm"), + ), _Transpose.Inputs( + data=unwrap_vars(data), ), ).get_output_vars( + data=get_value(data), ).transposed -def unsqueeze( - data: Var, - axes: Var, -) -> Var: +def unsqueeze(data: Var, axes: Var, ) -> Var: r""" - Insert single-dimensional entries to the shape of an input tensor - (``data``). Takes one required input ``axes`` - which contains a list of - dimension indices and this operator will insert a dimension of value - ``1`` into the corresponding index of the output tensor (``expanded``). - - For example, given an input tensor (``data``) of shape [3, 4, 5], then - Unsqueeze(data, axes=[0, 4]) outputs a tensor (``expanded``) containing - same data as ``data`` but with shape [1, 3, 4, 5, 1]. - - The input ``axes`` should not contain any duplicate entries. It is an - error if it contains duplicates. The rank of the output tensor - (``output_rank``) is the rank of the input tensor (``data``) plus the - number of values in ``axes``. Each value in ``axes`` should be within - the (inclusive) range [-output_rank , output_rank - 1]. The order of - values in ``axes`` does not matter and can come in any order. - - Parameters - ========== - data - Type T. - Original tensor - axes - Type tensor(int64). - List of integers indicating the dimensions to be inserted. Negative - value means counting dimensions from the back. Accepted range is [-r, - r-1] where r = rank(expanded). - - Returns - ======= - expanded : Var - Type T. - Reshaped tensor with same data as input. - - Notes - ===== - Signature: ``ai.onnx@21::Unsqueeze``. - - Type constraints: - - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` +Insert single-dimensional entries to the shape of an input tensor +(``data``). Takes one required input ``axes`` - which contains a list of +dimension indices and this operator will insert a dimension of value +``1`` into the corresponding index of the output tensor (``expanded``). 
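+
+In Spox the ``axes`` input is itself a ``Var``; a call matching the
+[3, 4, 5] example below might look like the following sketch (it assumes
+the ``const`` helper defined at the end of this module and ``numpy``
+imported as ``np``):
+
+::
+
+    expanded = unsqueeze(data, const(np.array([0, 4], dtype=np.int64)))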
+ +For example, given an input tensor (``data``) of shape [3, 4, 5], then +Unsqueeze(data, axes=[0, 4]) outputs a tensor (``expanded``) containing +same data as ``data`` but with shape [1, 3, 4, 5, 1]. + +The input ``axes`` should not contain any duplicate entries. It is an +error if it contains duplicates. The rank of the output tensor +(``output_rank``) is the rank of the input tensor (``data``) plus the +number of values in ``axes``. Each value in ``axes`` should be within +the (inclusive) range [-output_rank , output_rank - 1]. The order of +values in ``axes`` does not matter and can come in any order. + +Parameters +========== +data + Type T. + Original tensor +axes + Type tensor(int64). + List of integers indicating the dimensions to be inserted. Negative + value means counting dimensions from the back. Accepted range is [-r, + r-1] where r = rank(expanded). + +Returns +======= +expanded : Var + Type T. + Reshaped tensor with same data as input. + +Notes +===== +Signature: ``ai.onnx@21::Unsqueeze``. + +Type constraints: + - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return ( - _Unsqueeze( - _Unsqueeze.Attributes(), - _Unsqueeze.Inputs( - data=unwrap_vars(data), - axes=unwrap_vars(axes), - ), - ) - .get_output_vars( - data=get_value(data), - axes=get_value(axes), - ) - .expanded - ) + return _Unsqueeze( + _Unsqueeze.Attributes( + ), _Unsqueeze.Inputs( + data=unwrap_vars(data), axes=unwrap_vars(axes), ), ).get_output_vars( + data=get_value(data), axes=get_value(axes), ).expanded def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: @@ -3154,4 +2635,4 @@ def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: "Xor": xor, } -__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] + ["const"] +__all__ = [fun.__name__ for fun in _CONSTRUCTORS.values()] + ["const"] diff --git a/tests/test_custom_operator.py b/tests/test_custom_operator.py index fc102a8..ae14974 100644 --- a/tests/test_custom_operator.py +++ b/tests/test_custom_operator.py @@ -44,7 +44,7 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: # This is technically optional, but using an operator without type inference may be inconvenient. if self.inputs.X.type is None: return {} diff --git a/tests/test_function.py b/tests/test_function.py index ce6ec4c..7405ede 100644 --- a/tests/test_function.py +++ b/tests/test_function.py @@ -44,7 +44,7 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: + def constructor(self, attrs: dict[str, Attr], inputs: Inputs.Vars) -> Outputs: # FIXME: At some point, attribute references should be properly type-hinted. 
a = op.constant( value_float=_Ref( diff --git a/tools/templates/class.jinja2 b/tools/templates/class.jinja2 index f5d6a4f..b362e96 100644 --- a/tools/templates/class.jinja2 +++ b/tools/templates/class.jinja2 @@ -47,7 +47,7 @@ class _{{ schema.name }}(StandardNode): {% endif %} {% if type_inference %} - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, initializers={}) -> dict[str, Type]: {% filter indent(width=8) %} {%+ include type_inference %} {% endfilter %}
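
The regenerated v21 constructors above keep their public call signatures, so
user code drives them the same way as before. A minimal end-to-end sketch
(hedged: it assumes Spox's top-level ``argument``, ``Tensor`` and ``build``
helpers and that this generated module is importable as
``spox.opset.ai.onnx.v21``):

::

    import numpy as np

    from spox import Tensor, argument, build
    import spox.opset.ai.onnx.v21 as op

    # Symbolic float32 input of shape (2, 3, 4).
    x = argument(Tensor(np.float32, (2, 3, 4)))

    dims = op.shape(x)                         # 1D int64 tensor: [2, 3, 4]
    count = op.size(x)                         # int64 scalar: 24
    flipped = op.transpose(x, perm=[2, 1, 0])  # shape (4, 3, 2)

    # squeeze/unsqueeze take their axes as a Var; const() wraps a constant.
    axis0 = op.const(np.array([0], dtype=np.int64))
    widened = op.unsqueeze(x, axis0)           # shape (1, 2, 3, 4)
    restored = op.squeeze(widened, axis0)      # shape (2, 3, 4)

    model = build(
        inputs={"x": x},
        outputs={"dims": dims, "count": count,
                 "flipped": flipped, "restored": restored},
    )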