From 320d57f833aa5103ae8a76f16db0b729333116f7 Mon Sep 17 00:00:00 2001 From: Atanas Dimitrov <70822030+neNasko1@users.noreply.github.com> Date: Tue, 10 Dec 2024 10:03:45 +0000 Subject: [PATCH] Introduce `_VarInfo` internally to reduce memory footprint in value propagation (#189) * Init * Run linter * Changes * Fix ml * Fix some tests * Fix some tests * Fix more tests * Add some proper typing * More initializers * Fix passing * Minor fixes and linter * Change initializers name to input_prop_values * Make tests passing * Hacky fix mypy * Correctly codegen * Comments after code review * Improve type checking * Pre-commit enable * Update documentation * Fix adapt node * Fix function inputs passing * Fix opset generation * Hint that _VarInfo is private * Fix jinja * Fix variadic input value propagation * Comments after code review * Move validation to after propagation * Fix diff * Fix diffs * Improve type-hinting information * Remove unneded functions * Init * fix * Final fixes * Add test for propagation of optional var * Unify logic around VarInfos -> Var * Add comment * Improve qol * Update CHANGELOG.rst * Fix tools/generate * Update CHANGELOG.rst Co-authored-by: Christian Bourjau * Comments after code-review --------- Co-authored-by: Christian Bourjau --- CHANGELOG.rst | 7 + src/spox/_adapt.py | 20 +- src/spox/_build.py | 30 +- src/spox/_debug.py | 4 +- src/spox/_fields.py | 132 +- src/spox/_function.py | 52 +- src/spox/_future.py | 4 +- src/spox/_graph.py | 61 +- src/spox/_inline.py | 22 +- src/spox/_internal_op.py | 38 +- src/spox/_node.py | 120 +- src/spox/_public.py | 32 +- src/spox/_scope.py | 20 +- src/spox/_standard.py | 90 +- src/spox/_value_prop.py | 3 +- src/spox/_var.py | 251 +- src/spox/opset/ai/onnx/ml/v3.py | 820 ++- src/spox/opset/ai/onnx/ml/v4.py | 52 +- src/spox/opset/ai/onnx/ml/v5.py | 73 +- src/spox/opset/ai/onnx/v17.py | 5751 +++++++++++------ src/spox/opset/ai/onnx/v18.py | 899 ++- src/spox/opset/ai/onnx/v19.py | 654 +- src/spox/opset/ai/onnx/v20.py | 413 +- src/spox/opset/ai/onnx/v21.py | 706 +- tests/test_adapt.py | 16 +- tests/test_constructors.py | 5 +- tests/test_custom_operator.py | 19 +- tests/test_function.py | 85 +- tests/test_value_propagation.py | 19 +- tools/templates/class.jinja2 | 16 +- tools/templates/construct.jinja2 | 14 +- tools/templates/preamble.jinja2 | 7 +- .../type_inference/compress11.jinja2 | 4 +- .../type_inference/loop16-fix.jinja2 | 4 +- 34 files changed, 6636 insertions(+), 3807 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f671ae6b..6f6d6185 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,13 @@ Change log ========== +0.14.0 (unreleased) +------------------- + +**Other changes** + +- Propagated values may now be garbage collected if their associated `Var` object goes out of scope. + 0.13.0 (2024-12-06) ------------------- diff --git a/src/spox/_adapt.py b/src/spox/_adapt.py index bdd3cb28..8877e40e 100644 --- a/src/spox/_adapt.py +++ b/src/spox/_adapt.py @@ -13,7 +13,7 @@ from ._node import Node from ._schemas import SCHEMAS from ._scope import Scope -from ._var import Var +from ._var import _VarInfo def adapt_node( @@ -21,7 +21,7 @@ def adapt_node( proto: onnx.NodeProto, source_version: int, target_version: int, - var_names: dict[Var, str], + var_names: dict[_VarInfo, str], ) -> Optional[list[onnx.NodeProto]]: if source_version == target_version: return None @@ -30,16 +30,16 @@ def adapt_node( # By using a dictionary we ensure that we only have a single # ValueInfo per (possibly repeated) input name. 
input_info = { - var_names[var]: var.unwrap_type()._to_onnx_value_info( - var_names[var], _traceback_name=f"adapt-input {key}" + var_names[var_info]: var_info.unwrap_type()._to_onnx_value_info( + var_names[var_info], _traceback_name=f"adapt-input {key}" ) - for key, var in node.inputs.get_vars().items() + for key, var_info in node.inputs.get_var_infos().items() } output_info = [ - var.unwrap_type()._to_onnx_value_info( - var_names[var], _traceback_name=f"adapt-output {key}" + var_info.unwrap_type()._to_onnx_value_info( + var_names[var_info], _traceback_name=f"adapt-output {key}" ) - for key, var in node.outputs.get_vars().items() + for key, var_info in node.outputs.get_var_infos().items() ] except ValueError: return None @@ -63,7 +63,7 @@ def adapt_inline( node: _Inline, protos: list[onnx.NodeProto], target_opsets: dict[str, int], - var_names: dict[Var, str], + var_names: dict[_VarInfo, str], node_name: str, ) -> list[onnx.NodeProto]: source_version = max({v for d, v in node.opset_req if d in ("", "ai.onnx")}) @@ -91,7 +91,7 @@ def adapt_best_effort( node: Node, protos: list[onnx.NodeProto], opsets: dict[str, int], - var_names: dict[Var, str], + var_names: dict[_VarInfo, str], node_names: dict[Node, str], ) -> Optional[list[onnx.NodeProto]]: if isinstance(node, _Inline): diff --git a/src/spox/_build.py b/src/spox/_build.py index 7ae4cc92..1fb24cea 100644 --- a/src/spox/_build.py +++ b/src/spox/_build.py @@ -23,7 +23,7 @@ from ._node import Node from ._scope import Scope from ._traverse import iterative_dfs -from ._var import Var +from ._var import Var, _VarInfo, unwrap_vars if TYPE_CHECKING: from ._graph import Graph @@ -60,11 +60,11 @@ class BuildResult: scope: Scope nodes: dict[Node, tuple[onnx.NodeProto, ...]] - arguments: tuple[Var, ...] - results: tuple[Var, ...] + arguments: tuple[_VarInfo, ...] + results: tuple[_VarInfo, ...] opset_req: set[tuple[str, int]] functions: tuple[_function.Function, ...] - initializers: dict[Var, np.ndarray] + initializers: dict[_VarInfo, np.ndarray] class Builder: @@ -95,7 +95,7 @@ class ScopeTree: """ Structure representing the tree of scopes, which are identified with the respective graphs. - This structure is the base of the least-enclosing-scope algorithm. Every value (Var), and hence + This structure is the base of the least-enclosing-scope algorithm. Every value (VarInfo), and hence the responsible Node - up to its (Python object) identity may appear in multiple scopes, but it should best-cased be computed only once in the ONNX graph, same as in the Python source code. @@ -166,12 +166,12 @@ def lca(self, a: Graph, b: Graph) -> Graph: graphs: set[Graph] graph_topo: list[Graph] # Arguments, results - arguments_of: dict[Graph, list[Var]] - results_of: dict[Graph, list[Var]] + arguments_of: dict[Graph, list[_VarInfo]] + results_of: dict[Graph, list[_VarInfo]] source_of: dict[Graph, Node] # Arguments found by traversal - all_arguments_in: dict[Graph, set[Var]] - claimed_arguments_in: dict[Graph, set[Var]] + all_arguments_in: dict[Graph, set[_VarInfo]] + claimed_arguments_in: dict[Graph, set[_VarInfo]] # Scopes scope_tree: ScopeTree scope_own: dict[Graph, list[Node]] @@ -220,7 +220,7 @@ def get_intro_results( var._rename(key) return vars - def discover(self, graph: Graph) -> tuple[set[Var], set[Var]]: + def discover(self, graph: Graph) -> tuple[set[_VarInfo], set[_VarInfo]]: """ Run the discovery step of the build process. Resolves arguments and results for the involved graphs. 
Finds the topological ordering between (sub)graphs and sets their owners (nodes of which they are attributes). @@ -246,8 +246,8 @@ def discover(self, graph: Graph) -> tuple[set[Var], set[Var]]: # Create and set the source & results of this graph if not graph.requested_results: raise BuildError(f"Graph {graph} has no results.") - self.results_of[graph] = self.get_intro_results( - graph.requested_results, graph is self.main + self.results_of[graph] = unwrap_vars( + self.get_intro_results(graph.requested_results, graph is self.main) ) self.source_of[graph] = self.results_of[graph][0]._op @@ -291,8 +291,8 @@ def collect_arguments(nd: Node) -> None: self.arguments_of[graph] = list(all_arguments - claimed_arguments) else: # If there is a request, we may not have found it by traversal if an argument was unused. - all_arguments |= set(graph.requested_arguments) - self.arguments_of[graph] = list(graph.requested_arguments) + all_arguments |= set(unwrap_vars(graph.requested_arguments)) + self.arguments_of[graph] = unwrap_vars(graph.requested_arguments) if set(self.arguments_of[graph]) & claimed_arguments: raise BuildError( @@ -434,7 +434,7 @@ def compile_graph( # A bunch of model metadata we're collecting opset_req: set[tuple[str, int]] = set() functions: list[_function.Function] = [] - initializers: dict[Var, np.ndarray] = {} + initializers: dict[_VarInfo, np.ndarray] = {} # Add arguments to our scope for arg in self.arguments_of[graph]: diff --git a/src/spox/_debug.py b/src/spox/_debug.py index c3c62600..eece1f70 100644 --- a/src/spox/_debug.py +++ b/src/spox/_debug.py @@ -6,7 +6,7 @@ from contextlib import contextmanager from typing import Any -from spox._var import Var +from spox._var import _VarInfo # If `STORE_TRACEBACK` is `True` any node created will store a traceback for its point of creation. 
STORE_TRACEBACK = False @@ -40,7 +40,7 @@ def show_construction_tracebacks( if -1 in found: del found[-1] for name, obj in reversed(found.values()): - if isinstance(obj, Var): + if isinstance(obj, _VarInfo): if not obj: continue node = obj._op diff --git a/src/spox/_fields.py b/src/spox/_fields.py index ca2ac254..ff5d6efc 100644 --- a/src/spox/_fields.py +++ b/src/spox/_fields.py @@ -1,14 +1,20 @@ # Copyright (c) QuantCo 2023-2024 # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations + import dataclasses import enum +import warnings from collections.abc import Iterable, Iterator, Sequence from dataclasses import Field, dataclass -from typing import Any, Optional, Union, get_type_hints +from typing import Optional, Union, get_type_hints from ._attributes import Attr -from ._var import Var +from ._exceptions import InferenceWarning +from ._type_system import Optional as tOptional +from ._value_prop import PropDict, PropValue +from ._var import Var, _VarInfo @dataclass @@ -31,20 +37,63 @@ class VarFieldKind(enum.Enum): VARIADIC = 2 +class BaseVars: + """A collection of `Var`-s used to carry around inputs/outputs of nodes""" + + vars: dict[str, Union[Var, Optional[Var], Sequence[Var]]] + + def __init__(self, vars: dict[str, Union[Var, Optional[Var], Sequence[Var]]]): + self.vars = vars + + def _unpack_to_any(self) -> tuple[Union[Var, Optional[Var], Sequence[Var]], ...]: + """Unpack the stored fields into a tuple of appropriate length, typed as Any.""" + return tuple(self.vars.values()) + + def _flatten(self) -> Iterator[tuple[str, Optional[Var]]]: + """Iterate over the pairs of names and values of fields in this object.""" + for key, value in self.vars.items(): + if value is None or isinstance(value, Var): + yield key, value + else: + yield from ((f"{key}_{i}", v) for i, v in enumerate(value)) + + def flatten_vars(self) -> dict[str, Var]: + """Return a flat mapping by name of all the VarInfos in this object.""" + return {key: var for key, var in self._flatten() if var is not None} + + def __getattr__(self, attr: str) -> Union[Var, Optional[Var], Sequence[Var]]: + """Retrieves the attribute if present in the stored variables.""" + try: + return self.vars[attr] + except KeyError: + raise AttributeError( + f"{self.__class__.__name__!r} object has no attribute {attr!r}" + ) + + def __setattr__( + self, attr: str, value: Union[Var, Optional[Var], Sequence[Var]] + ) -> None: + """Sets the attribute to a value if the attribute is present in the stored variables.""" + if attr == "vars": + super().__setattr__(attr, value) + else: + self.vars[attr] = value + + @dataclass -class BaseVars(BaseFields): +class BaseVarInfos(BaseFields): def __post_init__(self) -> None: # Check if passed fields are of the appropriate types based on field kinds for field in dataclasses.fields(self): value = getattr(self, field.name) field_type = self._get_field_type(field) if field_type == VarFieldKind.SINGLE: - if not isinstance(value, Var): - raise TypeError(f"Field expected Var, got: {type(value)}.") + if not isinstance(value, _VarInfo): + raise TypeError(f"Field expected VarInfo, got: {type(value)}.") elif field_type == VarFieldKind.OPTIONAL: - if value is not None and not isinstance(value, Var): + if value is not None and not isinstance(value, _VarInfo): raise TypeError( - f"Optional must be Var or None, got: {type(value)}." + f"Optional must be VarInfo or None, got: {type(value)}." 
) elif field_type == VarFieldKind.VARIADIC: if not isinstance(value, Iterable): @@ -53,9 +102,9 @@ def __post_init__(self) -> None: ) # Cast to tuple to avoid accidental mutation setattr(self, field.name, tuple(value)) - if bad := {type(var) for var in value} - {Var}: + if bad := {type(var) for var in value} - {_VarInfo}: raise TypeError( - f"Variadic field must only consist of Vars, got: {bad}." + f"Variadic field must only consist of VarInfos, got: {bad}." ) @classmethod @@ -64,23 +113,23 @@ def _get_field_type(cls, field: Field) -> VarFieldKind: # The field.type may be unannotated as per # from __future__ import annotations field_type = get_type_hints(cls)[field.name] - if field_type == Var: + if field_type == _VarInfo: return VarFieldKind.SINGLE - elif field_type == Optional[Var]: + elif field_type == Optional[_VarInfo]: return VarFieldKind.OPTIONAL - elif field_type == Sequence[Var]: + elif field_type == Sequence[_VarInfo]: return VarFieldKind.VARIADIC raise ValueError(f"Bad field type: '{field.type}'.") - def _flatten(self) -> Iterable[tuple[str, Optional[Var]]]: + def _flatten(self) -> Iterable[tuple[str, Optional[_VarInfo]]]: """Iterate over the pairs of names and values of fields in this object.""" for key, value in self.__dict__.items(): - if value is None or isinstance(value, Var): + if value is None or isinstance(value, _VarInfo): yield key, value else: yield from ((f"{key}_{i}", v) for i, v in enumerate(value)) - def __iter__(self) -> Iterator[Optional[Var]]: + def __iter__(self) -> Iterator[Optional[_VarInfo]]: """Iterate over the values of fields in this object.""" yield from (v for _, v in self._flatten()) @@ -88,32 +137,65 @@ def __len__(self) -> int: """Count the number of fields in this object (should be same as declared in the class).""" return sum(1 for _ in self) - def get_vars(self) -> dict[str, Var]: - """Return a flat mapping by name of all the Vars in this object.""" + def get_var_infos(self) -> dict[str, _VarInfo]: + """Return a flat mapping by name of all the VarInfos in this object.""" return {key: var for key, var in self._flatten() if var is not None} - def get_fields(self) -> dict[str, Union[None, Var, Sequence[Var]]]: + def get_fields(self) -> dict[str, Union[None, _VarInfo, Sequence[_VarInfo]]]: """Return a mapping of all fields stored in this object by name.""" return self.__dict__.copy() - def _unpack_to_any(self) -> Any: - """Unpack the stored fields into a tuple of appropriate length, typed as Any.""" - return tuple(self.__dict__.values()) - @property def fully_typed(self) -> bool: """Check if all stored variables have a concrete type.""" return all( var.type is not None and var.type._is_concrete - for var in self.get_vars().values() + for var in self.get_var_infos().values() ) + def into_vars(self, prop_values: PropDict) -> BaseVars: + """Populate a `BaseVars` object with the propagated values and this object's var_infos""" + + def _create_var(key: str, var_info: _VarInfo) -> Var: + ret = Var(var_info, None) + + if var_info.type is None or key not in prop_values: + return ret + + if not isinstance(var_info.type, tOptional) and prop_values[key] is None: + return ret + + prop = PropValue(var_info.type, prop_values[key]) + if prop.check(): + ret._value = prop + else: + warnings.warn( + InferenceWarning( + f"Propagated value {prop} does not type-check, dropping. " + f"Hint: this indicates a bug with the current value prop backend or type inference." 
+ ) + ) + + return ret + + ret_dict: dict[str, Union[Var, Optional[Var], Sequence[Var]]] = {} + + for key, var_info in self.__dict__.items(): + if isinstance(var_info, _VarInfo): + ret_dict[key] = _create_var(key, var_info) + else: + ret_dict[key] = [ + _create_var(f"{key}_{i}", v) for i, v in enumerate(var_info) + ] + + return BaseVars(ret_dict) + @dataclass -class BaseInputs(BaseVars): +class BaseInputs(BaseVarInfos): pass @dataclass -class BaseOutputs(BaseVars): +class BaseOutputs(BaseVarInfos): pass diff --git a/src/spox/_function.py b/src/spox/_function.py index 036daa93..63760d75 100644 --- a/src/spox/_function.py +++ b/src/spox/_function.py @@ -13,18 +13,19 @@ import onnx from . import _attributes -from ._fields import BaseAttributes, BaseInputs, BaseOutputs +from ._fields import BaseAttributes, BaseInputs, BaseOutputs, BaseVars from ._internal_op import _InternalNode from ._node import Node, OpType from ._type_system import Type -from ._var import Var +from ._var import Var, _VarInfo, unwrap_vars if TYPE_CHECKING: from . import _graph + from ._value_prop import PropDict DEFAULT_FUNCTION_DOMAIN = "spox.default" -ConstructorT = TypeVar("ConstructorT", bound=Callable[..., Iterable[Var]]) +ConstructorT = TypeVar("ConstructorT", bound=Callable[..., Iterable[_VarInfo]]) class Function(_InternalNode): @@ -45,14 +46,14 @@ class Function(_InternalNode): via the ``to_onnx_function`` method. """ - func_args: dict[str, Var] + func_args: dict[str, _VarInfo] func_attrs: dict[str, _attributes.Attr] func_inputs: BaseInputs func_outputs: BaseOutputs func_graph: _graph.Graph def constructor( - self, attrs: dict[str, _attributes.Attr], inputs: BaseInputs + self, attrs: dict[str, _attributes.Attr], inputs: BaseVars ) -> BaseOutputs: """ Abstract method for functions. @@ -66,13 +67,15 @@ def constructor( f"Function {type(self).__name__} does not implement a constructor." ) - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: from . 
import _graph - self.func_args = _graph.arguments_dict( - **{name: var.type for name, var in self.inputs.get_vars().items()} + func_args_var = _graph.arguments_dict( + **{name: var.type for name, var in self.inputs.get_var_infos().items()} ) + self.func_args = unwrap_vars(func_args_var) + self.func_attrs = {} for name, attr in self.attrs.get_fields().items(): if attr is None: @@ -82,14 +85,16 @@ def infer_output_types(self) -> dict[str, Type]: self.func_attrs[name] = attr self.func_inputs = self.Inputs(**self.func_args) - self.func_outputs = self.constructor(self.func_attrs, self.func_inputs) - self.func_graph = _graph.results(**self.func_outputs.get_vars()).with_arguments( - *self.func_args.values() + self.func_outputs = self.constructor( + self.func_attrs, self.func_inputs.into_vars(input_prop_values) ) + self.func_graph = _graph.results( + **self.func_outputs.into_vars(input_prop_values).flatten_vars() + ).with_arguments(*func_args_var.values()) return { name: var.type - for name, var in self.func_outputs.get_vars().items() + for name, var in self.func_outputs.get_var_infos().items() if var.type } @@ -101,7 +106,7 @@ def opset_req(self) -> set[tuple[str, int]]: def update_metadata( self, opset_req: set[tuple[str, int]], - initializers: dict[Var, np.ndarray], + initializers: dict[_VarInfo, np.ndarray], functions: list[Function], ) -> None: super().update_metadata(opset_req, initializers, functions) @@ -142,12 +147,14 @@ def _make_function_cls( name: str, ) -> type[Function]: _FuncInputs = make_dataclass( - "_FuncInputs", ((f"in{i}", Var) for i in range(num_inputs)), bases=(BaseInputs,) + "_FuncInputs", + ((f"in{i}", _VarInfo) for i in range(num_inputs)), + bases=(BaseInputs,), ) _FuncOutputs = make_dataclass( "_FuncOutputs", - ((f"out{i}", Var) for i in range(num_outputs)), + ((f"out{i}", _VarInfo) for i in range(num_outputs)), bases=(BaseOutputs,), ) @@ -160,8 +167,10 @@ class Attributes(BaseAttributes): Outputs = _FuncOutputs op_type = OpType(name, domain, version) - def constructor(self, attrs: dict[str, _attributes.Attr], inputs: Any) -> Any: - return self.Outputs(*fun(*inputs.get_fields().values())) + def constructor( + self, attrs: dict[str, _attributes.Attr], inputs: BaseVars + ) -> BaseOutputs: + return self.Outputs(*unwrap_vars(fun(*inputs.flatten_vars().values()))) return _Func @@ -208,9 +217,12 @@ def init(*args: Var) -> type[Function]: def alt_fun(*args: Var) -> Iterable[Union[Var, Optional[Var], Sequence[Var]]]: cls = init(*args) - return list( - cls(cls.Attributes(), cls.Inputs(*args)).outputs.get_fields().values() - ) + return [ + Var(var_info) + for var_info in cls(cls.Attributes(), cls.Inputs(*unwrap_vars(args))) + .outputs.get_var_infos() + .values() + ] return alt_fun # type: ignore diff --git a/src/spox/_future.py b/src/spox/_future.py index b955b629..e7d7b76b 100644 --- a/src/spox/_future.py +++ b/src/spox/_future.py @@ -116,7 +116,9 @@ def _promote( ) # TODO: Handle more constant-target inconsistencies here? 
- def _promote_target(obj: Union[Var, np.generic, int, float]) -> Optional[Var]: + def _promote_target( + obj: Union[Var, np.generic, int, float], + ) -> Optional[Var]: if self.constant_promotion and isinstance(obj, (np.generic, int, float)): return self.op.const(np.array(obj, dtype=target_type)) elif isinstance(obj, Var): diff --git a/src/spox/_graph.py b/src/spox/_graph.py index a28257c7..44afb765 100644 --- a/src/spox/_graph.py +++ b/src/spox/_graph.py @@ -24,7 +24,7 @@ from ._schemas import max_opset_policy from ._type_system import Tensor, Type from ._utils import from_array -from ._var import Var +from ._var import Var, _VarInfo def arguments_dict(**kwargs: Optional[Union[Type, np.ndarray]]) -> dict[str, Var]: @@ -45,27 +45,35 @@ def arguments_dict(**kwargs: Optional[Union[Type, np.ndarray]]) -> dict[str, Var for name, info in kwargs.items(): attr_name = AttrString(value=name, name="dummy") if isinstance(info, Type): - result[name] = Argument( - Argument.Attributes( - name=attr_name, - type=AttrType(value=info, name="dummy"), - default=None, - ), - BaseInputs(), - ).outputs.arg + result[name] = ( + Argument( + Argument.Attributes( + name=attr_name, + type=AttrType(value=info, name="dummy"), + default=None, + ), + BaseInputs(), + ) + .get_output_vars() + .arg + ) elif isinstance(info, np.ndarray): ty = Tensor(info.dtype, info.shape) - result[name] = Argument( - Argument.Attributes( - name=attr_name, - type=AttrType(value=ty, name="dummy"), - default=AttrTensor(value=info, name="dummy"), - ), - BaseInputs(), - ).outputs.arg + result[name] = ( + Argument( + Argument.Attributes( + name=attr_name, + type=AttrType(value=ty, name="dummy"), + default=AttrTensor(value=info, name="dummy"), + ), + BaseInputs(), + ) + .get_output_vars() + .arg + ) else: raise TypeError(f"Cannot construct argument from {type(info)}.") - return result + return result # type: ignore def arguments(**kwargs: Optional[Union[Type, np.ndarray]]) -> tuple[Var, ...]: @@ -112,10 +120,14 @@ def initializer(arr: np.ndarray) -> Var: ------- Var which is always equal to the respective value provided by `arr`. """ - return _Initializer( - _Initializer.Attributes(value=AttrTensor(value=arr, name="dummy")), - BaseInputs(), - ).outputs.arg + return ( + _Initializer( + _Initializer.Attributes(value=AttrTensor(value=arr, name="dummy")), + BaseInputs(), + ) + .get_output_vars() + .arg # type: ignore + ) @dataclass(frozen=True, eq=False) @@ -224,7 +236,7 @@ def requested_results(self) -> dict[str, Var]: """Results (named) requested by this Graph (for building).""" return self._results - def get_arguments(self) -> dict[str, Var]: + def get_arguments(self) -> dict[str, _VarInfo]: """ Get the effective named arguments (after build) of this Graph. @@ -235,7 +247,7 @@ def get_arguments(self) -> dict[str, Var]: for var in self._get_build_result().arguments } - def get_results(self) -> dict[str, Var]: + def get_results(self) -> dict[str, _VarInfo]: """ Get the effective named results (after build) of this Graph. 
@@ -498,4 +510,5 @@ def subgraph(types: Iterable[Type], fun: Callable[..., Iterable[Var]]) -> Graph: outs = fun(*ins) if not (isinstance(outs, Iterable) and all(isinstance(out, Var) for out in outs)): raise TypeError("Subgraph result must be an Iterable of Var.") + return enum_results(*outs).with_arguments(*ins)._with_constructor(fun) diff --git a/src/spox/_inline.py b/src/spox/_inline.py index 8b6f934f..96da6459 100644 --- a/src/spox/_inline.py +++ b/src/spox/_inline.py @@ -14,7 +14,7 @@ from spox._node import OpType from spox._scope import Scope from spox._type_system import Type -from spox._var import Var +from spox._var import _VarInfo from . import _value_prop @@ -86,11 +86,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - inputs: Sequence[Var] + inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("Inline", "spox.internal", 0) @@ -111,7 +111,9 @@ def opset_req(self) -> set[tuple[str, int]]: ("", INTERNAL_MIN_OPSET) } - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types( + self, input_prop_values: _value_prop.PropDict + ) -> dict[str, Type]: # First, type check that we match the ModelProto type requirements for i, var in zip(self.graph.input, self.inputs.inputs): if var.type is not None and not ( @@ -127,16 +129,18 @@ def infer_output_types(self) -> dict[str, Type]: for k, o in enumerate(self.graph.output) } - def propagate_values(self) -> dict[str, _value_prop.PropValueType]: + def propagate_values( + self, input_prop_values: _value_prop.PropDict + ) -> dict[str, _value_prop.PropValueType]: if any( - var.type is None or var._value is None - for var in self.inputs.get_vars().values() + var_info.type is None or input_prop_values.get(var_info.name) is None + for var_info in self.model.graph.input ): return {} wrap_feed, run, unwrap_feed = _value_prop.get_backend_calls() input_feed = { - i.name: wrap_feed(var._value) - for i, var in zip(self.model.graph.input, self.inputs.inputs) + i.name: wrap_feed(input_prop_values.get(i.name)) + for i in self.model.graph.input } output_feed = run(self.model, input_feed) return { diff --git a/src/spox/_internal_op.py b/src/spox/_internal_op.py index 5da70aca..3a3af424 100644 --- a/src/spox/_internal_op.py +++ b/src/spox/_internal_op.py @@ -22,8 +22,11 @@ from ._scope import Scope from ._shape import SimpleShape from ._type_system import Tensor, Type -from ._value_prop import PropValueType -from ._var import Var +from ._value_prop import PropDict, PropValueType +from ._var import Var, _VarInfo, unwrap_vars + +if TYPE_CHECKING: + from ._function import Function if TYPE_CHECKING: from ._function import Function @@ -84,7 +87,7 @@ class Inputs(BaseInputs): @dataclass class Outputs(BaseOutputs): - arg: Var + arg: _VarInfo attrs: Attributes inputs: Inputs @@ -94,14 +97,14 @@ def post_init(self, **kwargs: Any) -> None: if self.attrs.name is not None: self.outputs.arg._rename(self.attrs.name.value) - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: # Output type is based on the value of the type attribute return {"arg": self.attrs.type.value} def update_metadata( self, opset_req: set[tuple[str, int]], - initializers: dict[Var, np.ndarray], + initializers: dict[_VarInfo, np.ndarray], functions: list[Function], ) -> None: super().update_metadata(opset_req, initializers, functions) @@ -129,24 +132,24 @@ class Attributes(BaseAttributes): @dataclass 
class Outputs(BaseOutputs): - arg: Var + arg: _VarInfo attrs: Attributes inputs: BaseInputs outputs: Outputs - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: # Output type is based on the value of the type attribute arr = self.attrs.value.value return {"arg": Tensor(arr.dtype, arr.shape)} - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: return {"arg": self.attrs.value.value} def update_metadata( self, opset_req: set[tuple[str, int]], - initializers: dict[Var, np.ndarray], + initializers: dict[_VarInfo, np.ndarray], functions: list[Function], ) -> None: super().update_metadata(opset_req, initializers, functions) @@ -171,11 +174,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - inputs: Sequence[Var] + inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("Introduce", "spox.internal", 0) @@ -183,7 +186,7 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: return { f"outputs_{i}": arr.type for i, arr in enumerate(self.inputs.inputs) @@ -238,9 +241,11 @@ def intros(*args: Var) -> Sequence[Var]: Sequence[Var] Vars of the same value as ``args``, but with a shared dependency. """ - return _Introduce( - None, _Introduce.Inputs(args), out_variadic=len(args) - ).outputs.outputs + return ( + _Introduce(None, _Introduce.Inputs(unwrap_vars(args)), out_variadic=len(args)) + .get_output_vars() + .outputs # type: ignore + ) def intro(*args: Var) -> Var: @@ -271,8 +276,7 @@ def unsafe_cast(x: Var, typ: Type) -> Var: Var with the type reset to whatever was given. """ y = intro(x) - y.type = typ - y._value = x._value + y._var_info.type = typ return y diff --git a/src/spox/_node.py b/src/spox/_node.py index aecb2460..8e2a0222 100644 --- a/src/spox/_node.py +++ b/src/spox/_node.py @@ -10,7 +10,7 @@ import typing import warnings from abc import ABC -from collections.abc import Generator, Iterable, Sequence +from collections.abc import Iterable, Iterator, Sequence from dataclasses import dataclass from typing import Any, ClassVar, Optional, Union @@ -20,10 +20,17 @@ from ._attributes import AttrGraph from ._debug import STORE_TRACEBACK from ._exceptions import InferenceWarning -from ._fields import BaseAttributes, BaseInputs, BaseOutputs, BaseVars, VarFieldKind +from ._fields import ( + BaseAttributes, + BaseInputs, + BaseOutputs, + BaseVarInfos, + BaseVars, + VarFieldKind, +) from ._type_system import Type -from ._value_prop import PropValue, PropValueType -from ._var import Var +from ._value_prop import PropDict +from ._var import _VarInfo if typing.TYPE_CHECKING: from ._function import Function @@ -89,6 +96,7 @@ class Node(ABC): out_variadic: Optional[int] _traceback: Union[list[str], None] + _validate: bool def __init__( self, @@ -98,7 +106,6 @@ def __init__( *, out_variadic: Optional[int] = None, infer_types: bool = True, - propagate_values: bool = True, validate: bool = True, **kwargs: Any, ): @@ -117,9 +124,6 @@ def __init__( infer_types Whether to run type inference - setting types for output vars if previously None. Should always succeed if possible, possibly raising type errors if inputs/attributes are not correctly typed. 
- propagate_values - Whether to run value propagation - setting values for output vars if previously None. Should only succeed - if all inputs are constant (attributes always are). validate Whether to run some extra validation. The default validation only warns against unknown types. kwargs @@ -134,17 +138,16 @@ def __init__( # As inference functions may access which output vars we initialized (e.g. variadics) # we inject uninitialized vars first self.outputs = self._init_output_vars() - self.inference(infer_types, propagate_values) + self.inference(infer_types=infer_types) else: self.outputs = outputs + # Store validate for when the values are actually propagated + self._validate = validate + # Optionally store debug information about where this node was created self._traceback = traceback.format_stack() if STORE_TRACEBACK else None - # Performs type checking using known flags (like type_members) - # and warns if type inference failed (some types are None). - if validate: - self.validate_types() self.post_init(**kwargs) @property @@ -187,13 +190,11 @@ def min_output(self) -> int: def signature(self) -> str: """Get a signature of this Node, including its inputs and attributes (but not outputs).""" - def fmt_input(key: str, var: Var) -> str: - return f"{key}: {var.type}" + ( - f" = {var._value}" if var._value is not None else "" - ) + def fmt_input(key: str, var: _VarInfo) -> str: + return f"{key}: {var.type}" sign = ", ".join( - fmt_input(key, var) for key, var in self.inputs.get_vars().items() + fmt_input(key, var) for key, var in self.inputs.get_var_infos().items() ) sign = f"inputs [{sign}]" shown_attrs = { @@ -216,7 +217,7 @@ def pre_init(self, **kwargs: Any) -> None: def post_init(self, **kwargs: Any) -> None: """Post-initialization hook. Called at the end of ``__init__`` after other default fields are set.""" - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> PropDict: """ Propagate values from inputs, and, if possible, compute values for outputs as well. This method is used to implement ONNX partial data propagation - for example so that @@ -224,40 +225,49 @@ def propagate_values(self) -> dict[str, PropValueType]: """ return {} - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: """ Inference routine for output types. Often overriden by inheriting Node types. - Returns a dictionary of output field names into Types for the respective Vars. + Returns a dictionary of output field names into Types for the respective VarInfos. """ return {} def inference( - self, infer_types: bool = True, propagate_values: bool = True + self, input_prop_values: Optional[PropDict] = None, infer_types: bool = True ) -> None: + if input_prop_values is None: + input_prop_values = {} # Type inference routine - call infer_output_types if required # and check if it provides the expected outputs. 
- out_types = self.infer_output_types() if infer_types else {} + out_types = ( + self.infer_output_types(input_prop_values=input_prop_values) + if infer_types + else {} + ) - for key, var in self.outputs.get_vars().items(): - if var.type is None: # If no existing type from init_output_vars - # Attempt to use the ones from kwargs, if none then what type inference gave + for key, var in self.outputs.get_var_infos().items(): + typ = out_types.get(key) + if var.type is None or (typ is not None and typ._subtype(var.type)): + # If there is no type, or the infered type is a subtype + # we use the new type var.type = out_types.get(key) + def get_output_vars( + self, input_prop_values: Optional[PropDict] = None, infer_types: bool = True + ) -> BaseVars: + if input_prop_values is None: + input_prop_values = {} # After typing everything, try to get values for outputs - out_values = self.propagate_values() if propagate_values else {} - for key, var in self.outputs.get_vars().items(): - if var.type is not None and var._value is None and key in out_values: - prop = PropValue(var.type, out_values.get(key)) - if prop.check(): - var._value = prop - else: - warnings.warn( - InferenceWarning( - f"Propagated value {prop} does not type-check, dropping. " - f"Hint: this indicates a bug with the current value prop backend or type inference." - ) - ) + self.inference(infer_types=infer_types, input_prop_values=input_prop_values) + + # Performs type checking using known flags (like type_members) + # and warns if type inference failed (some types are None). + if self._validate: + self.validate_types() + + out_values = self.propagate_values(input_prop_values) + return self.outputs.into_vars(out_values) def validate_types(self) -> None: """Validation of types, ran at the end of Node creation.""" @@ -297,10 +307,8 @@ def _check_concrete_type(self, value_type: Optional[Type]) -> Optional[str]: return f"{type(e).__name__}: {str(e)}" return None - def _list_types( - self, source: BaseVars - ) -> Generator[tuple[str, Optional[Type]], None, None]: - return ((key, var.type) for key, var in source.get_vars().items()) + def _list_types(self, source: BaseVarInfos) -> Iterator[tuple[str, Optional[Type]]]: + return ((key, var.type) for key, var in source.get_var_infos().items()) def _init_output_vars(self) -> BaseOutputs: """ @@ -317,31 +325,29 @@ def _init_output_vars(self) -> BaseOutputs: (variadic,) = variadics else: variadic = None - outputs: dict[str, Union[Var, Sequence[Var]]] = { - field.name: Var(self, None, None) + outputs: dict[str, Union[_VarInfo, Sequence[_VarInfo]]] = { + field.name: _VarInfo(self, None) for field in dataclasses.fields(self.Outputs) if field.name != variadic } if variadic is not None: assert self.out_variadic is not None - outputs[variadic] = [ - Var(self, None, None) for _ in range(self.out_variadic) - ] + outputs[variadic] = [_VarInfo(self, None) for _ in range(self.out_variadic)] return self.Outputs(**outputs) @property - def dependencies(self) -> Iterable[Var]: - """List of input Vars into this Node.""" - return (var for var in self.inputs.get_vars().values()) + def dependencies(self) -> Iterable[_VarInfo]: + """List of input VarInfos into this Node.""" + return (var for var in self.inputs.get_var_infos().values()) @property - def dependents(self) -> Iterable[Var]: - """List of output Vars from this Node.""" - return (var for var in self.outputs.get_vars().values()) + def dependents(self) -> Iterable[_VarInfo]: + """List of output VarInfos from this Node.""" + return (var for var in 
self.outputs.get_var_infos().values()) @property - def incident(self) -> Iterable[Var]: - """List of both input and output Vars for this Node.""" + def incident(self) -> Iterable[_VarInfo]: + """List of both input and output VarInfos for this Node.""" return itertools.chain(self.dependencies, self.dependents) @property @@ -353,7 +359,7 @@ def subgraphs(self) -> Iterable[Graph]: def update_metadata( self, opset_req: set[tuple[str, int]], - initializers: dict[Var, np.ndarray], + initializers: dict[_VarInfo, np.ndarray], functions: list[Function], ) -> None: opset_req.update(self.opset_req) diff --git a/src/spox/_public.py b/src/spox/_public.py index 643f5dbb..e12240e7 100644 --- a/src/spox/_public.py +++ b/src/spox/_public.py @@ -18,6 +18,7 @@ from ._inline import _Inline from ._standard import _strip_dim_symbol from ._type_system import Type +from ._value_prop import PropDict from ._var import Var @@ -36,9 +37,13 @@ def argument(typ: Type) -> Var: An unnamed argument variable of given type that may be used as a model input to build a graph. """ - return _internal_op.Argument( - _internal_op.Argument.Attributes(type=AttrType(typ, "dummy"), default=None) - ).outputs.arg + return ( + _internal_op.Argument( + _internal_op.Argument.Attributes(type=AttrType(typ, "dummy"), default=None) + ) + .get_output_vars() + .arg # type: ignore + ) @contextlib.contextmanager @@ -50,8 +55,8 @@ def _temporary_renames(**kwargs: Var) -> Iterator[None]: pre: dict[Var, Optional[str]] = {} try: for name, arg in kwargs.items(): - pre[arg] = arg._name - arg._rename(name) + pre[arg] = arg._var_info._name + arg._var_info._rename(name) yield finally: for arg, name in pre.items(): @@ -119,6 +124,7 @@ def build( if not all(isinstance(var, Var) for var in outputs.values()): seen_types = {type(obj) for obj in outputs.values()} raise TypeError(f"Build outputs must be Vars, not {seen_types - {Var}}.") + if not all(isinstance(var._op, Argument) for var in inputs.values()): raise TypeError( "Build inputs must be `Var`s constructed using the `spox.argument` function. " @@ -298,11 +304,23 @@ def inline_inner(*args: Var, **kwargs: Var) -> dict[str, Var]: f"Error processing arguments, got {set(kwargs)}, expected {set(in_names)}." ) node = _Inline( - inputs=_Inline.Inputs([kwargs[name] for name in in_names]), + inputs=_Inline.Inputs([kwargs[name]._var_info for name in in_names]), out_variadic=len(model.graph.output), model=model, ) - return dict(zip(out_names, node.outputs.outputs)) + + prop_values: PropDict = { + name: kwargs[name]._value + for name in in_names + if kwargs[name]._value is not None + } + + return dict( + zip( + out_names, + node.get_output_vars(prop_values).flatten_vars().values(), + ) + ) return inline_inner diff --git a/src/spox/_scope.py b/src/spox/_scope.py index dacb4f92..8eff3dbb 100644 --- a/src/spox/_scope.py +++ b/src/spox/_scope.py @@ -7,7 +7,7 @@ from typing import Generic, Optional, TypeVar, Union, overload from ._node import Node -from ._var import Var +from ._var import _VarInfo H = TypeVar("H", bound=Hashable) @@ -20,7 +20,7 @@ class ScopeError(Exception): class ScopeSpace(Generic[H]): """ - Represents the namespace of a scope for some type H, like Node or Var. + Represents the namespace of a scope for some type H, like ``Node`` or ``_VarInfo``. Methods (and operators) on the namespace work both ways: both with names (str) and the named type (H). So ``__getitem__`` (``ScopeSpace[item]``) may be used for both the name of an object and the object of a name. 
@@ -154,15 +154,15 @@ class Scope: """ Class representing the state of an ONNX-rules scope. - Has namespaces (represented by a ScopeSpace) for Vars and Nodes. + Has namespaces (represented by a ScopeSpace) for VarInfos and Nodes. """ - var: ScopeSpace[Var] + var: ScopeSpace[_VarInfo] node: ScopeSpace[Node] def __init__( self, - sub_var: Optional[ScopeSpace[Var]] = None, + sub_var: Optional[ScopeSpace[_VarInfo]] = None, sub_node: Optional[ScopeSpace[Node]] = None, parent: Optional[Scope] = None, ): @@ -177,7 +177,9 @@ def __init__( @classmethod def of( cls, - *what: Union[tuple[str, Union[Var, Node]], tuple[Union[Var, Node], str]], + *what: Union[ + tuple[str, Union[_VarInfo, Node]], tuple[Union[_VarInfo, Node], str] + ], ) -> Scope: """Convenience constructor for filling a Scope with known names.""" scope = cls() @@ -185,7 +187,7 @@ def of( if not isinstance(key, str): key, value = value, key assert isinstance(key, str) - if isinstance(value, Var): + if isinstance(value, _VarInfo): scope.var[key] = value elif isinstance(value, Node): scope.node[key] = value @@ -207,7 +209,7 @@ def update(self, node: Node, prefix: str = "", force: bool = True) -> None: node Node to introduce in the scope. prefix - What value to prefix the node name with. If the Var has a predeclared name, it does not get the prefix. + What value to prefix the node name with. If the VarInfo has a predeclared name, it does not get the prefix. force Whether to attempt to overwrite existing names (possibly raising a ScopeError if they were different). By default, this is set to True to be more strict, so we see if the scoping algorithm failed to only @@ -215,7 +217,7 @@ def update(self, node: Node, prefix: str = "", force: bool = True) -> None: """ if force or node not in self.node: self.node[node] = self.node.enum(prefix + node.op_type.identifier) - for field, arr in node.outputs.get_vars().items(): + for field, arr in node.outputs.get_var_infos().items(): if arr._name is None: base = f"{self.node[node]}_{field}" name = self.var.maybe_enum(base) diff --git a/src/spox/_standard.py b/src/spox/_standard.py index 2fe8d56f..a9c33b61 100644 --- a/src/spox/_standard.py +++ b/src/spox/_standard.py @@ -5,6 +5,7 @@ from __future__ import annotations +from collections.abc import Iterable from typing import TYPE_CHECKING, Callable import numpy as np @@ -21,11 +22,11 @@ from ._shape import SimpleShape from ._type_system import Optional, Sequence, Tensor, Type from ._utils import from_array -from ._value_prop import PropValueType -from ._var import Var +from ._value_prop import PropDict, PropValue, PropValueType if TYPE_CHECKING: from ._graph import Graph + from ._var import _VarInfo class StandardNode(Node): @@ -53,7 +54,11 @@ def min_output(self) -> int: return self.schema.min_output def to_singleton_onnx_model( - self, *, dummy_outputs: bool = True, with_dummy_subgraphs: bool = True + self, + *, + dummy_outputs: bool = True, + with_dummy_subgraphs: bool = True, + input_prop_values: PropDict, ) -> tuple[onnx.ModelProto, Scope]: """ Build a singleton model consisting of just this StandardNode. Used for type inference. 
@@ -63,10 +68,10 @@ def to_singleton_onnx_model( # Prepare names for the values in scope of the node scope = Scope() scope.node[self] = "_this_" - for key, var in self.inputs.get_vars().items(): + for key, var in self.inputs.get_var_infos().items(): if var not in scope.var: scope.var[var] = key - for key, var in self.outputs.get_vars().items(): + for key, var in self.outputs.get_var_infos().items(): if var not in scope.var: scope.var[var] = key # We inject the evaluated attribute values here and then substitute back @@ -88,25 +93,48 @@ def to_singleton_onnx_model( # Input types input_info = [ var.unwrap_type()._to_onnx_value_info(key) - for key, var in self.inputs.get_vars().items() + for key, var in self.inputs.get_var_infos().items() ] # Output types with placeholder empty TypeProto (or actual type if not using dummies) - def out_value_info(curr_key: str, curr_var: Var) -> onnx.ValueInfoProto: - if dummy_outputs or curr_var.type is None or not curr_var.type._is_concrete: + def out_value_info( + curr_key: str, curr_var_info: _VarInfo + ) -> onnx.ValueInfoProto: + if ( + dummy_outputs + or curr_var_info.type is None + or not curr_var_info.type._is_concrete + ): return onnx.helper.make_value_info(curr_key, onnx.TypeProto()) - return curr_var.unwrap_type()._to_onnx_value_info(curr_key) + return curr_var_info.unwrap_type()._to_onnx_value_info(curr_key) output_info = [ - out_value_info(key, var) for key, var in self.outputs.get_vars().items() + out_value_info(key, var) + for key, var in self.outputs.get_var_infos().items() ] # Initializers, passed in to allow partial data propagation # - used so that operators like Reshape are aware of constant shapes - initializers = [ - from_array(var._value.value, key) - for key, var in self.inputs.get_vars().items() - if var._value and isinstance(var._value.value, np.ndarray) - ] + + initializers = [] + + for name, prop in input_prop_values.items(): + if prop is None: + continue + elif not isinstance(prop, PropValue) or prop.value is None: + continue + elif isinstance(prop.type, Sequence): + assert isinstance(prop.value, Iterable) + initializers.extend( + [ + from_array(elem.value, f"{name}_{i}") + for i, elem in enumerate(prop.value) + if elem is not None + ] + ) + else: + assert isinstance(prop.value, np.ndarray) + initializers.append(from_array(prop.value, name)) + # Graph and model graph = onnx.helper.make_graph( [node_proto], @@ -126,13 +154,13 @@ def out_value_info(curr_key: str, curr_var: Var) -> onnx.ValueInfoProto: ) return model, scope - def infer_output_types_onnx(self) -> dict[str, Type]: + def infer_output_types_onnx(self, input_prop_values: PropDict) -> dict[str, Type]: """Execute type & shape inference with ``onnx.shape_inference.infer_node_outputs``.""" # Check that all (specified) inputs have known types, as otherwise we fail - if any(var.type is None for var in self.inputs.get_vars().values()): + if any(var.type is None for var in self.inputs.get_var_infos().values()): return {} - model, _ = self.to_singleton_onnx_model() + model, _ = self.to_singleton_onnx_model(input_prop_values=input_prop_values) # Attempt to do shape inference - if an error is caught, we extend the traceback a bit try: @@ -156,26 +184,30 @@ def infer_output_types_onnx(self) -> dict[str, Type]: for key, type_ in results.items() } - def propagate_values_onnx(self) -> dict[str, PropValueType]: + def propagate_values_onnx( + self, input_prop_values: PropDict + ) -> dict[str, PropValueType]: """Perform value propagation by evaluating singleton model. 
The backend used for the propagation can be configured with the `spox._standard.ValuePropBackend` variable. """ # Cannot do propagation when some inputs were not propagated/inferred if any( - var.type is None or var._value is None - for var in self.inputs.get_vars().values() + var_info.type is None or input_prop_values.get(name, None) is None + for name, var_info in self.inputs.get_var_infos().items() ): return {} if next(iter(self.subgraphs), None) is not None: # Cannot do propagation with subgraphs implicitly for performance - should be reimplemented return {} - model, scope = self.to_singleton_onnx_model(with_dummy_subgraphs=False) + model, scope = self.to_singleton_onnx_model( + with_dummy_subgraphs=False, input_prop_values=input_prop_values + ) wrap_feed, run, unwrap_feed = _value_prop.get_backend_calls() input_feed = { - scope.var[var]: wrap_feed(var._value) - for var in self.inputs.get_vars().values() - if var._value + scope.var[var_info]: wrap_feed(input_prop_values[name]) + for name, var_info in self.inputs.get_var_infos().items() + if input_prop_values[name] } output_feed = run(model, input_feed) @@ -188,12 +220,12 @@ def propagate_values_onnx(self) -> dict[str, PropValueType]: } return {k: v for k, v in results.items() if k is not None} - def infer_output_types(self) -> dict[str, Type]: - return self.infer_output_types_onnx() + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: + return self.infer_output_types_onnx(input_prop_values) - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: if _value_prop._VALUE_PROP_BACKEND != _value_prop.ValuePropBackend.NONE: - return self.propagate_values_onnx() + return self.propagate_values_onnx(input_prop_values) return {} diff --git a/src/spox/_value_prop.py b/src/spox/_value_prop.py index 11d4017f..aaf0f568 100644 --- a/src/spox/_value_prop.py +++ b/src/spox/_value_prop.py @@ -25,6 +25,7 @@ - None -> Optional, Nothing (no value) """ PropValueType = Union[np.ndarray, list["PropValue"], "PropValue", None] +PropDict = dict[str, PropValueType] ORTValue = Union[np.ndarray, list, None] RefValue = Union[np.ndarray, list, float, None] @@ -42,7 +43,7 @@ class ValuePropBackend(enum.Enum): @dataclass(frozen=True) class PropValue: - """Propagated value given to a Var, which has a run-time value known at compile-time. + """Propagated value given to a VarInfo, which has a run-time value known at compile-time. Wrapper for a few Python types which are used to represent values of ONNX types. diff --git a/src/spox/_var.py b/src/spox/_var.py index b557a896..d97e447d 100644 --- a/src/spox/_var.py +++ b/src/spox/_var.py @@ -4,7 +4,8 @@ from __future__ import annotations import typing -from typing import Any, Callable, ClassVar, Optional, TypeVar, Union +from collections.abc import Iterable, Sequence +from typing import Any, Callable, ClassVar, Optional, TypeVar, Union, overload import numpy as np @@ -23,6 +24,93 @@ def _not_impl(self, *args: Any) -> Var: add = sub = mul = truediv = floordiv = neg = and_ = or_ = xor = not_ = _not_impl +class _VarInfo: + """ + Internal information about a ``Var``. Should be mainly inaccessible for most uses of ``spox``. + + ``VarInfo`` should be treated as strictly immutable. + If a ``VarInfo`` or any of its fields are modified, the behaviour is undefined and the produced graph may be invalid. 
+ """ + + type: Optional[_type_system.Type] + _op: Node + _name: Optional[str] + + def __init__( + self, + op: Node, + type_: Optional[_type_system.Type], + ): + """The initializer of ``VarInfo`` is protected. Use operator constructors to construct them instead.""" + if type_ is not None and not isinstance(type_, _type_system.Type): + raise TypeError("The type field of a VarInfo must be a Spox Type.") + + self.type = type_ + self._op = op + self._name = None + + def _rename(self, name: Optional[str]) -> None: + """Mutates the internal state of the VarInfo, overriding its name as given.""" + self._name = name + + @property + def _which_output(self) -> Optional[str]: + """Return the name of the output field that this var is stored in under ``self._op``.""" + if self._op is None: + return None + op_outs = self._op.outputs.get_var_infos() + candidates = [key for key, var in op_outs.items() if var is self] + return candidates[0] if candidates else None + + def __repr__(self) -> str: + nm = repr(self._name) + " " if self._name is not None else "" + op_repr = self._op.get_op_repr() if self._op else "??" + which = self._which_output + is_unary = len(self._op.outputs) <= 1 if self._op else True + which_repr = "->??" if which is None else (f"->{which}" if is_unary else "") + return f"" + + def unwrap_type(self) -> _type_system.Type: + """ + Return the :class:`~spox.Type` of ``self``, unless it is unknown. + + Returns + ------- + _type_system.Type + The type of the VarInfo. + + Raises + ------ + TypeError + If ``type is None`` (the type of this ``VarInfo`` is unknown). + """ + if self.type is None: + raise TypeError( + "Cannot unwrap requested type for VarInfo, as it is unknown." + ) + return self.type + + def unwrap_tensor(self) -> _type_system.Tensor: + """Equivalent to ``self.unwrap_type().unwrap_tensor()``.""" + return self.unwrap_type().unwrap_tensor() + + def unwrap_sequence(self) -> _type_system.Sequence: + """Equivalent to ``self.unwrap_type().unwrap_sequence()``.""" + return self.unwrap_type().unwrap_sequence() + + def unwrap_optional(self) -> _type_system.Optional: + """Equivalent to ``self.unwrap_type().unwrap_optional()``.""" + return self.unwrap_type().unwrap_optional() + + def __copy__(self) -> _VarInfo: + # Simply return `self` to ensure that "copies" are still equal + # during the build process + return self + + def __deepcopy__(self, _: Any) -> _VarInfo: + raise ValueError("'VarInfo' objects cannot be deepcopied.") + + class Var: """ Abstraction for a single ONNX value - like a tensor - that can be passed around in Python code. @@ -30,6 +118,8 @@ class Var: A ``Var`` represents some output of an operator. This operator is stored internally to allow reproducing the graph. + The ``VarInfo`` class holds all relevant information about a ``Var`` - like the ``type``. + The ``type`` field is inferred and checked by operators. It may be ``None`` if type inference failed, in which case it is unknown and should pass all type checks. However, untyped ``Var`` objects may not be used in some contexts. @@ -49,46 +139,26 @@ class Var: Should not be constructed directly - the main source of ``Var`` objects are operator constructors. 
""" - type: Optional[_type_system.Type] + _var_info: _VarInfo _value: Optional[_value_prop.PropValue] - _op: Node - _name: Optional[str] _operator_dispatcher: ClassVar[Any] = NotImplementedOperatorDispatcher() def __init__( self, - op: Node, - type_: Optional[_type_system.Type], + var_info: _VarInfo, value: Optional[_value_prop.PropValue] = None, ): """The initializer of ``Var`` is protected. Use operator constructors to construct them instead.""" - if type_ is not None and not isinstance(type_, _type_system.Type): - raise TypeError("The type field of a Var must be a Spox Type.") if value is not None and not isinstance(value, _value_prop.PropValue): raise TypeError("The propagated value field of a Var must be a PropValue.") - if value is not None and value.type != type_: + if value is not None and value.type != var_info.type: raise ValueError( - f"The propagated value type ({value.type}) and actual Var type ({type_}) must be the same." + f"The propagated value type ({value.type}) and actual Var type ({var_info.type}) must be the same." ) - self.type = type_ + self._var_info = var_info self._value = value - self._op = op - self._name = None - - def _rename(self, name: Optional[str]) -> None: - """Mutates the internal state of the Var, overriding its name as given.""" - self._name = name - - @property - def _which_output(self) -> Optional[str]: - """Return the name of the output field that this var is stored in under ``self._op``.""" - if self._op is None: - return None - op_outs = self._op.outputs.get_vars() - candidates = [key for key, var in op_outs.items() if var is self] - return candidates[0] if candidates else None def _get_value(self) -> _value_prop.ORTValue: """Get the propagated value in this Var and convert it to the ORT format. Raises if value is missing.""" @@ -137,6 +207,25 @@ def unwrap_optional(self) -> _type_system.Optional: """Equivalent to ``self.unwrap_type().unwrap_optional()``.""" return self.unwrap_type().unwrap_optional() + @property + def _op(self) -> Node: + return self._var_info._op + + @property + def _name(self) -> Optional[str]: + return self._var_info._name + + def _rename(self, name: Optional[str]) -> None: + self._var_info._rename(name) + + @property + def _which_output(self) -> Optional[str]: + return self._var_info._which_output + + @property + def type(self) -> Optional[_type_system.Type]: + return self._var_info.type + def __copy__(self) -> Var: # Simply return `self` to ensure that "copies" are still equal # during the build process @@ -145,70 +234,144 @@ def __copy__(self) -> Var: def __deepcopy__(self, _: Any) -> Var: raise ValueError("'Var' objects cannot be deepcopied.") - def __add__(self, other) -> Var: # type: ignore + def __add__(self, other: Var) -> Var: return Var._operator_dispatcher.add(self, other) - def __sub__(self, other) -> Var: # type: ignore + def __sub__(self, other: Var) -> Var: return Var._operator_dispatcher.sub(self, other) - def __mul__(self, other) -> Var: # type: ignore + def __mul__(self, other: Var) -> Var: return Var._operator_dispatcher.mul(self, other) - def __truediv__(self, other) -> Var: # type: ignore + def __truediv__(self, other: Var) -> Var: return Var._operator_dispatcher.truediv(self, other) - def __floordiv__(self, other) -> Var: # type: ignore + def __floordiv__(self, other: Var) -> Var: return Var._operator_dispatcher.floordiv(self, other) def __neg__(self) -> Var: return Var._operator_dispatcher.neg(self) - def __and__(self, other) -> Var: # type: ignore + def __and__(self, other: Var) -> Var: return 
Var._operator_dispatcher.and_(self, other) - def __or__(self, other) -> Var: # type: ignore + def __or__(self, other: Var) -> Var: return Var._operator_dispatcher.or_(self, other) - def __xor__(self, other) -> Var: # type: ignore + def __xor__(self, other: Var) -> Var: return Var._operator_dispatcher.xor(self, other) def __invert__(self) -> Var: return Var._operator_dispatcher.not_(self) - def __radd__(self, other) -> Var: # type: ignore + def __radd__(self, other: Var) -> Var: return Var._operator_dispatcher.add(other, self) - def __rsub__(self, other) -> Var: # type: ignore + def __rsub__(self, other: Var) -> Var: return Var._operator_dispatcher.sub(other, self) - def __rmul__(self, other) -> Var: # type: ignore + def __rmul__(self, other: Var) -> Var: return Var._operator_dispatcher.mul(other, self) - def __rtruediv__(self, other) -> Var: # type: ignore + def __rtruediv__(self, other: Var) -> Var: return Var._operator_dispatcher.truediv(other, self) - def __rfloordiv__(self, other) -> Var: # type: ignore + def __rfloordiv__(self, other: Var) -> Var: return Var._operator_dispatcher.floordiv(other, self) - def __rand__(self, other) -> Var: # type: ignore + def __rand__(self, other: Var) -> Var: return Var._operator_dispatcher.and_(other, self) - def __ror__(self, other) -> Var: # type: ignore + def __ror__(self, other: Var) -> Var: return Var._operator_dispatcher.or_(other, self) - def __rxor__(self, other) -> Var: # type: ignore + def __rxor__(self, other: Var) -> Var: return Var._operator_dispatcher.xor(other, self) +# we want unwrap to be type aware +T = TypeVar("T") + + +@overload +def wrap_vars(var_info: _VarInfo) -> Var: ... + + +@overload +def wrap_vars(var_info: Optional[_VarInfo]) -> Optional[Var]: ... + + +@overload +def wrap_vars(var_info: dict[T, _VarInfo]) -> dict[T, Var]: ... # type: ignore[overload-overlap] + + +@overload +def wrap_vars(var_info: Iterable[_VarInfo]) -> list[Var]: ... + + +def wrap_vars(var_info): # type: ignore + if var_info is None: + return None + elif isinstance(var_info, _VarInfo): + return Var(var_info) + elif isinstance(var_info, dict): + return {k: wrap_vars(v) for k, v in var_info.items()} + elif isinstance(var_info, (Iterable)): + return [wrap_vars(v) for v in var_info] + else: + raise ValueError("Unsupported type for wrap_vars") + + +@overload +def unwrap_vars(var: Var) -> _VarInfo: ... + + +@overload +def unwrap_vars(var: Optional[Var]) -> Optional[_VarInfo]: ... + + +@overload +def unwrap_vars(var: dict[T, Var]) -> dict[T, _VarInfo]: ... # type: ignore[overload-overlap] + + +@overload +def unwrap_vars(var: Iterable[Var]) -> list[_VarInfo]: ... 
+ + +def unwrap_vars(var): # type: ignore + if var is None: + return None + elif isinstance(var, Var): + return var._var_info + elif isinstance(var, dict): + return {k: unwrap_vars(v) for k, v in var.items()} + elif isinstance(var, Iterable): + return [unwrap_vars(v) for v in var] + else: + raise ValueError("Unsupported type for unwrap_vars") + + def result_type( - *types: Union[Var, np.generic, int, float], + *types: Union[_VarInfo, np.generic, int, float], ) -> type[np.generic]: """Promote type for all given element types/values using ``np.result_type``.""" return np.dtype( np.result_type( *( - typ.unwrap_tensor().dtype if isinstance(typ, Var) else typ + typ.unwrap_tensor().dtype + if isinstance(typ, Var) or isinstance(typ, _VarInfo) + else typ for typ in types ) ) ).type + + +def create_prop_dict( + **kwargs: Union[Var, Sequence[Var], Optional[Var]], +) -> _value_prop.PropDict: + from ._fields import BaseVars + + flattened_vars = BaseVars(kwargs).flatten_vars() + + return {key: var._value for key, var in flattened_vars.items() if var is not None} diff --git a/src/spox/opset/ai/onnx/ml/v3.py b/src/spox/opset/ai/onnx/ml/v3.py index a1ef0ba5..ce292aa9 100644 --- a/src/spox/opset/ai/onnx/ml/v3.py +++ b/src/spox/opset/ai/onnx/ml/v3.py @@ -23,7 +23,8 @@ from spox._node import OpType from spox._standard import InferenceError, StandardNode from spox._type_system import Tensor, Type -from spox._var import Var +from spox._value_prop import PropDict +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars class _ArrayFeatureExtractor(StandardNode): @@ -33,14 +34,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - Y: Var + X: _VarInfo + Y: _VarInfo @dataclass class Outputs(BaseOutputs): - Z: Var + Z: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if not self.inputs.fully_typed: return {} xt, yt = self.inputs.X.unwrap_tensor(), self.inputs.Y.unwrap_tensor() @@ -69,13 +70,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: return {"Y": self.inputs.X.type} if self.inputs.X.type is not None else {} op_type = OpType("Binarizer", "ai.onnx.ml", 1) @@ -94,11 +95,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("CastMap", "ai.onnx.ml", 1) @@ -117,13 +118,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if not self.inputs.fully_typed: return {} cats1, cats2 = self.attrs.cats_int64s, self.attrs.cats_strings @@ -150,11 +151,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("DictVectorizer", "ai.onnx.ml", 1) @@ -170,11 +171,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Sequence[Var] + X: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("FeatureVectorizer", 
"ai.onnx.ml", 1) @@ -193,13 +194,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if not self.inputs.fully_typed: return {} t = self.inputs.X.unwrap_tensor() @@ -256,11 +257,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LabelEncoder", "ai.onnx.ml", 2) @@ -281,12 +282,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - Z: Var + Y: _VarInfo + Z: _VarInfo op_type = OpType("LinearClassifier", "ai.onnx.ml", 1) @@ -305,13 +306,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if not self.inputs.fully_typed: return {} sim = self.inputs.X.unwrap_tensor().shape @@ -339,13 +340,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if self.attrs.norm.value not in ("MAX", "L1", "L2"): raise InferenceError( f"Unknown normalisation method `{self.attrs.norm.value}`" @@ -368,13 +369,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if not self.inputs.fully_typed: return {} if self.attrs.cats_int64s: @@ -412,12 +413,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - Z: Var + Y: _VarInfo + Z: _VarInfo op_type = OpType("SVMClassifier", "ai.onnx.ml", 1) @@ -440,11 +441,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("SVMRegressor", "ai.onnx.ml", 1) @@ -461,13 +462,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if self.inputs.X.type is None: return {} sc, off = self.attrs.scale, self.attrs.offset @@ -520,14 +521,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - Z: Var + Y: _VarInfo + Z: _VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: e = ( len(self.attrs.class_ids.value) if self.attrs.class_ids is not None @@ -585,13 +586,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: 
_VarInfo - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: if self.inputs.fully_typed: shape = self.inputs.X.unwrap_tensor().shape assert shape is not None # already checked with fully_typed @@ -619,11 +620,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Z: Var + Z: _VarInfo op_type = OpType("ZipMap", "ai.onnx.ml", 1) @@ -662,13 +663,22 @@ def array_feature_extractor( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` """ - return _ArrayFeatureExtractor( - _ArrayFeatureExtractor.Attributes(), - _ArrayFeatureExtractor.Inputs( - X=X, - Y=Y, - ), - ).outputs.Z + input_prop_values = create_prop_dict( + X=X, + Y=Y, + ) + output_vars = ( + _ArrayFeatureExtractor( + _ArrayFeatureExtractor.Attributes(), + _ArrayFeatureExtractor.Inputs( + X=unwrap_vars(X), + Y=unwrap_vars(Y), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Z + ) + return output_vars # type: ignore def binarizer( @@ -702,14 +712,22 @@ def binarizer( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _Binarizer( - _Binarizer.Attributes( - threshold=AttrFloat32(threshold, name="threshold"), - ), - _Binarizer.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Binarizer( + _Binarizer.Attributes( + threshold=AttrFloat32(threshold, name="threshold"), + ), + _Binarizer.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def cast_map( @@ -759,16 +777,24 @@ def cast_map( - T1: `map(int64,tensor(float))`, `map(int64,tensor(string))` - T2: `tensor(float)`, `tensor(int64)`, `tensor(string)` """ - return _CastMap( - _CastMap.Attributes( - cast_to=AttrString(cast_to, name="cast_to"), - map_form=AttrString(map_form, name="map_form"), - max_map=AttrInt64(max_map, name="max_map"), - ), - _CastMap.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _CastMap( + _CastMap.Attributes( + cast_to=AttrString(cast_to, name="cast_to"), + map_form=AttrString(map_form, name="map_form"), + max_map=AttrInt64(max_map, name="max_map"), + ), + _CastMap.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def category_mapper( @@ -826,17 +852,25 @@ def category_mapper( - T1: `tensor(int64)`, `tensor(string)` - T2: `tensor(int64)`, `tensor(string)` """ - return _CategoryMapper( - _CategoryMapper.Attributes( - cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), - cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), - default_int64=AttrInt64(default_int64, name="default_int64"), - default_string=AttrString(default_string, name="default_string"), - ), - _CategoryMapper.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _CategoryMapper( + _CategoryMapper.Attributes( + cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), + cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), + default_int64=AttrInt64(default_int64, name="default_int64"), + default_string=AttrString(default_string, name="default_string"), + ), + _CategoryMapper.Inputs( + X=unwrap_vars(X), + ), + ) + 
.get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def dict_vectorizer( @@ -889,19 +923,27 @@ def dict_vectorizer( - T1: `map(int64,tensor(double))`, `map(int64,tensor(float))`, `map(int64,tensor(string))`, `map(string,tensor(double))`, `map(string,tensor(float))`, `map(string,tensor(int64))` - T2: `tensor(double)`, `tensor(float)`, `tensor(int64)`, `tensor(string)` """ - return _DictVectorizer( - _DictVectorizer.Attributes( - int64_vocabulary=AttrInt64s.maybe( - int64_vocabulary, name="int64_vocabulary" + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _DictVectorizer( + _DictVectorizer.Attributes( + int64_vocabulary=AttrInt64s.maybe( + int64_vocabulary, name="int64_vocabulary" + ), + string_vocabulary=AttrStrings.maybe( + string_vocabulary, name="string_vocabulary" + ), ), - string_vocabulary=AttrStrings.maybe( - string_vocabulary, name="string_vocabulary" + _DictVectorizer.Inputs( + X=unwrap_vars(X), ), - ), - _DictVectorizer.Inputs( - X=X, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def feature_vectorizer( @@ -938,14 +980,24 @@ def feature_vectorizer( Type constraints: - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _FeatureVectorizer( - _FeatureVectorizer.Attributes( - inputdimensions=AttrInt64s.maybe(inputdimensions, name="inputdimensions"), - ), - _FeatureVectorizer.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _FeatureVectorizer( + _FeatureVectorizer.Attributes( + inputdimensions=AttrInt64s.maybe( + inputdimensions, name="inputdimensions" + ), + ), + _FeatureVectorizer.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def imputer( @@ -1003,25 +1055,33 @@ def imputer( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _Imputer( - _Imputer.Attributes( - imputed_value_floats=AttrFloat32s.maybe( - imputed_value_floats, name="imputed_value_floats" + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Imputer( + _Imputer.Attributes( + imputed_value_floats=AttrFloat32s.maybe( + imputed_value_floats, name="imputed_value_floats" + ), + imputed_value_int64s=AttrInt64s.maybe( + imputed_value_int64s, name="imputed_value_int64s" + ), + replaced_value_float=AttrFloat32( + replaced_value_float, name="replaced_value_float" + ), + replaced_value_int64=AttrInt64( + replaced_value_int64, name="replaced_value_int64" + ), ), - imputed_value_int64s=AttrInt64s.maybe( - imputed_value_int64s, name="imputed_value_int64s" + _Imputer.Inputs( + X=unwrap_vars(X), ), - replaced_value_float=AttrFloat32( - replaced_value_float, name="replaced_value_float" - ), - replaced_value_int64=AttrInt64( - replaced_value_int64, name="replaced_value_int64" - ), - ), - _Imputer.Inputs( - X=X, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def label_encoder( @@ -1104,22 +1164,30 @@ def label_encoder( - T1: `tensor(float)`, `tensor(int64)`, `tensor(string)` - T2: `tensor(float)`, `tensor(int64)`, `tensor(string)` """ - return _LabelEncoder( - _LabelEncoder.Attributes( - default_float=AttrFloat32(default_float, name="default_float"), - default_int64=AttrInt64(default_int64, name="default_int64"), - default_string=AttrString(default_string, 
name="default_string"), - keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), - keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), - keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), - values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), - values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), - values_strings=AttrStrings.maybe(values_strings, name="values_strings"), - ), - _LabelEncoder.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _LabelEncoder( + _LabelEncoder.Attributes( + default_float=AttrFloat32(default_float, name="default_float"), + default_int64=AttrInt64(default_int64, name="default_int64"), + default_string=AttrString(default_string, name="default_string"), + keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), + keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), + keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), + values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), + values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), + values_strings=AttrStrings.maybe(values_strings, name="values_strings"), + ), + _LabelEncoder.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def linear_classifier( @@ -1179,23 +1247,31 @@ def linear_classifier( - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` - T2: `tensor(int64)`, `tensor(string)` """ - return _LinearClassifier( - _LinearClassifier.Attributes( - classlabels_ints=AttrInt64s.maybe( - classlabels_ints, name="classlabels_ints" + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _LinearClassifier( + _LinearClassifier.Attributes( + classlabels_ints=AttrInt64s.maybe( + classlabels_ints, name="classlabels_ints" + ), + classlabels_strings=AttrStrings.maybe( + classlabels_strings, name="classlabels_strings" + ), + coefficients=AttrFloat32s(coefficients, name="coefficients"), + intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), + multi_class=AttrInt64(multi_class, name="multi_class"), + post_transform=AttrString(post_transform, name="post_transform"), ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" + _LinearClassifier.Inputs( + X=unwrap_vars(X), ), - coefficients=AttrFloat32s(coefficients, name="coefficients"), - intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), - multi_class=AttrInt64(multi_class, name="multi_class"), - post_transform=AttrString(post_transform, name="post_transform"), - ), - _LinearClassifier.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def linear_regressor( @@ -1247,17 +1323,25 @@ def linear_regressor( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _LinearRegressor( - _LinearRegressor.Attributes( - coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), - intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), - post_transform=AttrString(post_transform, name="post_transform"), - targets=AttrInt64(targets, name="targets"), - ), - _LinearRegressor.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _LinearRegressor( + _LinearRegressor.Attributes( + 
coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), + intercepts=AttrFloat32s.maybe(intercepts, name="intercepts"), + post_transform=AttrString(post_transform, name="post_transform"), + targets=AttrInt64(targets, name="targets"), + ), + _LinearRegressor.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def normalizer( @@ -1296,14 +1380,22 @@ def normalizer( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _Normalizer( - _Normalizer.Attributes( - norm=AttrString(norm, name="norm"), - ), - _Normalizer.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Normalizer( + _Normalizer.Attributes( + norm=AttrString(norm, name="norm"), + ), + _Normalizer.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def one_hot_encoder( @@ -1355,16 +1447,24 @@ def one_hot_encoder( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` """ - return _OneHotEncoder( - _OneHotEncoder.Attributes( - cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), - cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), - zeros=AttrInt64(zeros, name="zeros"), - ), - _OneHotEncoder.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _OneHotEncoder( + _OneHotEncoder.Attributes( + cats_int64s=AttrInt64s.maybe(cats_int64s, name="cats_int64s"), + cats_strings=AttrStrings.maybe(cats_strings, name="cats_strings"), + zeros=AttrInt64(zeros, name="zeros"), + ), + _OneHotEncoder.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def svmclassifier( @@ -1449,30 +1549,40 @@ def svmclassifier( - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` - T2: `tensor(int64)`, `tensor(string)` """ - return _SVMClassifier( - _SVMClassifier.Attributes( - classlabels_ints=AttrInt64s.maybe( - classlabels_ints, name="classlabels_ints" + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _SVMClassifier( + _SVMClassifier.Attributes( + classlabels_ints=AttrInt64s.maybe( + classlabels_ints, name="classlabels_ints" + ), + classlabels_strings=AttrStrings.maybe( + classlabels_strings, name="classlabels_strings" + ), + coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), + kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), + kernel_type=AttrString(kernel_type, name="kernel_type"), + post_transform=AttrString(post_transform, name="post_transform"), + prob_a=AttrFloat32s.maybe(prob_a, name="prob_a"), + prob_b=AttrFloat32s.maybe(prob_b, name="prob_b"), + rho=AttrFloat32s.maybe(rho, name="rho"), + support_vectors=AttrFloat32s.maybe( + support_vectors, name="support_vectors" + ), + vectors_per_class=AttrInt64s.maybe( + vectors_per_class, name="vectors_per_class" + ), ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" + _SVMClassifier.Inputs( + X=unwrap_vars(X), ), - coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), - kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), - kernel_type=AttrString(kernel_type, name="kernel_type"), - post_transform=AttrString(post_transform, name="post_transform"), - 
prob_a=AttrFloat32s.maybe(prob_a, name="prob_a"), - prob_b=AttrFloat32s.maybe(prob_b, name="prob_b"), - rho=AttrFloat32s.maybe(rho, name="rho"), - support_vectors=AttrFloat32s.maybe(support_vectors, name="support_vectors"), - vectors_per_class=AttrInt64s.maybe( - vectors_per_class, name="vectors_per_class" - ), - ), - _SVMClassifier.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def svmregressor( @@ -1536,21 +1646,31 @@ def svmregressor( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _SVMRegressor( - _SVMRegressor.Attributes( - coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), - kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), - kernel_type=AttrString(kernel_type, name="kernel_type"), - n_supports=AttrInt64(n_supports, name="n_supports"), - one_class=AttrInt64(one_class, name="one_class"), - post_transform=AttrString(post_transform, name="post_transform"), - rho=AttrFloat32s.maybe(rho, name="rho"), - support_vectors=AttrFloat32s.maybe(support_vectors, name="support_vectors"), - ), - _SVMRegressor.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _SVMRegressor( + _SVMRegressor.Attributes( + coefficients=AttrFloat32s.maybe(coefficients, name="coefficients"), + kernel_params=AttrFloat32s.maybe(kernel_params, name="kernel_params"), + kernel_type=AttrString(kernel_type, name="kernel_type"), + n_supports=AttrInt64(n_supports, name="n_supports"), + one_class=AttrInt64(one_class, name="one_class"), + post_transform=AttrString(post_transform, name="post_transform"), + rho=AttrFloat32s.maybe(rho, name="rho"), + support_vectors=AttrFloat32s.maybe( + support_vectors, name="support_vectors" + ), + ), + _SVMRegressor.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def scaler( @@ -1592,15 +1712,23 @@ def scaler( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` """ - return _Scaler( - _Scaler.Attributes( - offset=AttrFloat32s.maybe(offset, name="offset"), - scale=AttrFloat32s.maybe(scale, name="scale"), - ), - _Scaler.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Scaler( + _Scaler.Attributes( + offset=AttrFloat32s.maybe(offset, name="offset"), + scale=AttrFloat32s.maybe(scale, name="scale"), + ), + _Scaler.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def tree_ensemble_classifier( @@ -1737,54 +1865,65 @@ def tree_ensemble_classifier( - T1: `tensor(double)`, `tensor(float)`, `tensor(int32)`, `tensor(int64)` - T2: `tensor(int64)`, `tensor(string)` """ - return _TreeEnsembleClassifier( - _TreeEnsembleClassifier.Attributes( - base_values=AttrFloat32s.maybe(base_values, name="base_values"), - base_values_as_tensor=AttrTensor.maybe( - base_values_as_tensor, name="base_values_as_tensor" - ), - class_ids=AttrInt64s.maybe(class_ids, name="class_ids"), - class_nodeids=AttrInt64s.maybe(class_nodeids, name="class_nodeids"), - class_treeids=AttrInt64s.maybe(class_treeids, name="class_treeids"), - class_weights=AttrFloat32s.maybe(class_weights, name="class_weights"), - class_weights_as_tensor=AttrTensor.maybe( - class_weights_as_tensor, name="class_weights_as_tensor" - ), 
- classlabels_int64s=AttrInt64s.maybe( - classlabels_int64s, name="classlabels_int64s" - ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" - ), - nodes_falsenodeids=AttrInt64s.maybe( - nodes_falsenodeids, name="nodes_falsenodeids" + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _TreeEnsembleClassifier( + _TreeEnsembleClassifier.Attributes( + base_values=AttrFloat32s.maybe(base_values, name="base_values"), + base_values_as_tensor=AttrTensor.maybe( + base_values_as_tensor, name="base_values_as_tensor" + ), + class_ids=AttrInt64s.maybe(class_ids, name="class_ids"), + class_nodeids=AttrInt64s.maybe(class_nodeids, name="class_nodeids"), + class_treeids=AttrInt64s.maybe(class_treeids, name="class_treeids"), + class_weights=AttrFloat32s.maybe(class_weights, name="class_weights"), + class_weights_as_tensor=AttrTensor.maybe( + class_weights_as_tensor, name="class_weights_as_tensor" + ), + classlabels_int64s=AttrInt64s.maybe( + classlabels_int64s, name="classlabels_int64s" + ), + classlabels_strings=AttrStrings.maybe( + classlabels_strings, name="classlabels_strings" + ), + nodes_falsenodeids=AttrInt64s.maybe( + nodes_falsenodeids, name="nodes_falsenodeids" + ), + nodes_featureids=AttrInt64s.maybe( + nodes_featureids, name="nodes_featureids" + ), + nodes_hitrates=AttrFloat32s.maybe( + nodes_hitrates, name="nodes_hitrates" + ), + nodes_hitrates_as_tensor=AttrTensor.maybe( + nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor" + ), + nodes_missing_value_tracks_true=AttrInt64s.maybe( + nodes_missing_value_tracks_true, + name="nodes_missing_value_tracks_true", + ), + nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), + nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), + nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), + nodes_truenodeids=AttrInt64s.maybe( + nodes_truenodeids, name="nodes_truenodeids" + ), + nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), + nodes_values_as_tensor=AttrTensor.maybe( + nodes_values_as_tensor, name="nodes_values_as_tensor" + ), + post_transform=AttrString(post_transform, name="post_transform"), ), - nodes_featureids=AttrInt64s.maybe( - nodes_featureids, name="nodes_featureids" + _TreeEnsembleClassifier.Inputs( + X=unwrap_vars(X), ), - nodes_hitrates=AttrFloat32s.maybe(nodes_hitrates, name="nodes_hitrates"), - nodes_hitrates_as_tensor=AttrTensor.maybe( - nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor" - ), - nodes_missing_value_tracks_true=AttrInt64s.maybe( - nodes_missing_value_tracks_true, name="nodes_missing_value_tracks_true" - ), - nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), - nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), - nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), - nodes_truenodeids=AttrInt64s.maybe( - nodes_truenodeids, name="nodes_truenodeids" - ), - nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), - nodes_values_as_tensor=AttrTensor.maybe( - nodes_values_as_tensor, name="nodes_values_as_tensor" - ), - post_transform=AttrString(post_transform, name="post_transform"), - ), - _TreeEnsembleClassifier.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def tree_ensemble_regressor( @@ -1919,52 +2058,65 @@ def tree_ensemble_regressor( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int32)`, 
`tensor(int64)` """ - return _TreeEnsembleRegressor( - _TreeEnsembleRegressor.Attributes( - aggregate_function=AttrString( - aggregate_function, name="aggregate_function" - ), - base_values=AttrFloat32s.maybe(base_values, name="base_values"), - base_values_as_tensor=AttrTensor.maybe( - base_values_as_tensor, name="base_values_as_tensor" - ), - n_targets=AttrInt64.maybe(n_targets, name="n_targets"), - nodes_falsenodeids=AttrInt64s.maybe( - nodes_falsenodeids, name="nodes_falsenodeids" + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _TreeEnsembleRegressor( + _TreeEnsembleRegressor.Attributes( + aggregate_function=AttrString( + aggregate_function, name="aggregate_function" + ), + base_values=AttrFloat32s.maybe(base_values, name="base_values"), + base_values_as_tensor=AttrTensor.maybe( + base_values_as_tensor, name="base_values_as_tensor" + ), + n_targets=AttrInt64.maybe(n_targets, name="n_targets"), + nodes_falsenodeids=AttrInt64s.maybe( + nodes_falsenodeids, name="nodes_falsenodeids" + ), + nodes_featureids=AttrInt64s.maybe( + nodes_featureids, name="nodes_featureids" + ), + nodes_hitrates=AttrFloat32s.maybe( + nodes_hitrates, name="nodes_hitrates" + ), + nodes_hitrates_as_tensor=AttrTensor.maybe( + nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor" + ), + nodes_missing_value_tracks_true=AttrInt64s.maybe( + nodes_missing_value_tracks_true, + name="nodes_missing_value_tracks_true", + ), + nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), + nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), + nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), + nodes_truenodeids=AttrInt64s.maybe( + nodes_truenodeids, name="nodes_truenodeids" + ), + nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), + nodes_values_as_tensor=AttrTensor.maybe( + nodes_values_as_tensor, name="nodes_values_as_tensor" + ), + post_transform=AttrString(post_transform, name="post_transform"), + target_ids=AttrInt64s.maybe(target_ids, name="target_ids"), + target_nodeids=AttrInt64s.maybe(target_nodeids, name="target_nodeids"), + target_treeids=AttrInt64s.maybe(target_treeids, name="target_treeids"), + target_weights=AttrFloat32s.maybe( + target_weights, name="target_weights" + ), + target_weights_as_tensor=AttrTensor.maybe( + target_weights_as_tensor, name="target_weights_as_tensor" + ), ), - nodes_featureids=AttrInt64s.maybe( - nodes_featureids, name="nodes_featureids" + _TreeEnsembleRegressor.Inputs( + X=unwrap_vars(X), ), - nodes_hitrates=AttrFloat32s.maybe(nodes_hitrates, name="nodes_hitrates"), - nodes_hitrates_as_tensor=AttrTensor.maybe( - nodes_hitrates_as_tensor, name="nodes_hitrates_as_tensor" - ), - nodes_missing_value_tracks_true=AttrInt64s.maybe( - nodes_missing_value_tracks_true, name="nodes_missing_value_tracks_true" - ), - nodes_modes=AttrStrings.maybe(nodes_modes, name="nodes_modes"), - nodes_nodeids=AttrInt64s.maybe(nodes_nodeids, name="nodes_nodeids"), - nodes_treeids=AttrInt64s.maybe(nodes_treeids, name="nodes_treeids"), - nodes_truenodeids=AttrInt64s.maybe( - nodes_truenodeids, name="nodes_truenodeids" - ), - nodes_values=AttrFloat32s.maybe(nodes_values, name="nodes_values"), - nodes_values_as_tensor=AttrTensor.maybe( - nodes_values_as_tensor, name="nodes_values_as_tensor" - ), - post_transform=AttrString(post_transform, name="post_transform"), - target_ids=AttrInt64s.maybe(target_ids, name="target_ids"), - target_nodeids=AttrInt64s.maybe(target_nodeids, name="target_nodeids"), - 
target_treeids=AttrInt64s.maybe(target_treeids, name="target_treeids"), - target_weights=AttrFloat32s.maybe(target_weights, name="target_weights"), - target_weights_as_tensor=AttrTensor.maybe( - target_weights_as_tensor, name="target_weights_as_tensor" - ), - ), - _TreeEnsembleRegressor.Inputs( - X=X, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def zip_map( @@ -2007,19 +2159,27 @@ def zip_map( Type constraints: - T: `seq(map(int64,tensor(float)))`, `seq(map(string,tensor(float)))` """ - return _ZipMap( - _ZipMap.Attributes( - classlabels_int64s=AttrInt64s.maybe( - classlabels_int64s, name="classlabels_int64s" + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _ZipMap( + _ZipMap.Attributes( + classlabels_int64s=AttrInt64s.maybe( + classlabels_int64s, name="classlabels_int64s" + ), + classlabels_strings=AttrStrings.maybe( + classlabels_strings, name="classlabels_strings" + ), ), - classlabels_strings=AttrStrings.maybe( - classlabels_strings, name="classlabels_strings" + _ZipMap.Inputs( + X=unwrap_vars(X), ), - ), - _ZipMap.Inputs( - X=X, - ), - ).outputs.Z + ) + .get_output_vars(input_prop_values=input_prop_values) + .Z + ) + return output_vars # type: ignore _OPERATORS = { diff --git a/src/spox/opset/ai/onnx/ml/v4.py b/src/spox/opset/ai/onnx/ml/v4.py index 9e51382c..2369dfc5 100644 --- a/src/spox/opset/ai/onnx/ml/v4.py +++ b/src/spox/opset/ai/onnx/ml/v4.py @@ -22,7 +22,7 @@ from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._node import OpType from spox._standard import StandardNode -from spox._var import Var +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.ml.v3 import ( _ArrayFeatureExtractor, _Binarizer, @@ -79,11 +79,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LabelEncoder", "ai.onnx.ml", 4) @@ -191,25 +191,33 @@ def label_encoder( - T1: `tensor(double)`, `tensor(float)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` - T2: `tensor(double)`, `tensor(float)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(string)` """ - return _LabelEncoder( - _LabelEncoder.Attributes( - default_float=AttrFloat32(default_float, name="default_float"), - default_int64=AttrInt64(default_int64, name="default_int64"), - default_string=AttrString(default_string, name="default_string"), - default_tensor=AttrTensor.maybe(default_tensor, name="default_tensor"), - keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), - keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), - keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), - keys_tensor=AttrTensor.maybe(keys_tensor, name="keys_tensor"), - values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), - values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), - values_strings=AttrStrings.maybe(values_strings, name="values_strings"), - values_tensor=AttrTensor.maybe(values_tensor, name="values_tensor"), - ), - _LabelEncoder.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _LabelEncoder( + _LabelEncoder.Attributes( + default_float=AttrFloat32(default_float, name="default_float"), + default_int64=AttrInt64(default_int64, name="default_int64"), + default_string=AttrString(default_string, name="default_string"), + 
default_tensor=AttrTensor.maybe(default_tensor, name="default_tensor"), + keys_floats=AttrFloat32s.maybe(keys_floats, name="keys_floats"), + keys_int64s=AttrInt64s.maybe(keys_int64s, name="keys_int64s"), + keys_strings=AttrStrings.maybe(keys_strings, name="keys_strings"), + keys_tensor=AttrTensor.maybe(keys_tensor, name="keys_tensor"), + values_floats=AttrFloat32s.maybe(values_floats, name="values_floats"), + values_int64s=AttrInt64s.maybe(values_int64s, name="values_int64s"), + values_strings=AttrStrings.maybe(values_strings, name="values_strings"), + values_tensor=AttrTensor.maybe(values_tensor, name="values_tensor"), + ), + _LabelEncoder.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore _OPERATORS = { diff --git a/src/spox/opset/ai/onnx/ml/v5.py b/src/spox/opset/ai/onnx/ml/v5.py index 100bf179..1d575705 100644 --- a/src/spox/opset/ai/onnx/ml/v5.py +++ b/src/spox/opset/ai/onnx/ml/v5.py @@ -18,7 +18,7 @@ from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._node import OpType from spox._standard import StandardNode -from spox._var import Var +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.ml.v4 import ( _ArrayFeatureExtractor, _Binarizer, @@ -77,11 +77,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("TreeEnsemble", "ai.onnx.ml", 5) @@ -224,35 +224,48 @@ def tree_ensemble( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _TreeEnsemble( - _TreeEnsemble.Attributes( - aggregate_function=AttrInt64(aggregate_function, name="aggregate_function"), - leaf_targetids=AttrInt64s(leaf_targetids, name="leaf_targetids"), - leaf_weights=AttrTensor(leaf_weights, name="leaf_weights"), - membership_values=AttrTensor.maybe( - membership_values, name="membership_values" + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _TreeEnsemble( + _TreeEnsemble.Attributes( + aggregate_function=AttrInt64( + aggregate_function, name="aggregate_function" + ), + leaf_targetids=AttrInt64s(leaf_targetids, name="leaf_targetids"), + leaf_weights=AttrTensor(leaf_weights, name="leaf_weights"), + membership_values=AttrTensor.maybe( + membership_values, name="membership_values" + ), + n_targets=AttrInt64.maybe(n_targets, name="n_targets"), + nodes_falseleafs=AttrInt64s(nodes_falseleafs, name="nodes_falseleafs"), + nodes_falsenodeids=AttrInt64s( + nodes_falsenodeids, name="nodes_falsenodeids" + ), + nodes_featureids=AttrInt64s(nodes_featureids, name="nodes_featureids"), + nodes_hitrates=AttrTensor.maybe(nodes_hitrates, name="nodes_hitrates"), + nodes_missing_value_tracks_true=AttrInt64s.maybe( + nodes_missing_value_tracks_true, + name="nodes_missing_value_tracks_true", + ), + nodes_modes=AttrTensor(nodes_modes, name="nodes_modes"), + nodes_splits=AttrTensor(nodes_splits, name="nodes_splits"), + nodes_trueleafs=AttrInt64s(nodes_trueleafs, name="nodes_trueleafs"), + nodes_truenodeids=AttrInt64s( + nodes_truenodeids, name="nodes_truenodeids" + ), + post_transform=AttrInt64(post_transform, name="post_transform"), + tree_roots=AttrInt64s(tree_roots, name="tree_roots"), ), - n_targets=AttrInt64.maybe(n_targets, name="n_targets"), - nodes_falseleafs=AttrInt64s(nodes_falseleafs, name="nodes_falseleafs"), - nodes_falsenodeids=AttrInt64s( - nodes_falsenodeids, name="nodes_falsenodeids" + 
_TreeEnsemble.Inputs( + X=unwrap_vars(X), ), - nodes_featureids=AttrInt64s(nodes_featureids, name="nodes_featureids"), - nodes_hitrates=AttrTensor.maybe(nodes_hitrates, name="nodes_hitrates"), - nodes_missing_value_tracks_true=AttrInt64s.maybe( - nodes_missing_value_tracks_true, name="nodes_missing_value_tracks_true" - ), - nodes_modes=AttrTensor(nodes_modes, name="nodes_modes"), - nodes_splits=AttrTensor(nodes_splits, name="nodes_splits"), - nodes_trueleafs=AttrInt64s(nodes_trueleafs, name="nodes_trueleafs"), - nodes_truenodeids=AttrInt64s(nodes_truenodeids, name="nodes_truenodeids"), - post_transform=AttrInt64(post_transform, name="post_transform"), - tree_roots=AttrInt64s(tree_roots, name="tree_roots"), - ), - _TreeEnsemble.Inputs( - X=X, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore _OPERATORS = { diff --git a/src/spox/opset/ai/onnx/v17.py b/src/spox/opset/ai/onnx/v17.py index f9cbd0bf..e02a8fa8 100644 --- a/src/spox/opset/ai/onnx/v17.py +++ b/src/spox/opset/ai/onnx/v17.py @@ -31,8 +31,13 @@ from spox._standard import InferenceError, StandardNode from spox._type_system import Sequence as SpoxSequence from spox._type_system import Tensor, Type -from spox._value_prop import PropValueType -from spox._var import Var +from spox._value_prop import PropDict, PropValueType +from spox._var import ( + Var, + _VarInfo, + create_prop_dict, + unwrap_vars, +) class _Abs(StandardNode): @@ -42,11 +47,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Abs", "", 13) @@ -62,11 +67,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Acos", "", 7) @@ -82,11 +87,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Acosh", "", 9) @@ -102,12 +107,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Add", "", 14) @@ -123,12 +128,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("And", "", 7) @@ -146,11 +151,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ArgMax", "", 13) @@ -168,11 +173,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ArgMin", "", 13) @@ -188,11 +193,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Asin", "", 7) @@ -208,11 +213,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Asinh", "", 9) @@ -228,11 +233,11 @@ class 
Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Atan", "", 7) @@ -248,11 +253,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Atanh", "", 9) @@ -273,11 +278,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("AveragePool", "", 11) @@ -295,17 +300,17 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - scale: Var - B: Var - input_mean: Var - input_var: Var + X: _VarInfo + scale: _VarInfo + B: _VarInfo + input_mean: _VarInfo + input_var: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - running_mean: Optional[Var] - running_var: Optional[Var] + Y: _VarInfo + running_mean: Optional[_VarInfo] + running_var: Optional[_VarInfo] op_type = OpType("BatchNormalization", "", 15) @@ -322,11 +327,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Bernoulli", "", 15) @@ -342,12 +347,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - Y: Var + X: _VarInfo + Y: _VarInfo @dataclass class Outputs(BaseOutputs): - Z: Var + Z: _VarInfo op_type = OpType("BitShift", "", 11) @@ -364,11 +369,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - size: Var + size: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("BlackmanWindow", "", 17) @@ -384,11 +389,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Cast", "", 13) @@ -404,12 +409,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - target_type: Var + input: _VarInfo + target_type: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("CastLike", "", 15) @@ -425,11 +430,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Ceil", "", 13) @@ -445,11 +450,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Celu", "", 12) @@ -465,13 +470,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - min: Optional[Var] - max: Optional[Var] + input: _VarInfo + min: Optional[_VarInfo] + max: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Clip", "", 13) @@ -487,15 +492,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - condition: Var + input: _VarInfo + condition: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo - def infer_output_types(self) -> dict[str, Type]: - self.infer_output_types_onnx() + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: + self.infer_output_types_onnx(input_prop_values) inp, cond = 
( self.inputs.input.unwrap_tensor(), self.inputs.condition.unwrap_tensor(), @@ -534,11 +539,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - inputs: Sequence[Var] + inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - concat_result: Var + concat_result: _VarInfo op_type = OpType("Concat", "", 13) @@ -555,11 +560,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var + input_sequence: _VarInfo @dataclass class Outputs(BaseOutputs): - concat_result: Var + concat_result: _VarInfo op_type = OpType("ConcatFromSequence", "", 11) @@ -583,9 +588,9 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: ((key, raw),) = ( (k, v.value) for k, v in self.attrs.get_fields().items() if v is not None ) @@ -625,11 +630,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ConstantOfShape", "", 9) @@ -650,13 +655,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - B: Optional[Var] + X: _VarInfo + W: _VarInfo + B: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Conv", "", 11) @@ -677,14 +682,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - w: Var - x_zero_point: Optional[Var] - w_zero_point: Optional[Var] + x: _VarInfo + w: _VarInfo + x_zero_point: Optional[_VarInfo] + w_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("ConvInteger", "", 10) @@ -707,13 +712,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - B: Optional[Var] + X: _VarInfo + W: _VarInfo + B: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("ConvTranspose", "", 11) @@ -729,11 +734,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Cos", "", 7) @@ -749,11 +754,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Cosh", "", 9) @@ -770,12 +775,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - axis: Var + x: _VarInfo + axis: _VarInfo @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("CumSum", "", 14) @@ -793,12 +798,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - dft_length: Optional[Var] + input: _VarInfo + dft_length: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("DFT", "", 17) @@ -815,11 +820,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("DepthToSpace", "", 13) @@ -835,13 +840,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - x_scale: Var - x_zero_point: Optional[Var] + x: _VarInfo + x_scale: 
_VarInfo + x_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("DequantizeLinear", "", 13) @@ -857,11 +862,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Det", "", 11) @@ -877,12 +882,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Div", "", 14) @@ -898,14 +903,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - ratio: Optional[Var] - training_mode: Optional[Var] + data: _VarInfo + ratio: Optional[_VarInfo] + training_mode: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var - mask: Optional[Var] + output: _VarInfo + mask: Optional[_VarInfo] op_type = OpType("Dropout", "", 13) @@ -921,13 +926,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var + x: _VarInfo @dataclass class Outputs(BaseOutputs): - y: Var - y_scale: Var - y_zero_point: Var + y: _VarInfo + y_scale: _VarInfo + y_zero_point: _VarInfo op_type = OpType("DynamicQuantizeLinear", "", 11) @@ -943,11 +948,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - Inputs: Sequence[Var] + Inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - Output: Var + Output: _VarInfo op_type = OpType("Einsum", "", 12) @@ -963,11 +968,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Elu", "", 6) @@ -983,12 +988,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Equal", "", 13) @@ -1004,11 +1009,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Erf", "", 13) @@ -1024,11 +1029,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Exp", "", 13) @@ -1044,12 +1049,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - shape: Var + input: _VarInfo + shape: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Expand", "", 13) @@ -1066,11 +1071,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("EyeLike", "", 9) @@ -1086,11 +1091,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Flatten", "", 13) @@ -1106,11 +1111,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Floor", "", 13) @@ -1133,17 +1138,17 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - R: Var - B: Optional[Var] - 
sequence_lens: Optional[Var] - initial_h: Optional[Var] + X: _VarInfo + W: _VarInfo + R: _VarInfo + B: Optional[_VarInfo] + sequence_lens: Optional[_VarInfo] + initial_h: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Optional[Var] - Y_h: Optional[Var] + Y: Optional[_VarInfo] + Y_h: Optional[_VarInfo] op_type = OpType("GRU", "", 14) @@ -1159,12 +1164,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var + data: _VarInfo + indices: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Gather", "", 13) @@ -1180,12 +1185,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var + data: _VarInfo + indices: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("GatherElements", "", 13) @@ -1201,12 +1206,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var + data: _VarInfo + indices: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("GatherND", "", 13) @@ -1225,13 +1230,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var - C: Optional[Var] + A: _VarInfo + B: _VarInfo + C: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Gemm", "", 13) @@ -1247,11 +1252,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GlobalAveragePool", "", 1) @@ -1267,11 +1272,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GlobalLpPool", "", 2) @@ -1287,11 +1292,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GlobalMaxPool", "", 1) @@ -1307,12 +1312,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Greater", "", 13) @@ -1328,12 +1333,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("GreaterOrEqual", "", 16) @@ -1351,12 +1356,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - grid: Var + X: _VarInfo + grid: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GridSample", "", 16) @@ -1373,11 +1378,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - size: Var + size: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("HammingWindow", "", 17) @@ -1394,11 +1399,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - size: Var + size: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("HannWindow", "", 17) @@ -1415,11 +1420,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("HardSigmoid", "", 6) @@ -1435,11 +1440,11 @@ 
class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("HardSwish", "", 14) @@ -1455,11 +1460,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Hardmax", "", 13) @@ -1475,11 +1480,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Identity", "", 16) @@ -1496,11 +1501,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - cond: Var + cond: _VarInfo @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("If", "", 16) @@ -1516,13 +1521,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - scale: Var - B: Var + input: _VarInfo + scale: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("InstanceNormalization", "", 6) @@ -1539,11 +1544,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("IsInf", "", 10) @@ -1559,11 +1564,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("IsNaN", "", 13) @@ -1582,11 +1587,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LRN", "", 13) @@ -1609,20 +1614,20 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - R: Var - B: Optional[Var] - sequence_lens: Optional[Var] - initial_h: Optional[Var] - initial_c: Optional[Var] - P: Optional[Var] + X: _VarInfo + W: _VarInfo + R: _VarInfo + B: Optional[_VarInfo] + sequence_lens: Optional[_VarInfo] + initial_h: Optional[_VarInfo] + initial_c: Optional[_VarInfo] + P: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Optional[Var] - Y_h: Optional[Var] - Y_c: Optional[Var] + Y: Optional[_VarInfo] + Y_h: Optional[_VarInfo] + Y_c: Optional[_VarInfo] op_type = OpType("LSTM", "", 14) @@ -1640,15 +1645,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - Scale: Var - B: Optional[Var] + X: _VarInfo + Scale: _VarInfo + B: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var - Mean: Optional[Var] - InvStdDev: Optional[Var] + Y: _VarInfo + Mean: Optional[_VarInfo] + InvStdDev: Optional[_VarInfo] op_type = OpType("LayerNormalization", "", 17) @@ -1664,11 +1669,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LeakyRelu", "", 16) @@ -1684,12 +1689,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Less", "", 13) @@ -1705,12 +1710,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: 
_VarInfo op_type = OpType("LessOrEqual", "", 16) @@ -1726,11 +1731,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Log", "", 13) @@ -1746,11 +1751,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("LogSoftmax", "", 13) @@ -1766,21 +1771,21 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - M: Optional[Var] - cond: Optional[Var] - v_initial: Sequence[Var] + M: Optional[_VarInfo] + cond: Optional[_VarInfo] + v_initial: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - v_final_and_scan_outputs: Sequence[Var] + v_final_and_scan_outputs: Sequence[_VarInfo] - def infer_output_types(self) -> dict[str, Type]: - output_types = super().infer_output_types() + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: + output_types = super().infer_output_types({}) body = self.attrs.body.value n = len(body.requested_arguments) - 2 - carried_names = list(self.outputs.get_vars())[:n] + carried_names = list(self.outputs.get_var_infos())[:n] carried_types = [v.type for v in list(body.requested_results.values())[1:][:n]] for name, typ in zip(carried_names, carried_types): @@ -1803,11 +1808,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("LpNormalization", "", 1) @@ -1827,11 +1832,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LpPool", "", 11) @@ -1847,12 +1852,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("MatMul", "", 13) @@ -1868,14 +1873,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var - a_zero_point: Optional[Var] - b_zero_point: Optional[Var] + A: _VarInfo + B: _VarInfo + a_zero_point: Optional[_VarInfo] + b_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("MatMulInteger", "", 10) @@ -1891,11 +1896,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data_0: Sequence[Var] + data_0: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - max: Var + max: _VarInfo op_type = OpType("Max", "", 13) @@ -1917,12 +1922,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - Indices: Optional[Var] + Y: _VarInfo + Indices: Optional[_VarInfo] op_type = OpType("MaxPool", "", 12) @@ -1939,12 +1944,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - rois: Var + X: _VarInfo + rois: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("MaxRoiPool", "", 1) @@ -1962,13 +1967,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - I: Var - output_shape: Optional[Var] + X: _VarInfo + I: _VarInfo + output_shape: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = 
OpType("MaxUnpool", "", 11) @@ -1984,11 +1989,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data_0: Sequence[Var] + data_0: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - mean: Var + mean: _VarInfo op_type = OpType("Mean", "", 13) @@ -2004,11 +2009,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("MeanVarianceNormalization", "", 13) @@ -2024,15 +2029,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - num_mel_bins: Var - dft_length: Var - sample_rate: Var - lower_edge_hertz: Var - upper_edge_hertz: Var + num_mel_bins: _VarInfo + dft_length: _VarInfo + sample_rate: _VarInfo + lower_edge_hertz: _VarInfo + upper_edge_hertz: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("MelWeightMatrix", "", 17) @@ -2048,11 +2053,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data_0: Sequence[Var] + data_0: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - min: Var + min: _VarInfo op_type = OpType("Min", "", 13) @@ -2068,12 +2073,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Mod", "", 13) @@ -2089,12 +2094,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Mul", "", 14) @@ -2112,11 +2117,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Multinomial", "", 7) @@ -2132,11 +2137,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Neg", "", 13) @@ -2153,13 +2158,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - target: Var - weight: Optional[Var] + input: _VarInfo + target: _VarInfo + weight: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - loss: Var + loss: _VarInfo op_type = OpType("NegativeLogLikelihoodLoss", "", 13) @@ -2175,15 +2180,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - boxes: Var - scores: Var - max_output_boxes_per_class: Optional[Var] - iou_threshold: Optional[Var] - score_threshold: Optional[Var] + boxes: _VarInfo + scores: _VarInfo + max_output_boxes_per_class: Optional[_VarInfo] + iou_threshold: Optional[_VarInfo] + score_threshold: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - selected_indices: Var + selected_indices: _VarInfo op_type = OpType("NonMaxSuppression", "", 11) @@ -2199,11 +2204,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("NonZero", "", 13) @@ -2219,11 +2224,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Not", "", 1) @@ -2239,13 +2244,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - indices: Var - depth: Var - values: Var + 
indices: _VarInfo + depth: _VarInfo + values: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("OneHot", "", 11) @@ -2261,11 +2266,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Optional[Var] + input: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Optional", "", 15) @@ -2281,11 +2286,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("OptionalGetElement", "", 15) @@ -2301,11 +2306,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("OptionalHasElement", "", 15) @@ -2321,12 +2326,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Or", "", 7) @@ -2342,12 +2347,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - slope: Var + X: _VarInfo + slope: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("PRelu", "", 16) @@ -2363,13 +2368,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - pads: Var - constant_value: Optional[Var] + data: _VarInfo + pads: _VarInfo + constant_value: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Pad", "", 13) @@ -2385,12 +2390,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - Y: Var + X: _VarInfo + Y: _VarInfo @dataclass class Outputs(BaseOutputs): - Z: Var + Z: _VarInfo op_type = OpType("Pow", "", 15) @@ -2411,19 +2416,19 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - x_scale: Var - x_zero_point: Var - w: Var - w_scale: Var - w_zero_point: Var - y_scale: Var - y_zero_point: Var - B: Optional[Var] + x: _VarInfo + x_scale: _VarInfo + x_zero_point: _VarInfo + w: _VarInfo + w_scale: _VarInfo + w_zero_point: _VarInfo + y_scale: _VarInfo + y_zero_point: _VarInfo + B: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QLinearConv", "", 10) @@ -2439,18 +2444,18 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - a: Var - a_scale: Var - a_zero_point: Var - b: Var - b_scale: Var - b_zero_point: Var - y_scale: Var - y_zero_point: Var + a: _VarInfo + a_scale: _VarInfo + a_zero_point: _VarInfo + b: _VarInfo + b_scale: _VarInfo + b_zero_point: _VarInfo + y_scale: _VarInfo + y_zero_point: _VarInfo @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QLinearMatMul", "", 10) @@ -2466,13 +2471,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - y_scale: Var - y_zero_point: Optional[Var] + x: _VarInfo + y_scale: _VarInfo + y_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QuantizeLinear", "", 13) @@ -2494,17 +2499,17 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - R: Var - B: Optional[Var] - sequence_lens: Optional[Var] - initial_h: Optional[Var] + X: _VarInfo + W: _VarInfo + R: _VarInfo + B: Optional[_VarInfo] + 
sequence_lens: Optional[_VarInfo] + initial_h: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Optional[Var] - Y_h: Optional[Var] + Y: Optional[_VarInfo] + Y_h: Optional[_VarInfo] op_type = OpType("RNN", "", 14) @@ -2526,7 +2531,7 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("RandomNormal", "", 1) @@ -2545,11 +2550,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("RandomNormalLike", "", 1) @@ -2571,7 +2576,7 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("RandomUniform", "", 1) @@ -2590,11 +2595,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("RandomUniformLike", "", 1) @@ -2610,13 +2615,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - start: Var - limit: Var - delta: Var + start: _VarInfo + limit: _VarInfo + delta: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Range", "", 11) @@ -2632,11 +2637,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Reciprocal", "", 13) @@ -2653,11 +2658,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceL1", "", 13) @@ -2674,11 +2679,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceL2", "", 13) @@ -2695,11 +2700,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceLogSum", "", 13) @@ -2716,11 +2721,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceLogSumExp", "", 13) @@ -2737,11 +2742,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMax", "", 13) @@ -2758,11 +2763,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMean", "", 13) @@ -2779,11 +2784,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMin", "", 13) @@ -2800,11 +2805,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceProd", "", 13) @@ -2821,12 +2826,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): 
- data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceSum", "", 13) @@ -2843,11 +2848,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceSumSquare", "", 13) @@ -2863,11 +2868,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Relu", "", 14) @@ -2883,12 +2888,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - shape: Var + data: _VarInfo + shape: _VarInfo @dataclass class Outputs(BaseOutputs): - reshaped: Var + reshaped: _VarInfo op_type = OpType("Reshape", "", 14) @@ -2909,14 +2914,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - roi: Optional[Var] - scales: Optional[Var] - sizes: Optional[Var] + X: _VarInfo + roi: Optional[_VarInfo] + scales: Optional[_VarInfo] + sizes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Resize", "", 13) @@ -2933,12 +2938,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - sequence_lens: Var + input: _VarInfo + sequence_lens: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("ReverseSequence", "", 10) @@ -2959,13 +2964,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - rois: Var - batch_indices: Var + X: _VarInfo + rois: _VarInfo + batch_indices: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("RoiAlign", "", 16) @@ -2981,11 +2986,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Round", "", 11) @@ -3001,14 +3006,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - signal: Var - frame_step: Var - window: Optional[Var] - frame_length: Optional[Var] + signal: _VarInfo + frame_step: _VarInfo + window: Optional[_VarInfo] + frame_length: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("STFT", "", 17) @@ -3029,11 +3034,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - initial_state_and_scan_inputs: Sequence[Var] + initial_state_and_scan_inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - final_state_and_scan_outputs: Sequence[Var] + final_state_and_scan_outputs: Sequence[_VarInfo] op_type = OpType("Scan", "", 16) @@ -3050,13 +3055,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var - updates: Var + data: _VarInfo + indices: _VarInfo + updates: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ScatterElements", "", 16) @@ -3072,13 +3077,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var - updates: Var + data: _VarInfo + indices: _VarInfo + updates: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ScatterND", "", 16) @@ -3095,11 +3100,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo 
@dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Selu", "", 6) @@ -3115,12 +3120,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var - position: Var + input_sequence: _VarInfo + position: _VarInfo @dataclass class Outputs(BaseOutputs): - tensor: Var + tensor: _VarInfo op_type = OpType("SequenceAt", "", 11) @@ -3136,11 +3141,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - inputs: Sequence[Var] + inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - output_sequence: Var + output_sequence: _VarInfo op_type = OpType("SequenceConstruct", "", 11) @@ -3158,7 +3163,7 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("SequenceEmpty", "", 11) @@ -3174,12 +3179,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var - position: Optional[Var] + input_sequence: _VarInfo + position: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output_sequence: Var + output_sequence: _VarInfo op_type = OpType("SequenceErase", "", 11) @@ -3195,13 +3200,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var - tensor: Var - position: Optional[Var] + input_sequence: _VarInfo + tensor: _VarInfo + position: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output_sequence: Var + output_sequence: _VarInfo op_type = OpType("SequenceInsert", "", 11) @@ -3217,11 +3222,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var + input_sequence: _VarInfo @dataclass class Outputs(BaseOutputs): - length: Var + length: _VarInfo op_type = OpType("SequenceLength", "", 11) @@ -3237,12 +3242,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_sequence: Var - additional_inputs: Sequence[Var] + input_sequence: _VarInfo + additional_inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - out_sequence: Sequence[Var] + out_sequence: Sequence[_VarInfo] op_type = OpType("SequenceMap", "", 17) @@ -3259,11 +3264,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - shape: Var + shape: _VarInfo op_type = OpType("Shape", "", 15) @@ -3280,11 +3285,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Shrink", "", 9) @@ -3300,11 +3305,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Sigmoid", "", 13) @@ -3320,11 +3325,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Sign", "", 13) @@ -3340,11 +3345,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Sin", "", 7) @@ -3360,11 +3365,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Sinh", "", 9) @@ -3380,11 +3385,11 @@ 
class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - size: Var + size: _VarInfo op_type = OpType("Size", "", 13) @@ -3400,15 +3405,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - starts: Var - ends: Var - axes: Optional[Var] - steps: Optional[Var] + data: _VarInfo + starts: _VarInfo + ends: _VarInfo + axes: Optional[_VarInfo] + steps: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Slice", "", 13) @@ -3424,11 +3429,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Softmax", "", 13) @@ -3445,14 +3450,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - scores: Var - labels: Var - weights: Optional[Var] + scores: _VarInfo + labels: _VarInfo + weights: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var - log_prob: Optional[Var] + output: _VarInfo + log_prob: Optional[_VarInfo] op_type = OpType("SoftmaxCrossEntropyLoss", "", 13) @@ -3468,11 +3473,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Softplus", "", 1) @@ -3488,11 +3493,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Softsign", "", 1) @@ -3508,11 +3513,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("SpaceToDepth", "", 13) @@ -3528,12 +3533,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - split: Optional[Var] + input: _VarInfo + split: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("Split", "", 13) @@ -3550,12 +3555,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - split: Optional[Var] + input: _VarInfo + split: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output_sequence: Var + output_sequence: _VarInfo op_type = OpType("SplitToSequence", "", 11) @@ -3571,11 +3576,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Sqrt", "", 13) @@ -3591,12 +3596,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - squeezed: Var + squeezed: _VarInfo op_type = OpType("Squeeze", "", 13) @@ -3615,11 +3620,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("StringNormalizer", "", 10) @@ -3635,12 +3640,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Sub", "", 14) @@ -3656,11 +3661,11 @@ class Attributes(BaseAttributes): 
@dataclass class Inputs(BaseInputs): - data_0: Sequence[Var] + data_0: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - sum: Var + sum: _VarInfo op_type = OpType("Sum", "", 13) @@ -3676,11 +3681,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Tan", "", 7) @@ -3696,11 +3701,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Tanh", "", 13) @@ -3724,11 +3729,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("TfIdfVectorizer", "", 9) @@ -3744,11 +3749,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("ThresholdedRelu", "", 10) @@ -3764,12 +3769,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - repeats: Var + input: _VarInfo + repeats: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Tile", "", 13) @@ -3787,13 +3792,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - K: Var + X: _VarInfo + K: _VarInfo @dataclass class Outputs(BaseOutputs): - Values: Var - Indices: Var + Values: _VarInfo + Indices: _VarInfo op_type = OpType("TopK", "", 11) @@ -3809,11 +3814,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - transposed: Var + transposed: _VarInfo op_type = OpType("Transpose", "", 13) @@ -3829,12 +3834,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - k: Optional[Var] + input: _VarInfo + k: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Trilu", "", 14) @@ -3851,14 +3856,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var - indices: Optional[Var] - inverse_indices: Optional[Var] - counts: Optional[Var] + Y: _VarInfo + indices: Optional[_VarInfo] + inverse_indices: Optional[_VarInfo] + counts: Optional[_VarInfo] op_type = OpType("Unique", "", 11) @@ -3874,12 +3879,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Var + data: _VarInfo + axes: _VarInfo @dataclass class Outputs(BaseOutputs): - expanded: Var + expanded: _VarInfo op_type = OpType("Unsqueeze", "", 13) @@ -3895,13 +3900,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - condition: Var - X: Var - Y: Var + condition: _VarInfo + X: _VarInfo + Y: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Where", "", 16) @@ -3917,12 +3922,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Xor", "", 7) @@ -3958,12 +3963,20 @@ def abs( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, 
`tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Abs( - _Abs.Attributes(), - _Abs.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Abs( + _Abs.Attributes(), + _Abs.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def acos( @@ -3992,12 +4005,20 @@ def acos( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Acos( - _Acos.Attributes(), - _Acos.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Acos( + _Acos.Attributes(), + _Acos.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def acosh( @@ -4027,12 +4048,20 @@ def acosh( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Acosh( - _Acosh.Attributes(), - _Acosh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Acosh( + _Acosh.Attributes(), + _Acosh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def add( @@ -4072,13 +4101,22 @@ def add( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Add( - _Add.Attributes(), - _Add.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Add( + _Add.Attributes(), + _Add.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def and_( @@ -4117,13 +4155,22 @@ def and_( - T: `tensor(bool)` - T1: `tensor(bool)` """ - return _And( - _And.Attributes(), - _And.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _And( + _And.Attributes(), + _And.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def arg_max( @@ -4174,16 +4221,26 @@ def arg_max( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ArgMax( - _ArgMax.Attributes( - axis=AttrInt64(axis, name="axis"), - keepdims=AttrInt64(keepdims, name="keepdims"), - select_last_index=AttrInt64(select_last_index, name="select_last_index"), - ), - _ArgMax.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ArgMax( + _ArgMax.Attributes( + axis=AttrInt64(axis, name="axis"), + keepdims=AttrInt64(keepdims, name="keepdims"), + select_last_index=AttrInt64( + select_last_index, name="select_last_index" + ), + ), + _ArgMax.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def arg_min( @@ -4234,16 +4291,26 @@ def arg_min( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, 
`tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ArgMin( - _ArgMin.Attributes( - axis=AttrInt64(axis, name="axis"), - keepdims=AttrInt64(keepdims, name="keepdims"), - select_last_index=AttrInt64(select_last_index, name="select_last_index"), - ), - _ArgMin.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ArgMin( + _ArgMin.Attributes( + axis=AttrInt64(axis, name="axis"), + keepdims=AttrInt64(keepdims, name="keepdims"), + select_last_index=AttrInt64( + select_last_index, name="select_last_index" + ), + ), + _ArgMin.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def asin( @@ -4272,12 +4339,20 @@ def asin( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Asin( - _Asin.Attributes(), - _Asin.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Asin( + _Asin.Attributes(), + _Asin.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def asinh( @@ -4306,12 +4381,20 @@ def asinh( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Asinh( - _Asinh.Attributes(), - _Asinh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Asinh( + _Asinh.Attributes(), + _Asinh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def atan( @@ -4340,12 +4423,20 @@ def atan( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Atan( - _Atan.Attributes(), - _Atan.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Atan( + _Atan.Attributes(), + _Atan.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def atanh( @@ -4375,12 +4466,20 @@ def atanh( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Atanh( - _Atanh.Attributes(), - _Atanh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Atanh( + _Atanh.Attributes(), + _Atanh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def average_pool( @@ -4504,19 +4603,29 @@ def average_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _AveragePool( - _AveragePool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - count_include_pad=AttrInt64(count_include_pad, name="count_include_pad"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _AveragePool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _AveragePool( + _AveragePool.Attributes( + auto_pad=AttrString(auto_pad, 
name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + count_include_pad=AttrInt64( + count_include_pad, name="count_include_pad" + ), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _AveragePool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def batch_normalization( @@ -4639,20 +4748,32 @@ def batch_normalization( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _BatchNormalization( - _BatchNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - momentum=AttrFloat32(momentum, name="momentum"), - training_mode=AttrInt64(training_mode, name="training_mode"), - ), - _BatchNormalization.Inputs( - X=X, - scale=scale, - B=B, - input_mean=input_mean, - input_var=input_var, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + scale=scale, + B=B, + input_mean=input_mean, + input_var=input_var, + ) + output_vars = ( + _BatchNormalization( + _BatchNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + momentum=AttrFloat32(momentum, name="momentum"), + training_mode=AttrInt64(training_mode, name="training_mode"), + ), + _BatchNormalization.Inputs( + X=unwrap_vars(X), + scale=unwrap_vars(scale), + B=unwrap_vars(B), + input_mean=unwrap_vars(input_mean), + input_var=unwrap_vars(input_var), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def bernoulli( @@ -4700,15 +4821,23 @@ def bernoulli( - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Bernoulli( - _Bernoulli.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _Bernoulli.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Bernoulli( + _Bernoulli.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), + _Bernoulli.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def bit_shift( @@ -4761,15 +4890,24 @@ def bit_shift( Type constraints: - T: `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BitShift( - _BitShift.Attributes( - direction=AttrString(direction, name="direction"), - ), - _BitShift.Inputs( - X=X, - Y=Y, - ), - ).outputs.Z + input_prop_values = create_prop_dict( + X=X, + Y=Y, + ) + output_vars = ( + _BitShift( + _BitShift.Attributes( + direction=AttrString(direction, name="direction"), + ), + _BitShift.Inputs( + X=unwrap_vars(X), + Y=unwrap_vars(Y), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Z + ) + return output_vars # type: ignore def blackman_window( @@ -4813,15 +4951,23 @@ def blackman_window( - T1: `tensor(int32)`, `tensor(int64)` - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, 
`tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BlackmanWindow( - _BlackmanWindow.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - periodic=AttrInt64(periodic, name="periodic"), - ), - _BlackmanWindow.Inputs( - size=size, - ), - ).outputs.output + input_prop_values = create_prop_dict( + size=size, + ) + output_vars = ( + _BlackmanWindow( + _BlackmanWindow.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + periodic=AttrInt64(periodic, name="periodic"), + ), + _BlackmanWindow.Inputs( + size=unwrap_vars(size), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def cast( @@ -4906,14 +5052,22 @@ def cast( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Cast( - _Cast.Attributes( - to=AttrDtype(to, name="to"), - ), - _Cast.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Cast( + _Cast.Attributes( + to=AttrDtype(to, name="to"), + ), + _Cast.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def cast_like( @@ -4950,13 +5104,22 @@ def cast_like( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _CastLike( - _CastLike.Attributes(), - _CastLike.Inputs( - input=input, - target_type=target_type, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + target_type=target_type, + ) + output_vars = ( + _CastLike( + _CastLike.Attributes(), + _CastLike.Inputs( + input=unwrap_vars(input), + target_type=unwrap_vars(target_type), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def ceil( @@ -4987,12 +5150,20 @@ def ceil( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Ceil( - _Ceil.Attributes(), - _Ceil.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Ceil( + _Ceil.Attributes(), + _Ceil.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def celu( @@ -5031,14 +5202,22 @@ def celu( Type constraints: - T: `tensor(float)` """ - return _Celu( - _Celu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _Celu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Celu( + _Celu.Attributes( + 
alpha=AttrFloat32(alpha, name="alpha"), + ), + _Celu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def clip( @@ -5078,14 +5257,24 @@ def clip( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Clip( - _Clip.Attributes(), - _Clip.Inputs( - input=input, - min=min, - max=max, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + min=min, + max=max, + ) + output_vars = ( + _Clip( + _Clip.Attributes(), + _Clip.Inputs( + input=unwrap_vars(input), + min=unwrap_vars(min), + max=unwrap_vars(max), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def compress( @@ -5134,15 +5323,24 @@ def compress( - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _Compress( - _Compress.Attributes( - axis=AttrInt64.maybe(axis, name="axis"), - ), - _Compress.Inputs( - input=input, - condition=condition, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + condition=condition, + ) + output_vars = ( + _Compress( + _Compress.Attributes( + axis=AttrInt64.maybe(axis, name="axis"), + ), + _Compress.Inputs( + input=unwrap_vars(input), + condition=unwrap_vars(condition), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def concat( @@ -5178,14 +5376,22 @@ def concat( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Concat( - _Concat.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Concat.Inputs( - inputs=inputs, - ), - ).outputs.concat_result + input_prop_values = create_prop_dict( + inputs=inputs, + ) + output_vars = ( + _Concat( + _Concat.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Concat.Inputs( + inputs=unwrap_vars(inputs), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .concat_result + ) + return output_vars # type: ignore def concat_from_sequence( @@ -5230,15 +5436,23 @@ def concat_from_sequence( - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ConcatFromSequence( - _ConcatFromSequence.Attributes( - axis=AttrInt64(axis, name="axis"), - new_axis=AttrInt64(new_axis, name="new_axis"), - ), - _ConcatFromSequence.Inputs( 
- input_sequence=input_sequence, - ), - ).outputs.concat_result + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + ) + output_vars = ( + _ConcatFromSequence( + _ConcatFromSequence.Attributes( + axis=AttrInt64(axis, name="axis"), + new_axis=AttrInt64(new_axis, name="new_axis"), + ), + _ConcatFromSequence.Inputs( + input_sequence=unwrap_vars(input_sequence), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .concat_result + ) + return output_vars # type: ignore def constant( @@ -5296,18 +5510,24 @@ def constant( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Constant( - _Constant.Attributes( - value=AttrTensor.maybe(value, name="value"), - value_float=AttrFloat32.maybe(value_float, name="value_float"), - value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), - value_int=AttrInt64.maybe(value_int, name="value_int"), - value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), - value_string=AttrString.maybe(value_string, name="value_string"), - value_strings=AttrStrings.maybe(value_strings, name="value_strings"), - ), - _Constant.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + output_vars = ( + _Constant( + _Constant.Attributes( + value=AttrTensor.maybe(value, name="value"), + value_float=AttrFloat32.maybe(value_float, name="value_float"), + value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), + value_int=AttrInt64.maybe(value_int, name="value_int"), + value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), + value_string=AttrString.maybe(value_string, name="value_string"), + value_strings=AttrStrings.maybe(value_strings, name="value_strings"), + ), + _Constant.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def constant_of_shape( @@ -5347,14 +5567,22 @@ def constant_of_shape( - T1: `tensor(int64)` - T2: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ConstantOfShape( - _ConstantOfShape.Attributes( - value=AttrTensor.maybe(value, name="value"), - ), - _ConstantOfShape.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _ConstantOfShape( + _ConstantOfShape.Attributes( + value=AttrTensor.maybe(value, name="value"), + ), + _ConstantOfShape.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def conv( @@ -5454,21 +5682,31 @@ def conv( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Conv( - _Conv.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _Conv.Inputs( - X=X, - W=W, - B=B, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + W=W, + B=B, + ) + output_vars = ( + _Conv( + 
_Conv.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _Conv.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def conv_integer( @@ -5579,22 +5817,33 @@ def conv_integer( - T2: `tensor(int8)`, `tensor(uint8)` - T3: `tensor(int32)` """ - return _ConvInteger( - _ConvInteger.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _ConvInteger.Inputs( - x=x, - w=w, - x_zero_point=x_zero_point, - w_zero_point=w_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + w=w, + x_zero_point=x_zero_point, + w_zero_point=w_zero_point, + ) + output_vars = ( + _ConvInteger( + _ConvInteger.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _ConvInteger.Inputs( + x=unwrap_vars(x), + w=unwrap_vars(w), + x_zero_point=unwrap_vars(x_zero_point), + w_zero_point=unwrap_vars(w_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def conv_transpose( @@ -5725,23 +5974,33 @@ def conv_transpose( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _ConvTranspose( - _ConvTranspose.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - output_padding=AttrInt64s.maybe(output_padding, name="output_padding"), - output_shape=AttrInt64s.maybe(output_shape, name="output_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _ConvTranspose.Inputs( - X=X, - W=W, - B=B, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + W=W, + B=B, + ) + output_vars = ( + _ConvTranspose( + _ConvTranspose.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + output_padding=AttrInt64s.maybe(output_padding, name="output_padding"), + output_shape=AttrInt64s.maybe(output_shape, name="output_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _ConvTranspose.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def cos( @@ -5769,12 +6028,20 @@ def cos( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Cos( - _Cos.Attributes(), - 
_Cos.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Cos( + _Cos.Attributes(), + _Cos.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def cosh( @@ -5802,12 +6069,20 @@ def cosh( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Cosh( - _Cosh.Attributes(), - _Cosh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Cosh( + _Cosh.Attributes(), + _Cosh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def cumsum( @@ -5875,16 +6150,25 @@ def cumsum( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` - T2: `tensor(int32)`, `tensor(int64)` """ - return _CumSum( - _CumSum.Attributes( - exclusive=AttrInt64(exclusive, name="exclusive"), - reverse=AttrInt64(reverse, name="reverse"), - ), - _CumSum.Inputs( - x=x, - axis=axis, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + axis=axis, + ) + output_vars = ( + _CumSum( + _CumSum.Attributes( + exclusive=AttrInt64(exclusive, name="exclusive"), + reverse=AttrInt64(reverse, name="reverse"), + ), + _CumSum.Inputs( + x=unwrap_vars(x), + axis=unwrap_vars(axis), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def dft( @@ -5960,17 +6244,26 @@ def dft( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int32)`, `tensor(int64)` """ - return _DFT( - _DFT.Attributes( - axis=AttrInt64(axis, name="axis"), - inverse=AttrInt64(inverse, name="inverse"), - onesided=AttrInt64(onesided, name="onesided"), - ), - _DFT.Inputs( - input=input, - dft_length=dft_length, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + dft_length=dft_length, + ) + output_vars = ( + _DFT( + _DFT.Attributes( + axis=AttrInt64(axis, name="axis"), + inverse=AttrInt64(inverse, name="inverse"), + onesided=AttrInt64(onesided, name="onesided"), + ), + _DFT.Inputs( + input=unwrap_vars(input), + dft_length=unwrap_vars(dft_length), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def depth_to_space( @@ -6035,15 +6328,23 @@ def depth_to_space( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _DepthToSpace( - _DepthToSpace.Attributes( - blocksize=AttrInt64(blocksize, name="blocksize"), - mode=AttrString(mode, name="mode"), - ), - _DepthToSpace.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _DepthToSpace( + _DepthToSpace.Attributes( + blocksize=AttrInt64(blocksize, name="blocksize"), + mode=AttrString(mode, name="mode"), + ), + _DepthToSpace.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def dequantize_linear( @@ -6096,16 +6397,26 @@ def dequantize_linear( Type 
constraints: - T: `tensor(int32)`, `tensor(int8)`, `tensor(uint8)` """ - return _DequantizeLinear( - _DequantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _DequantizeLinear.Inputs( - x=x, - x_scale=x_scale, - x_zero_point=x_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + x_scale=x_scale, + x_zero_point=x_zero_point, + ) + output_vars = ( + _DequantizeLinear( + _DequantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _DequantizeLinear.Inputs( + x=unwrap_vars(x), + x_scale=unwrap_vars(x_scale), + x_zero_point=unwrap_vars(x_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def det( @@ -6138,12 +6449,20 @@ def det( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Det( - _Det.Attributes(), - _Det.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Det( + _Det.Attributes(), + _Det.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def div( @@ -6183,13 +6502,22 @@ def div( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Div( - _Div.Attributes(), - _Div.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Div( + _Div.Attributes(), + _Div.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def dropout( @@ -6268,16 +6596,26 @@ def dropout( - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(bool)` """ - return _Dropout( - _Dropout.Attributes( - seed=AttrInt64.maybe(seed, name="seed"), - ), - _Dropout.Inputs( - data=data, - ratio=ratio, - training_mode=training_mode, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + data=data, + ratio=ratio, + training_mode=training_mode, + ) + output_vars = ( + _Dropout( + _Dropout.Attributes( + seed=AttrInt64.maybe(seed, name="seed"), + ), + _Dropout.Inputs( + data=unwrap_vars(data), + ratio=unwrap_vars(ratio), + training_mode=unwrap_vars(training_mode), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def dynamic_quantize_linear( @@ -6347,12 +6685,20 @@ def dynamic_quantize_linear( - T1: `tensor(float)` - T2: `tensor(uint8)` """ - return _DynamicQuantizeLinear( - _DynamicQuantizeLinear.Attributes(), - _DynamicQuantizeLinear.Inputs( - x=x, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + x=x, + ) + output_vars = ( + _DynamicQuantizeLinear( + _DynamicQuantizeLinear.Attributes(), + _DynamicQuantizeLinear.Inputs( + x=unwrap_vars(x), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def einsum( @@ -6417,14 +6763,22 @@ def einsum( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Einsum( - _Einsum.Attributes( - equation=AttrString(equation, name="equation"), - ), - _Einsum.Inputs( - 
Inputs=Inputs, - ), - ).outputs.Output + input_prop_values = create_prop_dict( + Inputs=Inputs, + ) + output_vars = ( + _Einsum( + _Einsum.Attributes( + equation=AttrString(equation, name="equation"), + ), + _Einsum.Inputs( + Inputs=unwrap_vars(Inputs), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Output + ) + return output_vars # type: ignore def elu( @@ -6460,14 +6814,22 @@ def elu( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Elu( - _Elu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _Elu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Elu( + _Elu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), + _Elu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def equal( @@ -6506,13 +6868,22 @@ def equal( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _Equal( - _Equal.Attributes(), - _Equal.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Equal( + _Equal.Attributes(), + _Equal.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def erf( @@ -6541,12 +6912,20 @@ def erf( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Erf( - _Erf.Attributes(), - _Erf.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Erf( + _Erf.Attributes(), + _Erf.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def exp( @@ -6574,12 +6953,20 @@ def exp( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Exp( - _Exp.Attributes(), - _Exp.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Exp( + _Exp.Attributes(), + _Exp.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def expand( @@ -6620,13 +7007,22 @@ def expand( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Expand( - _Expand.Attributes(), - _Expand.Inputs( - input=input, - shape=shape, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + shape=shape, + ) + output_vars = ( + _Expand( + _Expand.Attributes(), + _Expand.Inputs( + input=unwrap_vars(input), + shape=unwrap_vars(shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def eye_like( @@ -6677,15 +7073,23 @@ def eye_like( - T1: 
`tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _EyeLike( - _EyeLike.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - k=AttrInt64(k, name="k"), - ), - _EyeLike.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _EyeLike( + _EyeLike.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + k=AttrInt64(k, name="k"), + ), + _EyeLike.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def flatten( @@ -6727,14 +7131,22 @@ def flatten( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Flatten( - _Flatten.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Flatten.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Flatten( + _Flatten.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Flatten.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def floor( @@ -6765,12 +7177,20 @@ def floor( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Floor( - _Floor.Attributes(), - _Floor.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Floor( + _Floor.Attributes(), + _Floor.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def gru( @@ -6945,30 +7365,45 @@ def gru( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T1: `tensor(int32)` """ - return _GRU( - _GRU.Attributes( - activation_alpha=AttrFloat32s.maybe( - activation_alpha, name="activation_alpha" - ), - activation_beta=AttrFloat32s.maybe(activation_beta, name="activation_beta"), - activations=AttrStrings.maybe(activations, name="activations"), - clip=AttrFloat32.maybe(clip, name="clip"), - direction=AttrString(direction, name="direction"), - hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), - layout=AttrInt64(layout, name="layout"), - linear_before_reset=AttrInt64( - linear_before_reset, name="linear_before_reset" - ), - ), - _GRU.Inputs( - X=X, - W=W, - R=R, - B=B, - sequence_lens=sequence_lens, - initial_h=initial_h, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + W=W, + R=R, + B=B, + sequence_lens=sequence_lens, + initial_h=initial_h, + ) + output_vars = ( + _GRU( + _GRU.Attributes( + activation_alpha=AttrFloat32s.maybe( + activation_alpha, name="activation_alpha" + ), + activation_beta=AttrFloat32s.maybe( + activation_beta, name="activation_beta" + ), + activations=AttrStrings.maybe(activations, name="activations"), + clip=AttrFloat32.maybe(clip, name="clip"), + 
direction=AttrString(direction, name="direction"), + hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), + layout=AttrInt64(layout, name="layout"), + linear_before_reset=AttrInt64( + linear_before_reset, name="linear_before_reset" + ), + ), + _GRU.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + R=unwrap_vars(R), + B=unwrap_vars(B), + sequence_lens=unwrap_vars(sequence_lens), + initial_h=unwrap_vars(initial_h), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def gather( @@ -7057,15 +7492,24 @@ def gather( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _Gather( - _Gather.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Gather.Inputs( - data=data, - indices=indices, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + ) + output_vars = ( + _Gather( + _Gather.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Gather.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def gather_elements( @@ -7162,15 +7606,24 @@ def gather_elements( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _GatherElements( - _GatherElements.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _GatherElements.Inputs( - data=data, - indices=indices, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + ) + output_vars = ( + _GatherElements( + _GatherElements.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _GatherElements.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def gather_nd( @@ -7312,15 +7765,24 @@ def gather_nd( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _GatherND( - _GatherND.Attributes( - batch_dims=AttrInt64(batch_dims, name="batch_dims"), - ), - _GatherND.Inputs( - data=data, - indices=indices, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + ) + output_vars = ( + _GatherND( + _GatherND.Attributes( + batch_dims=AttrInt64(batch_dims, name="batch_dims"), + ), + _GatherND.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def gemm( @@ -7396,19 +7858,29 @@ def gemm( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, 
`tensor(uint64)` """ - return _Gemm( - _Gemm.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - beta=AttrFloat32(beta, name="beta"), - transA=AttrInt64(transA, name="transA"), - transB=AttrInt64(transB, name="transB"), - ), - _Gemm.Inputs( - A=A, - B=B, - C=C, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + A=A, + B=B, + C=C, + ) + output_vars = ( + _Gemm( + _Gemm.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + beta=AttrFloat32(beta, name="beta"), + transA=AttrInt64(transA, name="transA"), + transB=AttrInt64(transB, name="transB"), + ), + _Gemm.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + C=unwrap_vars(C), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def global_average_pool( @@ -7445,12 +7917,20 @@ def global_average_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GlobalAveragePool( - _GlobalAveragePool.Attributes(), - _GlobalAveragePool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _GlobalAveragePool( + _GlobalAveragePool.Attributes(), + _GlobalAveragePool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def global_lp_pool( @@ -7492,14 +7972,22 @@ def global_lp_pool( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GlobalLpPool( - _GlobalLpPool.Attributes( - p=AttrInt64(p, name="p"), - ), - _GlobalLpPool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _GlobalLpPool( + _GlobalLpPool.Attributes( + p=AttrInt64(p, name="p"), + ), + _GlobalLpPool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def global_max_pool( @@ -7536,12 +8024,20 @@ def global_max_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GlobalMaxPool( - _GlobalMaxPool.Attributes(), - _GlobalMaxPool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _GlobalMaxPool( + _GlobalMaxPool.Attributes(), + _GlobalMaxPool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def greater( @@ -7580,14 +8076,23 @@ def greater( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _Greater( - _Greater.Attributes(), - _Greater.Inputs( - A=A, - B=B, - ), - ).outputs.C - + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Greater( + _Greater.Attributes(), + _Greater.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore + def greater_or_equal( A: Var, @@ -7625,13 +8130,22 @@ def greater_or_equal( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _GreaterOrEqual( - _GreaterOrEqual.Attributes(), - _GreaterOrEqual.Inputs( - A=A, - B=B, - ), - ).outputs.C + 
input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _GreaterOrEqual( + _GreaterOrEqual.Attributes(), + _GreaterOrEqual.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def grid_sample( @@ -7716,17 +8230,26 @@ def grid_sample( - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GridSample( - _GridSample.Attributes( - align_corners=AttrInt64(align_corners, name="align_corners"), - mode=AttrString(mode, name="mode"), - padding_mode=AttrString(padding_mode, name="padding_mode"), - ), - _GridSample.Inputs( - X=X, - grid=grid, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + grid=grid, + ) + output_vars = ( + _GridSample( + _GridSample.Attributes( + align_corners=AttrInt64(align_corners, name="align_corners"), + mode=AttrString(mode, name="mode"), + padding_mode=AttrString(padding_mode, name="padding_mode"), + ), + _GridSample.Inputs( + X=unwrap_vars(X), + grid=unwrap_vars(grid), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def hamming_window( @@ -7770,15 +8293,23 @@ def hamming_window( - T1: `tensor(int32)`, `tensor(int64)` - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _HammingWindow( - _HammingWindow.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - periodic=AttrInt64(periodic, name="periodic"), - ), - _HammingWindow.Inputs( - size=size, - ), - ).outputs.output + input_prop_values = create_prop_dict( + size=size, + ) + output_vars = ( + _HammingWindow( + _HammingWindow.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + periodic=AttrInt64(periodic, name="periodic"), + ), + _HammingWindow.Inputs( + size=unwrap_vars(size), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def hann_window( @@ -7822,15 +8353,23 @@ def hann_window( - T1: `tensor(int32)`, `tensor(int64)` - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _HannWindow( - _HannWindow.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - periodic=AttrInt64(periodic, name="periodic"), - ), - _HannWindow.Inputs( - size=size, - ), - ).outputs.output + input_prop_values = create_prop_dict( + size=size, + ) + output_vars = ( + _HannWindow( + _HannWindow.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + periodic=AttrInt64(periodic, name="periodic"), + ), + _HannWindow.Inputs( + size=unwrap_vars(size), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def hard_sigmoid( @@ -7869,15 +8408,23 @@ def hard_sigmoid( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _HardSigmoid( - 
_HardSigmoid.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - beta=AttrFloat32(beta, name="beta"), - ), - _HardSigmoid.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _HardSigmoid( + _HardSigmoid.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + beta=AttrFloat32(beta, name="beta"), + ), + _HardSigmoid.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def hard_swish( @@ -7908,12 +8455,20 @@ def hard_swish( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _HardSwish( - _HardSwish.Attributes(), - _HardSwish.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _HardSwish( + _HardSwish.Attributes(), + _HardSwish.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def hardmax( @@ -7955,14 +8510,22 @@ def hardmax( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Hardmax( - _Hardmax.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Hardmax.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Hardmax( + _Hardmax.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Hardmax.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def identity( @@ -7990,12 +8553,20 @@ def identity( Type constraints: - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Identity( - _Identity.Attributes(), - _Identity.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Identity( + _Identity.Attributes(), + _Identity.Inputs( + input=unwrap_vars(input), + ), + ) + 
.get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def if_( @@ -8052,16 +8623,24 @@ def if_( """ _else_branch_subgraph: Graph = subgraph((), else_branch) _then_branch_subgraph: Graph = subgraph((), then_branch) - return _If( - _If.Attributes( - else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), - then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), - ), - _If.Inputs( - cond=cond, - ), - out_variadic=len(_else_branch_subgraph.requested_results), - ).outputs.outputs + input_prop_values = create_prop_dict( + cond=cond, + ) + output_vars = ( + _If( + _If.Attributes( + else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), + then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), + ), + _If.Inputs( + cond=unwrap_vars(cond), + ), + out_variadic=len(_else_branch_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .outputs + ) + return output_vars # type: ignore def instance_normalization( @@ -8110,16 +8689,26 @@ def instance_normalization( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _InstanceNormalization( - _InstanceNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - ), - _InstanceNormalization.Inputs( - input=input, - scale=scale, - B=B, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + scale=scale, + B=B, + ) + output_vars = ( + _InstanceNormalization( + _InstanceNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + ), + _InstanceNormalization.Inputs( + input=unwrap_vars(input), + scale=unwrap_vars(scale), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def isinf( @@ -8161,15 +8750,23 @@ def isinf( - T1: `tensor(double)`, `tensor(float)` - T2: `tensor(bool)` """ - return _IsInf( - _IsInf.Attributes( - detect_negative=AttrInt64(detect_negative, name="detect_negative"), - detect_positive=AttrInt64(detect_positive, name="detect_positive"), - ), - _IsInf.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _IsInf( + _IsInf.Attributes( + detect_negative=AttrInt64(detect_negative, name="detect_negative"), + detect_positive=AttrInt64(detect_positive, name="detect_positive"), + ), + _IsInf.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def isnan( @@ -8198,12 +8795,20 @@ def isnan( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(bool)` """ - return _IsNaN( - _IsNaN.Attributes(), - _IsNaN.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _IsNaN( + _IsNaN.Attributes(), + _IsNaN.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def lrn( @@ -8265,17 +8870,25 @@ def lrn( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LRN( - _LRN.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - beta=AttrFloat32(beta, name="beta"), - bias=AttrFloat32(bias, name="bias"), - size=AttrInt64(size, name="size"), - ), - _LRN.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _LRN( + _LRN.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + 
beta=AttrFloat32(beta, name="beta"), + bias=AttrFloat32(bias, name="bias"), + size=AttrInt64(size, name="size"), + ), + _LRN.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def lstm( @@ -8472,30 +9085,47 @@ def lstm( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T1: `tensor(int32)` """ - return _LSTM( - _LSTM.Attributes( - activation_alpha=AttrFloat32s.maybe( - activation_alpha, name="activation_alpha" - ), - activation_beta=AttrFloat32s.maybe(activation_beta, name="activation_beta"), - activations=AttrStrings.maybe(activations, name="activations"), - clip=AttrFloat32.maybe(clip, name="clip"), - direction=AttrString(direction, name="direction"), - hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), - input_forget=AttrInt64(input_forget, name="input_forget"), - layout=AttrInt64(layout, name="layout"), - ), - _LSTM.Inputs( - X=X, - W=W, - R=R, - B=B, - sequence_lens=sequence_lens, - initial_h=initial_h, - initial_c=initial_c, - P=P, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + W=W, + R=R, + B=B, + sequence_lens=sequence_lens, + initial_h=initial_h, + initial_c=initial_c, + P=P, + ) + output_vars = ( + _LSTM( + _LSTM.Attributes( + activation_alpha=AttrFloat32s.maybe( + activation_alpha, name="activation_alpha" + ), + activation_beta=AttrFloat32s.maybe( + activation_beta, name="activation_beta" + ), + activations=AttrStrings.maybe(activations, name="activations"), + clip=AttrFloat32.maybe(clip, name="clip"), + direction=AttrString(direction, name="direction"), + hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), + input_forget=AttrInt64(input_forget, name="input_forget"), + layout=AttrInt64(layout, name="layout"), + ), + _LSTM.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + R=unwrap_vars(R), + B=unwrap_vars(B), + sequence_lens=unwrap_vars(sequence_lens), + initial_h=unwrap_vars(initial_h), + initial_c=unwrap_vars(initial_c), + P=unwrap_vars(P), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def layer_normalization( @@ -8581,18 +9211,28 @@ def layer_normalization( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - U: `tensor(bfloat16)`, `tensor(float)` """ - return _LayerNormalization( - _LayerNormalization.Attributes( - axis=AttrInt64(axis, name="axis"), - epsilon=AttrFloat32(epsilon, name="epsilon"), - stash_type=AttrInt64(stash_type, name="stash_type"), - ), - _LayerNormalization.Inputs( - X=X, - Scale=Scale, - B=B, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + Scale=Scale, + B=B, + ) + output_vars = ( + _LayerNormalization( + _LayerNormalization.Attributes( + axis=AttrInt64(axis, name="axis"), + epsilon=AttrFloat32(epsilon, name="epsilon"), + stash_type=AttrInt64(stash_type, name="stash_type"), + ), + _LayerNormalization.Inputs( + X=unwrap_vars(X), + Scale=unwrap_vars(Scale), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def leaky_relu( @@ -8628,14 +9268,22 @@ def leaky_relu( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LeakyRelu( - _LeakyRelu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _LeakyRelu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + 
_LeakyRelu( + _LeakyRelu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), + _LeakyRelu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def less( @@ -8674,13 +9322,22 @@ def less( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _Less( - _Less.Attributes(), - _Less.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Less( + _Less.Attributes(), + _Less.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def less_or_equal( @@ -8719,13 +9376,22 @@ def less_or_equal( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _LessOrEqual( - _LessOrEqual.Attributes(), - _LessOrEqual.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _LessOrEqual( + _LessOrEqual.Attributes(), + _LessOrEqual.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def log( @@ -8753,12 +9419,20 @@ def log( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Log( - _Log.Attributes(), - _Log.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Log( + _Log.Attributes(), + _Log.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def log_softmax( @@ -8799,14 +9473,22 @@ def log_softmax( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LogSoftmax( - _LogSoftmax.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _LogSoftmax.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _LogSoftmax( + _LogSoftmax.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _LogSoftmax.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def loop( @@ -8990,17 +9672,27 @@ def loop( + [var.unwrap_type() for var in v_initial], body, ) - return _Loop( - _Loop.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _Loop.Inputs( - M=M, - cond=cond, - v_initial=v_initial, - ), - out_variadic=len(_body_subgraph.requested_results) - 1, - ).outputs.v_final_and_scan_outputs + input_prop_values = create_prop_dict( + M=M, + cond=cond, + v_initial=v_initial, + ) + output_vars = ( + _Loop( + _Loop.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), + _Loop.Inputs( + M=unwrap_vars(M), + cond=unwrap_vars(cond), + v_initial=unwrap_vars(v_initial), + ), + out_variadic=len(_body_subgraph.requested_results) - 1, + ) + .get_output_vars(input_prop_values=input_prop_values) + .v_final_and_scan_outputs + ) + return output_vars # type: ignore def lp_normalization( 
@@ -9037,15 +9729,23 @@ def lp_normalization( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LpNormalization( - _LpNormalization.Attributes( - axis=AttrInt64(axis, name="axis"), - p=AttrInt64(p, name="p"), - ), - _LpNormalization.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _LpNormalization( + _LpNormalization.Attributes( + axis=AttrInt64(axis, name="axis"), + p=AttrInt64(p, name="p"), + ), + _LpNormalization.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def lp_pool( @@ -9119,18 +9819,26 @@ def lp_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LpPool( - _LpPool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - p=AttrInt64(p, name="p"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _LpPool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _LpPool( + _LpPool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + p=AttrInt64(p, name="p"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _LpPool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def matmul( @@ -9163,13 +9871,22 @@ def matmul( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _MatMul( - _MatMul.Attributes(), - _MatMul.Inputs( - A=A, - B=B, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _MatMul( + _MatMul.Attributes(), + _MatMul.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def matmul_integer( @@ -9224,15 +9941,26 @@ def matmul_integer( - T2: `tensor(int8)`, `tensor(uint8)` - T3: `tensor(int32)` """ - return _MatMulInteger( - _MatMulInteger.Attributes(), - _MatMulInteger.Inputs( - A=A, - B=B, - a_zero_point=a_zero_point, - b_zero_point=b_zero_point, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + A=A, + B=B, + a_zero_point=a_zero_point, + b_zero_point=b_zero_point, + ) + output_vars = ( + _MatMulInteger( + _MatMulInteger.Attributes(), + _MatMulInteger.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + a_zero_point=unwrap_vars(a_zero_point), + b_zero_point=unwrap_vars(b_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def max( @@ -9264,12 +9992,20 @@ def max( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Max( - _Max.Attributes(), - _Max.Inputs( - data_0=data_0, - ), - ).outputs.max + input_prop_values = create_prop_dict( + data_0=data_0, + ) + output_vars = ( + _Max( + _Max.Attributes(), + _Max.Inputs( + data_0=unwrap_vars(data_0), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) 
+ .max + ) + return output_vars # type: ignore def max_pool( @@ -9408,20 +10144,28 @@ def max_pool( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int8)`, `tensor(uint8)` - I: `tensor(int64)` """ - return _MaxPool( - _MaxPool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - storage_order=AttrInt64(storage_order, name="storage_order"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _MaxPool.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _MaxPool( + _MaxPool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + storage_order=AttrInt64(storage_order, name="storage_order"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _MaxPool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def max_roi_pool( @@ -9469,16 +10213,25 @@ def max_roi_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _MaxRoiPool( - _MaxRoiPool.Attributes( - pooled_shape=AttrInt64s(pooled_shape, name="pooled_shape"), - spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), - ), - _MaxRoiPool.Inputs( - X=X, - rois=rois, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + rois=rois, + ) + output_vars = ( + _MaxRoiPool( + _MaxRoiPool.Attributes( + pooled_shape=AttrInt64s(pooled_shape, name="pooled_shape"), + spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), + ), + _MaxRoiPool.Inputs( + X=unwrap_vars(X), + rois=unwrap_vars(rois), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def max_unpool( @@ -9577,18 +10330,28 @@ def max_unpool( - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int64)` """ - return _MaxUnpool( - _MaxUnpool.Attributes( - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _MaxUnpool.Inputs( - X=X, - I=I, - output_shape=output_shape, - ), - ).outputs.output + input_prop_values = create_prop_dict( + X=X, + I=I, + output_shape=output_shape, + ) + output_vars = ( + _MaxUnpool( + _MaxUnpool.Attributes( + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _MaxUnpool.Inputs( + X=unwrap_vars(X), + I=unwrap_vars(I), + output_shape=unwrap_vars(output_shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def mean( @@ -9620,12 +10383,20 @@ def mean( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Mean( - _Mean.Attributes(), - _Mean.Inputs( - data_0=data_0, - ), - ).outputs.mean + input_prop_values = create_prop_dict( + data_0=data_0, + ) + output_vars = ( + _Mean( + _Mean.Attributes(), + _Mean.Inputs( + data_0=unwrap_vars(data_0), + ), + ) + 
.get_output_vars(input_prop_values=input_prop_values) + .mean + ) + return output_vars # type: ignore def mean_variance_normalization( @@ -9663,14 +10434,22 @@ def mean_variance_normalization( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _MeanVarianceNormalization( - _MeanVarianceNormalization.Attributes( - axes=AttrInt64s(axes, name="axes"), - ), - _MeanVarianceNormalization.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _MeanVarianceNormalization( + _MeanVarianceNormalization.Attributes( + axes=AttrInt64s(axes, name="axes"), + ), + _MeanVarianceNormalization.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def mel_weight_matrix( @@ -9747,18 +10526,30 @@ def mel_weight_matrix( - T2: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T3: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _MelWeightMatrix( - _MelWeightMatrix.Attributes( - output_datatype=AttrInt64(output_datatype, name="output_datatype"), - ), - _MelWeightMatrix.Inputs( - num_mel_bins=num_mel_bins, - dft_length=dft_length, - sample_rate=sample_rate, - lower_edge_hertz=lower_edge_hertz, - upper_edge_hertz=upper_edge_hertz, - ), - ).outputs.output + input_prop_values = create_prop_dict( + num_mel_bins=num_mel_bins, + dft_length=dft_length, + sample_rate=sample_rate, + lower_edge_hertz=lower_edge_hertz, + upper_edge_hertz=upper_edge_hertz, + ) + output_vars = ( + _MelWeightMatrix( + _MelWeightMatrix.Attributes( + output_datatype=AttrInt64(output_datatype, name="output_datatype"), + ), + _MelWeightMatrix.Inputs( + num_mel_bins=unwrap_vars(num_mel_bins), + dft_length=unwrap_vars(dft_length), + sample_rate=unwrap_vars(sample_rate), + lower_edge_hertz=unwrap_vars(lower_edge_hertz), + upper_edge_hertz=unwrap_vars(upper_edge_hertz), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def min( @@ -9790,12 +10581,20 @@ def min( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Min( - _Min.Attributes(), - _Min.Inputs( - data_0=data_0, - ), - ).outputs.min + input_prop_values = create_prop_dict( + data_0=data_0, + ) + output_vars = ( + _Min( + _Min.Attributes(), + _Min.Inputs( + data_0=unwrap_vars(data_0), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .min + ) + return output_vars # type: ignore def mod( @@ -9850,15 +10649,24 @@ def mod( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Mod( - _Mod.Attributes( - fmod=AttrInt64(fmod, name="fmod"), - ), - _Mod.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Mod( + _Mod.Attributes( + fmod=AttrInt64(fmod, name="fmod"), + ), + _Mod.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + 
) + return output_vars # type: ignore def mul( @@ -9898,13 +10706,22 @@ def mul( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Mul( - _Mul.Attributes(), - _Mul.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Mul( + _Mul.Attributes(), + _Mul.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def multinomial( @@ -9954,16 +10771,24 @@ def multinomial( - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int32)`, `tensor(int64)` """ - return _Multinomial( - _Multinomial.Attributes( - dtype=AttrDtype(dtype, name="dtype"), - sample_size=AttrInt64(sample_size, name="sample_size"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _Multinomial.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Multinomial( + _Multinomial.Attributes( + dtype=AttrDtype(dtype, name="dtype"), + sample_size=AttrInt64(sample_size, name="sample_size"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), + _Multinomial.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def neg( @@ -9993,12 +10818,20 @@ def neg( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)` """ - return _Neg( - _Neg.Attributes(), - _Neg.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Neg( + _Neg.Attributes(), + _Neg.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def negative_log_likelihood_loss( @@ -10158,17 +10991,27 @@ def negative_log_likelihood_loss( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _NegativeLogLikelihoodLoss( - _NegativeLogLikelihoodLoss.Attributes( - ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), - reduction=AttrString(reduction, name="reduction"), - ), - _NegativeLogLikelihoodLoss.Inputs( - input=input, - target=target, - weight=weight, - ), - ).outputs.loss + input_prop_values = create_prop_dict( + input=input, + target=target, + weight=weight, + ) + output_vars = ( + _NegativeLogLikelihoodLoss( + _NegativeLogLikelihoodLoss.Attributes( + ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), + reduction=AttrString(reduction, name="reduction"), + ), + _NegativeLogLikelihoodLoss.Inputs( + input=unwrap_vars(input), + target=unwrap_vars(target), + weight=unwrap_vars(weight), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .loss + ) + return output_vars # type: ignore def non_max_suppression( @@ -10237,18 +11080,30 @@ def non_max_suppression( Signature: ``ai.onnx@11::NonMaxSuppression``. 
""" - return _NonMaxSuppression( - _NonMaxSuppression.Attributes( - center_point_box=AttrInt64(center_point_box, name="center_point_box"), - ), - _NonMaxSuppression.Inputs( - boxes=boxes, - scores=scores, - max_output_boxes_per_class=max_output_boxes_per_class, - iou_threshold=iou_threshold, - score_threshold=score_threshold, - ), - ).outputs.selected_indices + input_prop_values = create_prop_dict( + boxes=boxes, + scores=scores, + max_output_boxes_per_class=max_output_boxes_per_class, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + ) + output_vars = ( + _NonMaxSuppression( + _NonMaxSuppression.Attributes( + center_point_box=AttrInt64(center_point_box, name="center_point_box"), + ), + _NonMaxSuppression.Inputs( + boxes=unwrap_vars(boxes), + scores=unwrap_vars(scores), + max_output_boxes_per_class=unwrap_vars(max_output_boxes_per_class), + iou_threshold=unwrap_vars(iou_threshold), + score_threshold=unwrap_vars(score_threshold), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .selected_indices + ) + return output_vars # type: ignore def non_zero( @@ -10280,12 +11135,20 @@ def non_zero( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _NonZero( - _NonZero.Attributes(), - _NonZero.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _NonZero( + _NonZero.Attributes(), + _NonZero.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def not_( @@ -10313,12 +11176,20 @@ def not_( Type constraints: - T: `tensor(bool)` """ - return _Not( - _Not.Attributes(), - _Not.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Not( + _Not.Attributes(), + _Not.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def one_hot( @@ -10402,16 +11273,26 @@ def one_hot( - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T3: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _OneHot( - _OneHot.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _OneHot.Inputs( - indices=indices, - depth=depth, - values=values, - ), - ).outputs.output + input_prop_values = create_prop_dict( + indices=indices, + depth=depth, + values=values, + ) + output_vars = ( + _OneHot( + _OneHot.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _OneHot.Inputs( + indices=unwrap_vars(indices), + depth=unwrap_vars(depth), + values=unwrap_vars(values), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def optional( @@ -10447,14 +11328,22 @@ def optional( - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, 
`seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` """ - return _Optional( - _Optional.Attributes( - type=AttrType.maybe(type, name="type"), - ), - _Optional.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Optional( + _Optional.Attributes( + type=AttrType.maybe(type, name="type"), + ), + _Optional.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def optional_get_element( @@ -10485,12 +11374,20 @@ def optional_get_element( - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _OptionalGetElement( - _OptionalGetElement.Attributes(), - _OptionalGetElement.Inputs( 
- input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _OptionalGetElement( + _OptionalGetElement.Attributes(), + _OptionalGetElement.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def optional_has_element( @@ -10521,12 +11418,20 @@ def optional_has_element( - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))` - B: `tensor(bool)` """ - return _OptionalHasElement( - _OptionalHasElement.Attributes(), - _OptionalHasElement.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _OptionalHasElement( + _OptionalHasElement.Attributes(), + _OptionalHasElement.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def or_( @@ -10565,13 +11470,22 @@ def or_( - T: `tensor(bool)` - T1: `tensor(bool)` """ - return _Or( - _Or.Attributes(), - _Or.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Or( + _Or.Attributes(), + _Or.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def prelu( @@ -10610,13 +11524,22 @@ def prelu( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _PRelu( - _PRelu.Attributes(), - _PRelu.Inputs( - X=X, - slope=slope, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + slope=slope, + ) + output_vars = ( + _PRelu( + _PRelu.Attributes(), + _PRelu.Inputs( + X=unwrap_vars(X), + slope=unwrap_vars(slope), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def pad( @@ -10713,16 +11636,26 @@ def pad( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=data, - pads=pads, - constant_value=constant_value, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + pads=pads, + constant_value=constant_value, + ) + output_vars = ( + _Pad( + _Pad.Attributes( + 
mode=AttrString(mode, name="mode"), + ), + _Pad.Inputs( + data=unwrap_vars(data), + pads=unwrap_vars(pads), + constant_value=unwrap_vars(constant_value), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def pow( @@ -10760,13 +11693,22 @@ def pow( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)` - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Pow( - _Pow.Attributes(), - _Pow.Inputs( - X=X, - Y=Y, - ), - ).outputs.Z + input_prop_values = create_prop_dict( + X=X, + Y=Y, + ) + output_vars = ( + _Pow( + _Pow.Attributes(), + _Pow.Inputs( + X=unwrap_vars(X), + Y=unwrap_vars(Y), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Z + ) + return output_vars # type: ignore def qlinear_conv( @@ -10909,27 +11851,43 @@ def qlinear_conv( - T3: `tensor(int8)`, `tensor(uint8)` - T4: `tensor(int32)` """ - return _QLinearConv( - _QLinearConv.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _QLinearConv.Inputs( - x=x, - x_scale=x_scale, - x_zero_point=x_zero_point, - w=w, - w_scale=w_scale, - w_zero_point=w_zero_point, - y_scale=y_scale, - y_zero_point=y_zero_point, - B=B, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + x_scale=x_scale, + x_zero_point=x_zero_point, + w=w, + w_scale=w_scale, + w_zero_point=w_zero_point, + y_scale=y_scale, + y_zero_point=y_zero_point, + B=B, + ) + output_vars = ( + _QLinearConv( + _QLinearConv.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _QLinearConv.Inputs( + x=unwrap_vars(x), + x_scale=unwrap_vars(x_scale), + x_zero_point=unwrap_vars(x_zero_point), + w=unwrap_vars(w), + w_scale=unwrap_vars(w_scale), + w_zero_point=unwrap_vars(w_zero_point), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def qlinear_matmul( @@ -11004,19 +11962,34 @@ def qlinear_matmul( - T2: `tensor(int8)`, `tensor(uint8)` - T3: `tensor(int8)`, `tensor(uint8)` """ - return _QLinearMatMul( - _QLinearMatMul.Attributes(), - _QLinearMatMul.Inputs( - a=a, - a_scale=a_scale, - a_zero_point=a_zero_point, - b=b, - b_scale=b_scale, - b_zero_point=b_zero_point, - y_scale=y_scale, - y_zero_point=y_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + a=a, + a_scale=a_scale, + a_zero_point=a_zero_point, + b=b, + b_scale=b_scale, + b_zero_point=b_zero_point, + y_scale=y_scale, + y_zero_point=y_zero_point, + ) + output_vars = ( + _QLinearMatMul( + _QLinearMatMul.Attributes(), + _QLinearMatMul.Inputs( + a=unwrap_vars(a), + a_scale=unwrap_vars(a_scale), + a_zero_point=unwrap_vars(a_zero_point), + b=unwrap_vars(b), + b_scale=unwrap_vars(b_scale), + 
b_zero_point=unwrap_vars(b_zero_point), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def quantize_linear( @@ -11073,16 +12046,26 @@ def quantize_linear( - T1: `tensor(float)`, `tensor(int32)` - T2: `tensor(int8)`, `tensor(uint8)` """ - return _QuantizeLinear( - _QuantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _QuantizeLinear.Inputs( - x=x, - y_scale=y_scale, - y_zero_point=y_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + y_scale=y_scale, + y_zero_point=y_zero_point, + ) + output_vars = ( + _QuantizeLinear( + _QuantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _QuantizeLinear.Inputs( + x=unwrap_vars(x), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def rnn( @@ -11237,27 +12220,42 @@ def rnn( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T1: `tensor(int32)` """ - return _RNN( - _RNN.Attributes( - activation_alpha=AttrFloat32s.maybe( - activation_alpha, name="activation_alpha" - ), - activation_beta=AttrFloat32s.maybe(activation_beta, name="activation_beta"), - activations=AttrStrings(activations, name="activations"), - clip=AttrFloat32.maybe(clip, name="clip"), - direction=AttrString(direction, name="direction"), - hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), - layout=AttrInt64(layout, name="layout"), - ), - _RNN.Inputs( - X=X, - W=W, - R=R, - B=B, - sequence_lens=sequence_lens, - initial_h=initial_h, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + W=W, + R=R, + B=B, + sequence_lens=sequence_lens, + initial_h=initial_h, + ) + output_vars = ( + _RNN( + _RNN.Attributes( + activation_alpha=AttrFloat32s.maybe( + activation_alpha, name="activation_alpha" + ), + activation_beta=AttrFloat32s.maybe( + activation_beta, name="activation_beta" + ), + activations=AttrStrings(activations, name="activations"), + clip=AttrFloat32.maybe(clip, name="clip"), + direction=AttrString(direction, name="direction"), + hidden_size=AttrInt64.maybe(hidden_size, name="hidden_size"), + layout=AttrInt64(layout, name="layout"), + ), + _RNN.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + R=unwrap_vars(R), + B=unwrap_vars(B), + sequence_lens=unwrap_vars(sequence_lens), + initial_h=unwrap_vars(initial_h), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def random_normal( @@ -11311,16 +12309,22 @@ def random_normal( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _RandomNormal( - _RandomNormal.Attributes( - dtype=AttrDtype(dtype, name="dtype"), - mean=AttrFloat32(mean, name="mean"), - scale=AttrFloat32(scale, name="scale"), - seed=AttrFloat32.maybe(seed, name="seed"), - shape=AttrInt64s(shape, name="shape"), - ), - _RandomNormal.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + output_vars = ( + _RandomNormal( + _RandomNormal.Attributes( + dtype=AttrDtype(dtype, name="dtype"), + mean=AttrFloat32(mean, name="mean"), + scale=AttrFloat32(scale, name="scale"), + seed=AttrFloat32.maybe(seed, name="seed"), + shape=AttrInt64s(shape, name="shape"), + ), + _RandomNormal.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: 
ignore def random_normal_like( @@ -11376,17 +12380,25 @@ def random_normal_like( - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _RandomNormalLike( - _RandomNormalLike.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - mean=AttrFloat32(mean, name="mean"), - scale=AttrFloat32(scale, name="scale"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _RandomNormalLike.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _RandomNormalLike( + _RandomNormalLike.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + mean=AttrFloat32(mean, name="mean"), + scale=AttrFloat32(scale, name="scale"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), + _RandomNormalLike.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def random_uniform( @@ -11439,16 +12451,22 @@ def random_uniform( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _RandomUniform( - _RandomUniform.Attributes( - dtype=AttrDtype(dtype, name="dtype"), - high=AttrFloat32(high, name="high"), - low=AttrFloat32(low, name="low"), - seed=AttrFloat32.maybe(seed, name="seed"), - shape=AttrInt64s(shape, name="shape"), - ), - _RandomUniform.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + output_vars = ( + _RandomUniform( + _RandomUniform.Attributes( + dtype=AttrDtype(dtype, name="dtype"), + high=AttrFloat32(high, name="high"), + low=AttrFloat32(low, name="low"), + seed=AttrFloat32.maybe(seed, name="seed"), + shape=AttrInt64s(shape, name="shape"), + ), + _RandomUniform.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def random_uniform_like( @@ -11504,17 +12522,25 @@ def random_uniform_like( - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _RandomUniformLike( - _RandomUniformLike.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - high=AttrFloat32(high, name="high"), - low=AttrFloat32(low, name="low"), - seed=AttrFloat32.maybe(seed, name="seed"), - ), - _RandomUniformLike.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _RandomUniformLike( + _RandomUniformLike.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + high=AttrFloat32(high, name="high"), + low=AttrFloat32(low, name="low"), + seed=AttrFloat32.maybe(seed, name="seed"), + ), + _RandomUniformLike.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def range( @@ -11581,14 +12607,24 @@ def range( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)` """ - return _Range( - _Range.Attributes(), - _Range.Inputs( - start=start, - limit=limit, - delta=delta, - 
), - ).outputs.output + input_prop_values = create_prop_dict( + start=start, + limit=limit, + delta=delta, + ) + output_vars = ( + _Range( + _Range.Attributes(), + _Range.Inputs( + start=unwrap_vars(start), + limit=unwrap_vars(limit), + delta=unwrap_vars(delta), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def reciprocal( @@ -11618,12 +12654,20 @@ def reciprocal( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Reciprocal( - _Reciprocal.Attributes(), - _Reciprocal.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Reciprocal( + _Reciprocal.Attributes(), + _Reciprocal.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def reduce_l1( @@ -11670,15 +12714,23 @@ def reduce_l1( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceL1( - _ReduceL1.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceL1.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ReduceL1( + _ReduceL1.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceL1.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_l2( @@ -11725,15 +12777,23 @@ def reduce_l2( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceL2( - _ReduceL2.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceL2.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ReduceL2( + _ReduceL2.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceL2.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_log_sum( @@ -11781,15 +12841,23 @@ def reduce_log_sum( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceLogSum( - _ReduceLogSum.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceLogSum.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ReduceLogSum( + _ReduceLogSum.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceLogSum.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_log_sum_exp( @@ -11837,15 +12905,23 @@ def reduce_log_sum_exp( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, 
`tensor(uint64)` """ - return _ReduceLogSumExp( - _ReduceLogSumExp.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceLogSumExp.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ReduceLogSumExp( + _ReduceLogSumExp.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceLogSumExp.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_max( @@ -11894,15 +12970,23 @@ def reduce_max( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMax( - _ReduceMax.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceMax.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ReduceMax( + _ReduceMax.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceMax.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_mean( @@ -11949,15 +13033,23 @@ def reduce_mean( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceMean( - _ReduceMean.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceMean.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ReduceMean( + _ReduceMean.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceMean.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_min( @@ -12005,15 +13097,23 @@ def reduce_min( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMin( - _ReduceMin.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceMin.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ReduceMin( + _ReduceMin.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceMin.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_prod( @@ -12060,15 +13160,23 @@ def reduce_prod( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceProd( - _ReduceProd.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceProd.Inputs( - data=data, - ), - 
).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ReduceProd( + _ReduceProd.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceProd.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_sum( @@ -12124,18 +13232,27 @@ def reduce_sum( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceSum( - _ReduceSum.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceSum( + _ReduceSum.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceSum.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceSum.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_sum_square( @@ -12182,15 +13299,23 @@ def reduce_sum_square( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceSumSquare( - _ReduceSumSquare.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _ReduceSumSquare.Inputs( - data=data, - ), - ).outputs.reduced + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _ReduceSumSquare( + _ReduceSumSquare.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _ReduceSumSquare.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def relu( @@ -12220,12 +13345,20 @@ def relu( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)` """ - return _Relu( - _Relu.Attributes(), - _Relu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Relu( + _Relu.Attributes(), + _Relu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def reshape( @@ -12279,15 +13412,24 @@ def reshape( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Reshape( - _Reshape.Attributes( - allowzero=AttrInt64(allowzero, name="allowzero"), - ), - _Reshape.Inputs( - data=data, - shape=shape, - ), - ).outputs.reshaped + input_prop_values = create_prop_dict( + data=data, + shape=shape, + ) + output_vars = ( + _Reshape( + _Reshape.Attributes( + allowzero=AttrInt64(allowzero, name="allowzero"), + ), + _Reshape.Inputs( + data=unwrap_vars(data), + shape=unwrap_vars(shape), + ), + ) + 
.get_output_vars(input_prop_values=input_prop_values) + .reshaped + ) + return output_vars # type: ignore def resize( @@ -12409,26 +13551,38 @@ def resize( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Resize( - _Resize.Attributes( - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, name="coordinate_transformation_mode" + input_prop_values = create_prop_dict( + X=X, + roi=roi, + scales=scales, + sizes=sizes, + ) + output_vars = ( + _Resize( + _Resize.Attributes( + coordinate_transformation_mode=AttrString( + coordinate_transformation_mode, + name="coordinate_transformation_mode", + ), + cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), + exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), + extrapolation_value=AttrFloat32( + extrapolation_value, name="extrapolation_value" + ), + mode=AttrString(mode, name="mode"), + nearest_mode=AttrString(nearest_mode, name="nearest_mode"), ), - cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), - exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), - extrapolation_value=AttrFloat32( - extrapolation_value, name="extrapolation_value" + _Resize.Inputs( + X=unwrap_vars(X), + roi=unwrap_vars(roi), + scales=unwrap_vars(scales), + sizes=unwrap_vars(sizes), ), - mode=AttrString(mode, name="mode"), - nearest_mode=AttrString(nearest_mode, name="nearest_mode"), - ), - _Resize.Inputs( - X=X, - roi=roi, - scales=scales, - sizes=sizes, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def reverse_sequence( @@ -12493,16 +13647,25 @@ def reverse_sequence( Type constraints: - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReverseSequence( - _ReverseSequence.Attributes( - batch_axis=AttrInt64(batch_axis, name="batch_axis"), - time_axis=AttrInt64(time_axis, name="time_axis"), - ), - _ReverseSequence.Inputs( - input=input, - sequence_lens=sequence_lens, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + input=input, + sequence_lens=sequence_lens, + ) + output_vars = ( + _ReverseSequence( + _ReverseSequence.Attributes( + batch_axis=AttrInt64(batch_axis, name="batch_axis"), + time_axis=AttrInt64(time_axis, name="time_axis"), + ), + _ReverseSequence.Inputs( + input=unwrap_vars(input), + sequence_lens=unwrap_vars(sequence_lens), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def roi_align( @@ -12592,23 +13755,34 @@ def roi_align( - T1: `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int64)` """ - return _RoiAlign( - _RoiAlign.Attributes( - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, name="coordinate_transformation_mode" - ), - mode=AttrString(mode, name="mode"), - output_height=AttrInt64(output_height, name="output_height"), - output_width=AttrInt64(output_width, name="output_width"), - sampling_ratio=AttrInt64(sampling_ratio, name="sampling_ratio"), 
- spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), - ), - _RoiAlign.Inputs( - X=X, - rois=rois, - batch_indices=batch_indices, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + rois=rois, + batch_indices=batch_indices, + ) + output_vars = ( + _RoiAlign( + _RoiAlign.Attributes( + coordinate_transformation_mode=AttrString( + coordinate_transformation_mode, + name="coordinate_transformation_mode", + ), + mode=AttrString(mode, name="mode"), + output_height=AttrInt64(output_height, name="output_height"), + output_width=AttrInt64(output_width, name="output_width"), + sampling_ratio=AttrInt64(sampling_ratio, name="sampling_ratio"), + spatial_scale=AttrFloat32(spatial_scale, name="spatial_scale"), + ), + _RoiAlign.Inputs( + X=unwrap_vars(X), + rois=unwrap_vars(rois), + batch_indices=unwrap_vars(batch_indices), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def round( @@ -12650,12 +13824,20 @@ def round( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Round( - _Round.Attributes(), - _Round.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Round( + _Round.Attributes(), + _Round.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def stft( @@ -12719,17 +13901,28 @@ def stft( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int32)`, `tensor(int64)` """ - return _STFT( - _STFT.Attributes( - onesided=AttrInt64(onesided, name="onesided"), - ), - _STFT.Inputs( - signal=signal, - frame_step=frame_step, - window=window, - frame_length=frame_length, - ), - ).outputs.output + input_prop_values = create_prop_dict( + signal=signal, + frame_step=frame_step, + window=window, + frame_length=frame_length, + ) + output_vars = ( + _STFT( + _STFT.Attributes( + onesided=AttrInt64(onesided, name="onesided"), + ), + _STFT.Inputs( + signal=unwrap_vars(signal), + frame_step=unwrap_vars(frame_step), + window=unwrap_vars(window), + frame_length=unwrap_vars(frame_length), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def scan( @@ -12953,26 +14146,38 @@ def scan( ], body, ) - return _Scan( - _Scan.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), - scan_input_axes=AttrInt64s.maybe(scan_input_axes, name="scan_input_axes"), - scan_input_directions=AttrInt64s.maybe( - scan_input_directions, name="scan_input_directions" - ), - scan_output_axes=AttrInt64s.maybe( - scan_output_axes, name="scan_output_axes" + input_prop_values = create_prop_dict( + initial_state_and_scan_inputs=initial_state_and_scan_inputs, + ) + output_vars = ( + _Scan( + _Scan.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), + scan_input_axes=AttrInt64s.maybe( + scan_input_axes, name="scan_input_axes" + ), + scan_input_directions=AttrInt64s.maybe( + scan_input_directions, name="scan_input_directions" + ), + scan_output_axes=AttrInt64s.maybe( + scan_output_axes, name="scan_output_axes" + ), + scan_output_directions=AttrInt64s.maybe( + scan_output_directions, name="scan_output_directions" + ), ), - scan_output_directions=AttrInt64s.maybe( - scan_output_directions, name="scan_output_directions" + _Scan.Inputs( + 
initial_state_and_scan_inputs=unwrap_vars( + initial_state_and_scan_inputs + ), ), - ), - _Scan.Inputs( - initial_state_and_scan_inputs=initial_state_and_scan_inputs, - ), - out_variadic=len(_body_subgraph.requested_results), - ).outputs.final_state_and_scan_outputs + out_variadic=len(_body_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .final_state_and_scan_outputs + ) + return output_vars # type: ignore def scatter_elements( @@ -13095,17 +14300,27 @@ def scatter_elements( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _ScatterElements( - _ScatterElements.Attributes( - axis=AttrInt64(axis, name="axis"), - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterElements.Inputs( - data=data, - indices=indices, - updates=updates, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + updates=updates, + ) + output_vars = ( + _ScatterElements( + _ScatterElements.Attributes( + axis=AttrInt64(axis, name="axis"), + reduction=AttrString(reduction, name="reduction"), + ), + _ScatterElements.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + updates=unwrap_vars(updates), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def scatter_nd( @@ -13217,16 +14432,26 @@ def scatter_nd( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ScatterND( - _ScatterND.Attributes( - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterND.Inputs( - data=data, - indices=indices, - updates=updates, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + updates=updates, + ) + output_vars = ( + _ScatterND( + _ScatterND.Attributes( + reduction=AttrString(reduction, name="reduction"), + ), + _ScatterND.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + updates=unwrap_vars(updates), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def selu( @@ -13268,15 +14493,23 @@ def selu( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Selu( - _Selu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - gamma=AttrFloat32(gamma, name="gamma"), - ), - _Selu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Selu( + _Selu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + gamma=AttrFloat32(gamma, name="gamma"), + ), + _Selu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def sequence_at( @@ -13317,13 +14550,22 @@ def sequence_at( - I: `tensor(int32)`, `tensor(int64)` - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, 
`tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _SequenceAt( - _SequenceAt.Attributes(), - _SequenceAt.Inputs( - input_sequence=input_sequence, - position=position, - ), - ).outputs.tensor + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + position=position, + ) + output_vars = ( + _SequenceAt( + _SequenceAt.Attributes(), + _SequenceAt.Inputs( + input_sequence=unwrap_vars(input_sequence), + position=unwrap_vars(position), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .tensor + ) + return output_vars # type: ignore def sequence_construct( @@ -13353,12 +14595,20 @@ def sequence_construct( - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` """ - return _SequenceConstruct( - _SequenceConstruct.Attributes(), - _SequenceConstruct.Inputs( - inputs=inputs, - ), - ).outputs.output_sequence + input_prop_values = create_prop_dict( + inputs=inputs, + ) + output_vars = ( + _SequenceConstruct( + _SequenceConstruct.Attributes(), + _SequenceConstruct.Inputs( + inputs=unwrap_vars(inputs), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output_sequence + ) + return output_vars # type: ignore def sequence_empty( @@ -13388,12 +14638,18 @@ def sequence_empty( Type constraints: - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` """ - return _SequenceEmpty( - _SequenceEmpty.Attributes( - dtype=AttrDtype.maybe(dtype, name="dtype"), - ), - _SequenceEmpty.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + output_vars = ( + _SequenceEmpty( + _SequenceEmpty.Attributes( + dtype=AttrDtype.maybe(dtype, name="dtype"), + ), + _SequenceEmpty.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def sequence_erase( @@ -13434,13 +14690,22 @@ def sequence_erase( - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - I: `tensor(int32)`, `tensor(int64)` """ - return _SequenceErase( - _SequenceErase.Attributes(), - _SequenceErase.Inputs( - input_sequence=input_sequence, - position=position, - ), - ).outputs.output_sequence + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + position=position, + ) + output_vars = ( + _SequenceErase( + _SequenceErase.Attributes(), + _SequenceErase.Inputs( + input_sequence=unwrap_vars(input_sequence), + position=unwrap_vars(position), + ), + ) + 
.get_output_vars(input_prop_values=input_prop_values) + .output_sequence + ) + return output_vars # type: ignore def sequence_insert( @@ -13488,14 +14753,24 @@ def sequence_insert( - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - I: `tensor(int32)`, `tensor(int64)` """ - return _SequenceInsert( - _SequenceInsert.Attributes(), - _SequenceInsert.Inputs( - input_sequence=input_sequence, - tensor=tensor, - position=position, - ), - ).outputs.output_sequence + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + tensor=tensor, + position=position, + ) + output_vars = ( + _SequenceInsert( + _SequenceInsert.Attributes(), + _SequenceInsert.Inputs( + input_sequence=unwrap_vars(input_sequence), + tensor=unwrap_vars(tensor), + position=unwrap_vars(position), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output_sequence + ) + return output_vars # type: ignore def sequence_length( @@ -13525,12 +14800,20 @@ def sequence_length( - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` - I: `tensor(int64)` """ - return _SequenceLength( - _SequenceLength.Attributes(), - _SequenceLength.Inputs( - input_sequence=input_sequence, - ), - ).outputs.length + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + ) + output_vars = ( + _SequenceLength( + _SequenceLength.Attributes(), + _SequenceLength.Inputs( + input_sequence=unwrap_vars(input_sequence), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .length + ) + return output_vars # type: ignore def sequence_map( @@ -13593,16 +14876,25 @@ def sequence_map( ], body, ) - return _SequenceMap( - _SequenceMap.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _SequenceMap.Inputs( - input_sequence=input_sequence, - additional_inputs=additional_inputs, - ), - out_variadic=len(_body_subgraph.requested_results), - ).outputs.out_sequence + input_prop_values = create_prop_dict( + input_sequence=input_sequence, + additional_inputs=additional_inputs, + ) + output_vars = ( + _SequenceMap( + _SequenceMap.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), + _SequenceMap.Inputs( + input_sequence=unwrap_vars(input_sequence), + additional_inputs=unwrap_vars(additional_inputs), + ), + out_variadic=len(_body_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .out_sequence + ) + return output_vars # type: ignore def shape( @@ -13681,15 +14973,23 @@ def shape( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Shape( - _Shape.Attributes( - end=AttrInt64.maybe(end, name="end"), - start=AttrInt64(start, name="start"), - ), - _Shape.Inputs( - data=data, - ), - ).outputs.shape + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _Shape( + 
_Shape.Attributes( + end=AttrInt64.maybe(end, name="end"), + start=AttrInt64(start, name="start"), + ), + _Shape.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .shape + ) + return output_vars # type: ignore def shrink( @@ -13729,15 +15029,23 @@ def shrink( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Shrink( - _Shrink.Attributes( - bias=AttrFloat32(bias, name="bias"), - lambd=AttrFloat32(lambd, name="lambd"), - ), - _Shrink.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Shrink( + _Shrink.Attributes( + bias=AttrFloat32(bias, name="bias"), + lambd=AttrFloat32(lambd, name="lambd"), + ), + _Shrink.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def sigmoid( @@ -13767,12 +15075,20 @@ def sigmoid( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Sigmoid( - _Sigmoid.Attributes(), - _Sigmoid.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Sigmoid( + _Sigmoid.Attributes(), + _Sigmoid.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def sign( @@ -13802,12 +15118,20 @@ def sign( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Sign( - _Sign.Attributes(), - _Sign.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Sign( + _Sign.Attributes(), + _Sign.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def sin( @@ -13835,12 +15159,20 @@ def sin( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Sin( - _Sin.Attributes(), - _Sin.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Sin( + _Sin.Attributes(), + _Sin.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def sinh( @@ -13868,12 +15200,20 @@ def sinh( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Sinh( - _Sinh.Attributes(), - _Sinh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Sinh( + _Sinh.Attributes(), + _Sinh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def size( @@ -13903,12 +15243,20 @@ def size( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Size( 
- _Size.Attributes(), - _Size.Inputs( - data=data, - ), - ).outputs.size + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _Size( + _Size.Attributes(), + _Size.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .size + ) + return output_vars # type: ignore def slice( @@ -14024,16 +15372,28 @@ def slice( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _Slice( - _Slice.Attributes(), - _Slice.Inputs( - data=data, - starts=starts, - ends=ends, - axes=axes, - steps=steps, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + starts=starts, + ends=ends, + axes=axes, + steps=steps, + ) + output_vars = ( + _Slice( + _Slice.Attributes(), + _Slice.Inputs( + data=unwrap_vars(data), + starts=unwrap_vars(starts), + ends=unwrap_vars(ends), + axes=unwrap_vars(axes), + steps=unwrap_vars(steps), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def softmax( @@ -14076,14 +15436,22 @@ def softmax( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Softmax( - _Softmax.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Softmax.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Softmax( + _Softmax.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Softmax.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def softmax_cross_entropy_loss( @@ -14195,17 +15563,27 @@ def softmax_cross_entropy_loss( - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _SoftmaxCrossEntropyLoss( - _SoftmaxCrossEntropyLoss.Attributes( - ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), - reduction=AttrString(reduction, name="reduction"), - ), - _SoftmaxCrossEntropyLoss.Inputs( - scores=scores, - labels=labels, - weights=weights, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + scores=scores, + labels=labels, + weights=weights, + ) + output_vars = ( + _SoftmaxCrossEntropyLoss( + _SoftmaxCrossEntropyLoss.Attributes( + ignore_index=AttrInt64.maybe(ignore_index, name="ignore_index"), + reduction=AttrString(reduction, name="reduction"), + ), + _SoftmaxCrossEntropyLoss.Inputs( + scores=unwrap_vars(scores), + labels=unwrap_vars(labels), + weights=unwrap_vars(weights), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def softplus( @@ -14235,12 +15613,20 @@ def softplus( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Softplus( - _Softplus.Attributes(), - _Softplus.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Softplus( + _Softplus.Attributes(), + _Softplus.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def softsign( @@ -14270,12 +15656,20 @@ def softsign( Type 
constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Softsign( - _Softsign.Attributes(), - _Softsign.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Softsign( + _Softsign.Attributes(), + _Softsign.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def space_to_depth( @@ -14312,14 +15706,22 @@ def space_to_depth( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _SpaceToDepth( - _SpaceToDepth.Attributes( - blocksize=AttrInt64(blocksize, name="blocksize"), - ), - _SpaceToDepth.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _SpaceToDepth( + _SpaceToDepth.Attributes( + blocksize=AttrInt64(blocksize, name="blocksize"), + ), + _SpaceToDepth.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def split( @@ -14364,16 +15766,25 @@ def split( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Split( - _Split.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Split.Inputs( - input=input, - split=split, - ), - out_variadic=outputs_count, - ).outputs.outputs + input_prop_values = create_prop_dict( + input=input, + split=split, + ) + output_vars = ( + _Split( + _Split.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Split.Inputs( + input=unwrap_vars(input), + split=unwrap_vars(split), + ), + out_variadic=outputs_count, + ) + .get_output_vars(input_prop_values=input_prop_values) + .outputs + ) + return output_vars # type: ignore def split_to_sequence( @@ -14431,16 +15842,25 @@ def split_to_sequence( - I: `tensor(int32)`, `tensor(int64)` - S: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))` """ - return _SplitToSequence( - _SplitToSequence.Attributes( - axis=AttrInt64(axis, name="axis"), - keepdims=AttrInt64(keepdims, name="keepdims"), - ), - _SplitToSequence.Inputs( - input=input, - split=split, - ), - ).outputs.output_sequence + input_prop_values = create_prop_dict( + input=input, + split=split, + ) + output_vars = ( + _SplitToSequence( + _SplitToSequence.Attributes( + axis=AttrInt64(axis, name="axis"), + keepdims=AttrInt64(keepdims, name="keepdims"), + ), + _SplitToSequence.Inputs( + input=unwrap_vars(input), + split=unwrap_vars(split), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output_sequence + ) + return output_vars # type: ignore def sqrt( @@ -14470,12 +15890,20 @@ def sqrt( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, 
`tensor(float16)` """ - return _Sqrt( - _Sqrt.Attributes(), - _Sqrt.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Sqrt( + _Sqrt.Attributes(), + _Sqrt.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def squeeze( @@ -14513,13 +15941,22 @@ def squeeze( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Squeeze( - _Squeeze.Attributes(), - _Squeeze.Inputs( - data=data, - axes=axes, - ), - ).outputs.squeezed + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _Squeeze( + _Squeeze.Attributes(), + _Squeeze.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .squeezed + ) + return output_vars # type: ignore def string_normalizer( @@ -14574,19 +16011,29 @@ def string_normalizer( Signature: ``ai.onnx@10::StringNormalizer``. """ - return _StringNormalizer( - _StringNormalizer.Attributes( - case_change_action=AttrString( - case_change_action, name="case_change_action" + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _StringNormalizer( + _StringNormalizer.Attributes( + case_change_action=AttrString( + case_change_action, name="case_change_action" + ), + is_case_sensitive=AttrInt64( + is_case_sensitive, name="is_case_sensitive" + ), + locale=AttrString.maybe(locale, name="locale"), + stopwords=AttrStrings.maybe(stopwords, name="stopwords"), + ), + _StringNormalizer.Inputs( + X=unwrap_vars(X), ), - is_case_sensitive=AttrInt64(is_case_sensitive, name="is_case_sensitive"), - locale=AttrString.maybe(locale, name="locale"), - stopwords=AttrStrings.maybe(stopwords, name="stopwords"), - ), - _StringNormalizer.Inputs( - X=X, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def sub( @@ -14626,13 +16073,22 @@ def sub( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Sub( - _Sub.Attributes(), - _Sub.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Sub( + _Sub.Attributes(), + _Sub.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def sum( @@ -14664,12 +16120,20 @@ def sum( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Sum( - _Sum.Attributes(), - _Sum.Inputs( - data_0=data_0, - ), - ).outputs.sum + input_prop_values = create_prop_dict( + data_0=data_0, + ) + output_vars = ( + _Sum( + _Sum.Attributes(), + _Sum.Inputs( + data_0=unwrap_vars(data_0), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .sum + ) + return output_vars # type: ignore def tan( @@ -14697,12 +16161,20 @@ def tan( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Tan( - _Tan.Attributes(), - _Tan.Inputs( - input=input, - ), - 
).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Tan( + _Tan.Attributes(), + _Tan.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def tanh( @@ -14731,12 +16203,20 @@ def tanh( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Tanh( - _Tanh.Attributes(), - _Tanh.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Tanh( + _Tanh.Attributes(), + _Tanh.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def tf_idf_vectorizer( @@ -14868,22 +16348,30 @@ def tf_idf_vectorizer( - T: `tensor(int32)`, `tensor(int64)`, `tensor(string)` - T1: `tensor(float)` """ - return _TfIdfVectorizer( - _TfIdfVectorizer.Attributes( - max_gram_length=AttrInt64(max_gram_length, name="max_gram_length"), - max_skip_count=AttrInt64(max_skip_count, name="max_skip_count"), - min_gram_length=AttrInt64(min_gram_length, name="min_gram_length"), - mode=AttrString(mode, name="mode"), - ngram_counts=AttrInt64s(ngram_counts, name="ngram_counts"), - ngram_indexes=AttrInt64s(ngram_indexes, name="ngram_indexes"), - pool_int64s=AttrInt64s.maybe(pool_int64s, name="pool_int64s"), - pool_strings=AttrStrings.maybe(pool_strings, name="pool_strings"), - weights=AttrFloat32s.maybe(weights, name="weights"), - ), - _TfIdfVectorizer.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _TfIdfVectorizer( + _TfIdfVectorizer.Attributes( + max_gram_length=AttrInt64(max_gram_length, name="max_gram_length"), + max_skip_count=AttrInt64(max_skip_count, name="max_skip_count"), + min_gram_length=AttrInt64(min_gram_length, name="min_gram_length"), + mode=AttrString(mode, name="mode"), + ngram_counts=AttrInt64s(ngram_counts, name="ngram_counts"), + ngram_indexes=AttrInt64s(ngram_indexes, name="ngram_indexes"), + pool_int64s=AttrInt64s.maybe(pool_int64s, name="pool_int64s"), + pool_strings=AttrStrings.maybe(pool_strings, name="pool_strings"), + weights=AttrFloat32s.maybe(weights, name="weights"), + ), + _TfIdfVectorizer.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def thresholded_relu( @@ -14918,14 +16406,22 @@ def thresholded_relu( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _ThresholdedRelu( - _ThresholdedRelu.Attributes( - alpha=AttrFloat32(alpha, name="alpha"), - ), - _ThresholdedRelu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _ThresholdedRelu( + _ThresholdedRelu.Attributes( + alpha=AttrFloat32(alpha, name="alpha"), + ), + _ThresholdedRelu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def tile( @@ -14962,13 +16458,22 @@ def tile( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Tile( - _Tile.Attributes(), - _Tile.Inputs( - input=input, - repeats=repeats, - ), - 
).outputs.output + input_prop_values = create_prop_dict( + input=input, + repeats=repeats, + ) + output_vars = ( + _Tile( + _Tile.Attributes(), + _Tile.Inputs( + input=unwrap_vars(input), + repeats=unwrap_vars(repeats), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def top_k( @@ -15046,17 +16551,26 @@ def top_k( - T: `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - I: `tensor(int64)` """ - return _TopK( - _TopK.Attributes( - axis=AttrInt64(axis, name="axis"), - largest=AttrInt64(largest, name="largest"), - sorted=AttrInt64(sorted, name="sorted"), - ), - _TopK.Inputs( - X=X, - K=K, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + K=K, + ) + output_vars = ( + _TopK( + _TopK.Attributes( + axis=AttrInt64(axis, name="axis"), + largest=AttrInt64(largest, name="largest"), + sorted=AttrInt64(sorted, name="sorted"), + ), + _TopK.Inputs( + X=unwrap_vars(X), + K=unwrap_vars(K), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def transpose( @@ -15092,14 +16606,22 @@ def transpose( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Transpose( - _Transpose.Attributes( - perm=AttrInt64s.maybe(perm, name="perm"), - ), - _Transpose.Inputs( - data=data, - ), - ).outputs.transposed + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _Transpose( + _Transpose.Attributes( + perm=AttrInt64s.maybe(perm, name="perm"), + ), + _Transpose.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .transposed + ) + return output_vars # type: ignore def trilu( @@ -15155,15 +16677,24 @@ def trilu( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Trilu( - _Trilu.Attributes( - upper=AttrInt64(upper, name="upper"), - ), - _Trilu.Inputs( - input=input, - k=k, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + k=k, + ) + output_vars = ( + _Trilu( + _Trilu.Attributes( + upper=AttrInt64(upper, name="upper"), + ), + _Trilu.Inputs( + input=unwrap_vars(input), + k=unwrap_vars(k), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def unique( @@ -15335,15 +16866,23 @@ def unique( Type constraints: - T: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Unique( - _Unique.Attributes( - axis=AttrInt64.maybe(axis, name="axis"), - sorted=AttrInt64(sorted, name="sorted"), - ), - _Unique.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + ) + 
output_vars = ( + _Unique( + _Unique.Attributes( + axis=AttrInt64.maybe(axis, name="axis"), + sorted=AttrInt64(sorted, name="sorted"), + ), + _Unique.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def unsqueeze( @@ -15391,13 +16930,22 @@ def unsqueeze( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Unsqueeze( - _Unsqueeze.Attributes(), - _Unsqueeze.Inputs( - data=data, - axes=axes, - ), - ).outputs.expanded + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _Unsqueeze( + _Unsqueeze.Attributes(), + _Unsqueeze.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .expanded + ) + return output_vars # type: ignore def where( @@ -15441,14 +16989,24 @@ def where( - B: `tensor(bool)` - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Where( - _Where.Attributes(), - _Where.Inputs( - condition=condition, - X=X, - Y=Y, - ), - ).outputs.output + input_prop_values = create_prop_dict( + condition=condition, + X=X, + Y=Y, + ) + output_vars = ( + _Where( + _Where.Attributes(), + _Where.Inputs( + condition=unwrap_vars(condition), + X=unwrap_vars(X), + Y=unwrap_vars(Y), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def xor( @@ -15487,13 +17045,22 @@ def xor( - T: `tensor(bool)` - T1: `tensor(bool)` """ - return _Xor( - _Xor.Attributes(), - _Xor.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Xor( + _Xor.Attributes(), + _Xor.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: diff --git a/src/spox/opset/ai/onnx/v18.py b/src/spox/opset/ai/onnx/v18.py index 028c0775..d43ce2d5 100644 --- a/src/spox/opset/ai/onnx/v18.py +++ b/src/spox/opset/ai/onnx/v18.py @@ -20,7 +20,7 @@ from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._node import OpType from spox._standard import StandardNode -from spox._var import Var +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.v17 import ( _DFT, _GRU, @@ -350,12 +350,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("BitwiseAnd", "", 18) @@ -371,11 +371,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("BitwiseNot", "", 18) @@ -391,12 +391,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class 
Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("BitwiseOr", "", 18) @@ -412,12 +412,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("BitwiseXor", "", 18) @@ -433,12 +433,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input_data: Var - shape: Var + input_data: _VarInfo + shape: _VarInfo @dataclass class Outputs(BaseOutputs): - output_data: Var + output_data: _VarInfo op_type = OpType("CenterCropPad", "", 18) @@ -456,13 +456,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - image_shape: Var - block_shape: Var + input: _VarInfo + image_shape: _VarInfo + block_shape: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Col2Im", "", 18) @@ -479,13 +479,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - scale: Var - bias: Var + X: _VarInfo + scale: _VarInfo + bias: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GroupNormalization", "", 18) @@ -507,11 +507,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LpPool", "", 18) @@ -527,11 +527,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Mish", "", 18) @@ -547,11 +547,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("OptionalGetElement", "", 18) @@ -567,11 +567,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Optional[Var] + input: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("OptionalHasElement", "", 18) @@ -587,14 +587,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - pads: Var - constant_value: Optional[Var] - axes: Optional[Var] + data: _VarInfo + pads: _VarInfo + constant_value: Optional[_VarInfo] + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Pad", "", 18) @@ -611,12 +611,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceL1", "", 18) @@ -633,12 +633,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceL2", "", 18) @@ -655,12 +655,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceLogSum", "", 18) @@ -677,12 +677,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class 
Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceLogSumExp", "", 18) @@ -699,12 +699,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMax", "", 18) @@ -721,12 +721,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMean", "", 18) @@ -743,12 +743,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMin", "", 18) @@ -765,12 +765,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceProd", "", 18) @@ -787,12 +787,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceSumSquare", "", 18) @@ -816,14 +816,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - roi: Optional[Var] - scales: Optional[Var] - sizes: Optional[Var] + X: _VarInfo + roi: Optional[_VarInfo] + scales: Optional[_VarInfo] + sizes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Resize", "", 18) @@ -840,13 +840,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var - updates: Var + data: _VarInfo + indices: _VarInfo + updates: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ScatterElements", "", 18) @@ -862,13 +862,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - indices: Var - updates: Var + data: _VarInfo + indices: _VarInfo + updates: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ScatterND", "", 18) @@ -885,12 +885,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - split: Optional[Var] + input: _VarInfo + split: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("Split", "", 18) @@ -934,13 +934,22 @@ def bitwise_and( Type constraints: - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BitwiseAnd( - _BitwiseAnd.Attributes(), - _BitwiseAnd.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _BitwiseAnd( + _BitwiseAnd.Attributes(), + _BitwiseAnd.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def bitwise_not( @@ -968,12 +977,20 @@ def bitwise_not( Type constraints: - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, 
`tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BitwiseNot( - _BitwiseNot.Attributes(), - _BitwiseNot.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _BitwiseNot( + _BitwiseNot.Attributes(), + _BitwiseNot.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def bitwise_or( @@ -1011,13 +1028,22 @@ def bitwise_or( Type constraints: - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BitwiseOr( - _BitwiseOr.Attributes(), - _BitwiseOr.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _BitwiseOr( + _BitwiseOr.Attributes(), + _BitwiseOr.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def bitwise_xor( @@ -1055,13 +1081,22 @@ def bitwise_xor( Type constraints: - T: `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _BitwiseXor( - _BitwiseXor.Attributes(), - _BitwiseXor.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _BitwiseXor( + _BitwiseXor.Attributes(), + _BitwiseXor.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def center_crop_pad( @@ -1111,15 +1146,24 @@ def center_crop_pad( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _CenterCropPad( - _CenterCropPad.Attributes( - axes=AttrInt64s.maybe(axes, name="axes"), - ), - _CenterCropPad.Inputs( - input_data=input_data, - shape=shape, - ), - ).outputs.output_data + input_prop_values = create_prop_dict( + input_data=input_data, + shape=shape, + ) + output_vars = ( + _CenterCropPad( + _CenterCropPad.Attributes( + axes=AttrInt64s.maybe(axes, name="axes"), + ), + _CenterCropPad.Inputs( + input_data=unwrap_vars(input_data), + shape=unwrap_vars(shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output_data + ) + return output_vars # type: ignore def col2_im( @@ -1202,18 +1246,28 @@ def col2_im( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Col2Im( - _Col2Im.Attributes( - dilations=AttrInt64s.maybe(dilations, name="dilations"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _Col2Im.Inputs( - input=input, - image_shape=image_shape, - block_shape=block_shape, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + image_shape=image_shape, + block_shape=block_shape, + ) + output_vars = ( + _Col2Im( + _Col2Im.Attributes( + dilations=AttrInt64s.maybe(dilations, 
name="dilations"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _Col2Im.Inputs( + input=unwrap_vars(input), + image_shape=unwrap_vars(image_shape), + block_shape=unwrap_vars(block_shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def group_normalization( @@ -1281,17 +1335,27 @@ def group_normalization( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GroupNormalization( - _GroupNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - num_groups=AttrInt64(num_groups, name="num_groups"), - ), - _GroupNormalization.Inputs( - X=X, - scale=scale, - bias=bias, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + scale=scale, + bias=bias, + ) + output_vars = ( + _GroupNormalization( + _GroupNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + num_groups=AttrInt64(num_groups, name="num_groups"), + ), + _GroupNormalization.Inputs( + X=unwrap_vars(X), + scale=unwrap_vars(scale), + bias=unwrap_vars(bias), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def lp_pool( @@ -1402,20 +1466,28 @@ def lp_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _LpPool( - _LpPool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - p=AttrInt64(p, name="p"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _LpPool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _LpPool( + _LpPool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + p=AttrInt64(p, name="p"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _LpPool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def mish( @@ -1450,12 +1522,20 @@ def mish( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Mish( - _Mish.Attributes(), - _Mish.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Mish( + _Mish.Attributes(), + _Mish.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def optional_get_element( @@ -1487,12 +1567,20 @@ def optional_get_element( - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, 
`optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - V: `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _OptionalGetElement( - _OptionalGetElement.Attributes(), - _OptionalGetElement.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _OptionalGetElement( + _OptionalGetElement.Attributes(), + _OptionalGetElement.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def optional_has_element( @@ -1524,12 +1612,20 @@ def optional_has_element( - O: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, 
`tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - B: `tensor(bool)` """ - return _OptionalHasElement( - _OptionalHasElement.Attributes(), - _OptionalHasElement.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _OptionalHasElement( + _OptionalHasElement.Attributes(), + _OptionalHasElement.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def pad( @@ -1666,17 +1762,28 @@ def pad( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=data, - pads=pads, - constant_value=constant_value, - axes=axes, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + pads=pads, + constant_value=constant_value, + axes=axes, + ) + output_vars = ( + _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), + _Pad.Inputs( + data=unwrap_vars(data), + pads=unwrap_vars(pads), + constant_value=unwrap_vars(constant_value), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def reduce_l1( @@ -1732,18 +1839,27 @@ def reduce_l1( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceL1( - _ReduceL1.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceL1( + _ReduceL1.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceL1.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceL1.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_l2( @@ -1799,18 +1915,27 @@ def reduce_l2( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceL2( - _ReduceL2.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceL2( + _ReduceL2.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceL2.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceL2.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_log_sum( @@ -1867,18 +1992,27 @@ def reduce_log_sum( Type constraints: - T: `tensor(bfloat16)`, 
`tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceLogSum( - _ReduceLogSum.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceLogSum( + _ReduceLogSum.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceLogSum.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceLogSum.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_log_sum_exp( @@ -1935,18 +2069,27 @@ def reduce_log_sum_exp( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceLogSumExp( - _ReduceLogSumExp.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceLogSumExp( + _ReduceLogSumExp.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceLogSumExp.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceLogSumExp.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_max( @@ -2004,18 +2147,27 @@ def reduce_max( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMax( - _ReduceMax.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceMax( + _ReduceMax.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceMax.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceMax.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_mean( @@ -2071,18 +2223,27 @@ def reduce_mean( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceMean( - _ReduceMean.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceMean( + _ReduceMean.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceMean.Inputs( + 
data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceMean.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_min( @@ -2139,18 +2300,27 @@ def reduce_min( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMin( - _ReduceMin.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceMin( + _ReduceMin.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceMin.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceMin.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_prod( @@ -2206,18 +2376,27 @@ def reduce_prod( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceProd( - _ReduceProd.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceProd( + _ReduceProd.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceProd.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceProd.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_sum_square( @@ -2273,18 +2452,27 @@ def reduce_sum_square( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(uint32)`, `tensor(uint64)` """ - return _ReduceSumSquare( - _ReduceSumSquare.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceSumSquare( + _ReduceSumSquare.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), ), - ), - _ReduceSumSquare.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + _ReduceSumSquare.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def resize( @@ -2457,31 +2645,43 @@ def resize( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, 
`tensor(float16)` """ - return _Resize( - _Resize.Attributes( - antialias=AttrInt64(antialias, name="antialias"), - axes=AttrInt64s.maybe(axes, name="axes"), - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, name="coordinate_transformation_mode" - ), - cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), - exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), - extrapolation_value=AttrFloat32( - extrapolation_value, name="extrapolation_value" + input_prop_values = create_prop_dict( + X=X, + roi=roi, + scales=scales, + sizes=sizes, + ) + output_vars = ( + _Resize( + _Resize.Attributes( + antialias=AttrInt64(antialias, name="antialias"), + axes=AttrInt64s.maybe(axes, name="axes"), + coordinate_transformation_mode=AttrString( + coordinate_transformation_mode, + name="coordinate_transformation_mode", + ), + cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), + exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), + extrapolation_value=AttrFloat32( + extrapolation_value, name="extrapolation_value" + ), + keep_aspect_ratio_policy=AttrString( + keep_aspect_ratio_policy, name="keep_aspect_ratio_policy" + ), + mode=AttrString(mode, name="mode"), + nearest_mode=AttrString(nearest_mode, name="nearest_mode"), ), - keep_aspect_ratio_policy=AttrString( - keep_aspect_ratio_policy, name="keep_aspect_ratio_policy" + _Resize.Inputs( + X=unwrap_vars(X), + roi=unwrap_vars(roi), + scales=unwrap_vars(scales), + sizes=unwrap_vars(sizes), ), - mode=AttrString(mode, name="mode"), - nearest_mode=AttrString(nearest_mode, name="nearest_mode"), - ), - _Resize.Inputs( - X=X, - roi=roi, - scales=scales, - sizes=sizes, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def scatter_elements( @@ -2607,17 +2807,27 @@ def scatter_elements( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _ScatterElements( - _ScatterElements.Attributes( - axis=AttrInt64(axis, name="axis"), - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterElements.Inputs( - data=data, - indices=indices, - updates=updates, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + indices=indices, + updates=updates, + ) + output_vars = ( + _ScatterElements( + _ScatterElements.Attributes( + axis=AttrInt64(axis, name="axis"), + reduction=AttrString(reduction, name="reduction"), + ), + _ScatterElements.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + updates=unwrap_vars(updates), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def scatter_nd( @@ -2745,16 +2955,26 @@ def scatter_nd( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ScatterND( - _ScatterND.Attributes( - reduction=AttrString(reduction, name="reduction"), - ), - _ScatterND.Inputs( - data=data, - indices=indices, - updates=updates, - ), - ).outputs.output + 
input_prop_values = create_prop_dict( + data=data, + indices=indices, + updates=updates, + ) + output_vars = ( + _ScatterND( + _ScatterND.Attributes( + reduction=AttrString(reduction, name="reduction"), + ), + _ScatterND.Inputs( + data=unwrap_vars(data), + indices=unwrap_vars(indices), + updates=unwrap_vars(updates), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def split( @@ -2804,17 +3024,26 @@ def split( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Split( - _Split.Attributes( - axis=AttrInt64(axis, name="axis"), - num_outputs=AttrInt64.maybe(num_outputs, name="num_outputs"), - ), - _Split.Inputs( - input=input, - split=split, - ), - out_variadic=num_outputs, - ).outputs.outputs + input_prop_values = create_prop_dict( + input=input, + split=split, + ) + output_vars = ( + _Split( + _Split.Attributes( + axis=AttrInt64(axis, name="axis"), + num_outputs=AttrInt64.maybe(num_outputs, name="num_outputs"), + ), + _Split.Inputs( + input=unwrap_vars(input), + split=unwrap_vars(split), + ), + out_variadic=num_outputs, + ) + .get_output_vars(input_prop_values=input_prop_values) + .outputs + ) + return output_vars # type: ignore def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: diff --git a/src/spox/opset/ai/onnx/v19.py b/src/spox/opset/ai/onnx/v19.py index 6c14823d..bbc6d9b9 100644 --- a/src/spox/opset/ai/onnx/v19.py +++ b/src/spox/opset/ai/onnx/v19.py @@ -29,8 +29,8 @@ from spox._node import OpType from spox._standard import StandardNode from spox._type_system import Tensor, Type -from spox._value_prop import PropValueType -from spox._var import Var +from spox._value_prop import PropDict, PropValueType +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.v18 import ( _DFT, _GRU, @@ -384,11 +384,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("AveragePool", "", 19) @@ -405,11 +405,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Cast", "", 19) @@ -425,12 +425,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - target_type: Var + input: _VarInfo + target_type: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("CastLike", "", 19) @@ -454,9 +454,9 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: ((key, raw),) = ( (k, v.value) for k, v in self.attrs.get_fields().items() if v is not None ) @@ -501,15 +501,15 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - W: Var - offset: Var - B: Optional[Var] - mask: Optional[Var] + X: _VarInfo + W: _VarInfo + offset: _VarInfo + B: Optional[_VarInfo] + mask: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("DeformConv", "", 
19) @@ -525,13 +525,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - x_scale: Var - x_zero_point: Optional[Var] + x: _VarInfo + x_scale: _VarInfo + x_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("DequantizeLinear", "", 19) @@ -547,12 +547,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - A: Var - B: Var + A: _VarInfo + B: _VarInfo @dataclass class Outputs(BaseOutputs): - C: Var + C: _VarInfo op_type = OpType("Equal", "", 19) @@ -568,11 +568,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Identity", "", 19) @@ -589,11 +589,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - cond: Var + cond: _VarInfo @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("If", "", 19) @@ -609,13 +609,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - M: Optional[Var] - cond: Optional[Var] - v_initial: Sequence[Var] + M: Optional[_VarInfo] + cond: Optional[_VarInfo] + v_initial: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - v_final_and_scan_outputs: Sequence[Var] + v_final_and_scan_outputs: Sequence[_VarInfo] op_type = OpType("Loop", "", 19) @@ -631,14 +631,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - pads: Var - constant_value: Optional[Var] - axes: Optional[Var] + data: _VarInfo + pads: _VarInfo + constant_value: Optional[_VarInfo] + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Pad", "", 19) @@ -655,13 +655,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - y_scale: Var - y_zero_point: Optional[Var] + x: _VarInfo + y_scale: _VarInfo + y_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QuantizeLinear", "", 19) @@ -677,12 +677,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - shape: Var + data: _VarInfo + shape: _VarInfo @dataclass class Outputs(BaseOutputs): - reshaped: Var + reshaped: _VarInfo op_type = OpType("Reshape", "", 19) @@ -706,14 +706,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - roi: Optional[Var] - scales: Optional[Var] - sizes: Optional[Var] + X: _VarInfo + roi: Optional[_VarInfo] + scales: Optional[_VarInfo] + sizes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Resize", "", 19) @@ -734,11 +734,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - initial_state_and_scan_inputs: Sequence[Var] + initial_state_and_scan_inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - final_state_and_scan_outputs: Sequence[Var] + final_state_and_scan_outputs: Sequence[_VarInfo] op_type = OpType("Scan", "", 19) @@ -755,11 +755,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - shape: Var + shape: _VarInfo op_type = OpType("Shape", "", 19) @@ -775,11 +775,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - size: Var + size: _VarInfo op_type 
= OpType("Size", "", 19) @@ -915,20 +915,30 @@ def average_pool( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _AveragePool( - _AveragePool.Attributes( - auto_pad=AttrString(auto_pad, name="auto_pad"), - ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), - count_include_pad=AttrInt64(count_include_pad, name="count_include_pad"), - dilations=AttrInt64s.maybe(dilations, name="dilations"), - kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _AveragePool.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _AveragePool( + _AveragePool.Attributes( + auto_pad=AttrString(auto_pad, name="auto_pad"), + ceil_mode=AttrInt64(ceil_mode, name="ceil_mode"), + count_include_pad=AttrInt64( + count_include_pad, name="count_include_pad" + ), + dilations=AttrInt64s.maybe(dilations, name="dilations"), + kernel_shape=AttrInt64s(kernel_shape, name="kernel_shape"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _AveragePool.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def cast( @@ -1052,15 +1062,23 @@ def cast( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Cast( - _Cast.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - to=AttrDtype(to, name="to"), - ), - _Cast.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Cast( + _Cast.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + to=AttrDtype(to, name="to"), + ), + _Cast.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def cast_like( @@ -1106,15 +1124,24 @@ def cast_like( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _CastLike( - _CastLike.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - ), - _CastLike.Inputs( - input=input, - target_type=target_type, - ), - ).outputs.output + input_prop_values = 
create_prop_dict( + input=input, + target_type=target_type, + ) + output_vars = ( + _CastLike( + _CastLike.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + ), + _CastLike.Inputs( + input=unwrap_vars(input), + target_type=unwrap_vars(target_type), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def constant( @@ -1172,18 +1199,24 @@ def constant( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Constant( - _Constant.Attributes( - value=AttrTensor.maybe(value, name="value"), - value_float=AttrFloat32.maybe(value_float, name="value_float"), - value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), - value_int=AttrInt64.maybe(value_int, name="value_int"), - value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), - value_string=AttrString.maybe(value_string, name="value_string"), - value_strings=AttrStrings.maybe(value_strings, name="value_strings"), - ), - _Constant.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + output_vars = ( + _Constant( + _Constant.Attributes( + value=AttrTensor.maybe(value, name="value"), + value_float=AttrFloat32.maybe(value_float, name="value_float"), + value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), + value_int=AttrInt64.maybe(value_int, name="value_int"), + value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), + value_string=AttrString.maybe(value_string, name="value_string"), + value_strings=AttrStrings.maybe(value_strings, name="value_strings"), + ), + _Constant.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def deform_conv( @@ -1282,23 +1315,35 @@ def deform_conv( Type constraints: - T: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _DeformConv( - _DeformConv.Attributes( - dilations=AttrInt64s.maybe(dilations, name="dilations"), - group=AttrInt64(group, name="group"), - kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), - offset_group=AttrInt64(offset_group, name="offset_group"), - pads=AttrInt64s.maybe(pads, name="pads"), - strides=AttrInt64s.maybe(strides, name="strides"), - ), - _DeformConv.Inputs( - X=X, - W=W, - offset=offset, - B=B, - mask=mask, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + W=W, + offset=offset, + B=B, + mask=mask, + ) + output_vars = ( + _DeformConv( + _DeformConv.Attributes( + dilations=AttrInt64s.maybe(dilations, name="dilations"), + group=AttrInt64(group, name="group"), + kernel_shape=AttrInt64s.maybe(kernel_shape, name="kernel_shape"), + offset_group=AttrInt64(offset_group, name="offset_group"), + pads=AttrInt64s.maybe(pads, name="pads"), + strides=AttrInt64s.maybe(strides, name="strides"), + ), + _DeformConv.Inputs( + X=unwrap_vars(X), + W=unwrap_vars(W), + offset=unwrap_vars(offset), + B=unwrap_vars(B), + mask=unwrap_vars(mask), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def dequantize_linear( @@ -1358,16 +1403,26 @@ def dequantize_linear( - T1: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, 
`tensor(float8e5m2fnuz)`, `tensor(int32)`, `tensor(int8)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)` """ - return _DequantizeLinear( - _DequantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _DequantizeLinear.Inputs( - x=x, - x_scale=x_scale, - x_zero_point=x_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + x_scale=x_scale, + x_zero_point=x_zero_point, + ) + output_vars = ( + _DequantizeLinear( + _DequantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _DequantizeLinear.Inputs( + x=unwrap_vars(x), + x_scale=unwrap_vars(x_scale), + x_zero_point=unwrap_vars(x_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def equal( @@ -1406,13 +1461,22 @@ def equal( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(bool)` """ - return _Equal( - _Equal.Attributes(), - _Equal.Inputs( - A=A, - B=B, - ), - ).outputs.C + input_prop_values = create_prop_dict( + A=A, + B=B, + ) + output_vars = ( + _Equal( + _Equal.Attributes(), + _Equal.Inputs( + A=unwrap_vars(A), + B=unwrap_vars(B), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .C + ) + return output_vars # type: ignore def identity( @@ -1440,12 +1504,20 @@ def identity( Type constraints: - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Identity( - _Identity.Attributes(), - _Identity.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Identity( + _Identity.Attributes(), + _Identity.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return 
output_vars # type: ignore def if_( @@ -1502,16 +1574,24 @@ def if_( """ _else_branch_subgraph: Graph = subgraph((), else_branch) _then_branch_subgraph: Graph = subgraph((), then_branch) - return _If( - _If.Attributes( - else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), - then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), - ), - _If.Inputs( - cond=cond, - ), - out_variadic=len(_else_branch_subgraph.requested_results), - ).outputs.outputs + input_prop_values = create_prop_dict( + cond=cond, + ) + output_vars = ( + _If( + _If.Attributes( + else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), + then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), + ), + _If.Inputs( + cond=unwrap_vars(cond), + ), + out_variadic=len(_else_branch_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .outputs + ) + return output_vars # type: ignore def loop( @@ -1695,17 +1775,27 @@ def loop( + [var.unwrap_type() for var in v_initial], body, ) - return _Loop( - _Loop.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _Loop.Inputs( - M=M, - cond=cond, - v_initial=v_initial, - ), - out_variadic=len(_body_subgraph.requested_results) - 1, - ).outputs.v_final_and_scan_outputs + input_prop_values = create_prop_dict( + M=M, + cond=cond, + v_initial=v_initial, + ) + output_vars = ( + _Loop( + _Loop.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), + _Loop.Inputs( + M=unwrap_vars(M), + cond=unwrap_vars(cond), + v_initial=unwrap_vars(v_initial), + ), + out_variadic=len(_body_subgraph.requested_results) - 1, + ) + .get_output_vars(input_prop_values=input_prop_values) + .v_final_and_scan_outputs + ) + return output_vars # type: ignore def pad( @@ -1868,17 +1958,28 @@ def pad( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=data, - pads=pads, - constant_value=constant_value, - axes=axes, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + pads=pads, + constant_value=constant_value, + axes=axes, + ) + output_vars = ( + _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), + _Pad.Inputs( + data=unwrap_vars(data), + pads=unwrap_vars(pads), + constant_value=unwrap_vars(constant_value), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def quantize_linear( @@ -1947,17 +2048,27 @@ def quantize_linear( - T1: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)` - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` """ - return _QuantizeLinear( - _QuantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - saturate=AttrInt64(saturate, name="saturate"), - ), - _QuantizeLinear.Inputs( - x=x, - y_scale=y_scale, - y_zero_point=y_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + y_scale=y_scale, + y_zero_point=y_zero_point, + ) + output_vars = ( + _QuantizeLinear( + _QuantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + saturate=AttrInt64(saturate, 
name="saturate"), + ), + _QuantizeLinear.Inputs( + x=unwrap_vars(x), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def reshape( @@ -2011,15 +2122,24 @@ def reshape( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Reshape( - _Reshape.Attributes( - allowzero=AttrInt64(allowzero, name="allowzero"), - ), - _Reshape.Inputs( - data=data, - shape=shape, - ), - ).outputs.reshaped + input_prop_values = create_prop_dict( + data=data, + shape=shape, + ) + output_vars = ( + _Reshape( + _Reshape.Attributes( + allowzero=AttrInt64(allowzero, name="allowzero"), + ), + _Reshape.Inputs( + data=unwrap_vars(data), + shape=unwrap_vars(shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reshaped + ) + return output_vars # type: ignore def resize( @@ -2230,31 +2350,43 @@ def resize( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Resize( - _Resize.Attributes( - antialias=AttrInt64(antialias, name="antialias"), - axes=AttrInt64s.maybe(axes, name="axes"), - coordinate_transformation_mode=AttrString( - coordinate_transformation_mode, name="coordinate_transformation_mode" - ), - cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), - exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), - extrapolation_value=AttrFloat32( - extrapolation_value, name="extrapolation_value" + input_prop_values = create_prop_dict( + X=X, + roi=roi, + scales=scales, + sizes=sizes, + ) + output_vars = ( + _Resize( + _Resize.Attributes( + antialias=AttrInt64(antialias, name="antialias"), + axes=AttrInt64s.maybe(axes, name="axes"), + coordinate_transformation_mode=AttrString( + coordinate_transformation_mode, + name="coordinate_transformation_mode", + ), + cubic_coeff_a=AttrFloat32(cubic_coeff_a, name="cubic_coeff_a"), + exclude_outside=AttrInt64(exclude_outside, name="exclude_outside"), + extrapolation_value=AttrFloat32( + extrapolation_value, name="extrapolation_value" + ), + keep_aspect_ratio_policy=AttrString( + keep_aspect_ratio_policy, name="keep_aspect_ratio_policy" + ), + mode=AttrString(mode, name="mode"), + nearest_mode=AttrString(nearest_mode, name="nearest_mode"), ), - keep_aspect_ratio_policy=AttrString( - keep_aspect_ratio_policy, name="keep_aspect_ratio_policy" + _Resize.Inputs( + X=unwrap_vars(X), + roi=unwrap_vars(roi), + scales=unwrap_vars(scales), + sizes=unwrap_vars(sizes), ), - mode=AttrString(mode, name="mode"), - nearest_mode=AttrString(nearest_mode, name="nearest_mode"), - ), - _Resize.Inputs( - X=X, - roi=roi, - scales=scales, - sizes=sizes, - ), - ).outputs.Y + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def scan( @@ -2478,26 +2610,38 @@ def scan( ], body, ) - return _Scan( - _Scan.Attributes( - 
body=AttrGraph(_body_subgraph, name="body"), - num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), - scan_input_axes=AttrInt64s.maybe(scan_input_axes, name="scan_input_axes"), - scan_input_directions=AttrInt64s.maybe( - scan_input_directions, name="scan_input_directions" - ), - scan_output_axes=AttrInt64s.maybe( - scan_output_axes, name="scan_output_axes" + input_prop_values = create_prop_dict( + initial_state_and_scan_inputs=initial_state_and_scan_inputs, + ) + output_vars = ( + _Scan( + _Scan.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), + scan_input_axes=AttrInt64s.maybe( + scan_input_axes, name="scan_input_axes" + ), + scan_input_directions=AttrInt64s.maybe( + scan_input_directions, name="scan_input_directions" + ), + scan_output_axes=AttrInt64s.maybe( + scan_output_axes, name="scan_output_axes" + ), + scan_output_directions=AttrInt64s.maybe( + scan_output_directions, name="scan_output_directions" + ), ), - scan_output_directions=AttrInt64s.maybe( - scan_output_directions, name="scan_output_directions" + _Scan.Inputs( + initial_state_and_scan_inputs=unwrap_vars( + initial_state_and_scan_inputs + ), ), - ), - _Scan.Inputs( - initial_state_and_scan_inputs=initial_state_and_scan_inputs, - ), - out_variadic=len(_body_subgraph.requested_results), - ).outputs.final_state_and_scan_outputs + out_variadic=len(_body_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .final_state_and_scan_outputs + ) + return output_vars # type: ignore def shape( @@ -2576,15 +2720,23 @@ def shape( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Shape( - _Shape.Attributes( - end=AttrInt64.maybe(end, name="end"), - start=AttrInt64(start, name="start"), - ), - _Shape.Inputs( - data=data, - ), - ).outputs.shape + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _Shape( + _Shape.Attributes( + end=AttrInt64.maybe(end, name="end"), + start=AttrInt64(start, name="start"), + ), + _Shape.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .shape + ) + return output_vars # type: ignore def size( @@ -2614,12 +2766,20 @@ def size( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Size( - _Size.Attributes(), - _Size.Inputs( - data=data, - ), - ).outputs.size + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _Size( + _Size.Attributes(), + _Size.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .size + ) + return output_vars # type: ignore def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: diff --git a/src/spox/opset/ai/onnx/v20.py b/src/spox/opset/ai/onnx/v20.py index 
fa5a4c42..7ae5756d 100644 --- a/src/spox/opset/ai/onnx/v20.py +++ b/src/spox/opset/ai/onnx/v20.py @@ -18,7 +18,7 @@ from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._node import OpType from spox._standard import StandardNode -from spox._var import Var +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.v19 import ( _GRU, _LRN, @@ -386,12 +386,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - theta: Var - size: Var + theta: _VarInfo + size: _VarInfo @dataclass class Outputs(BaseOutputs): - grid: Var + grid: _VarInfo op_type = OpType("AffineGrid", "", 20) @@ -407,11 +407,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ConstantOfShape", "", 20) @@ -428,13 +428,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - dft_length: Optional[Var] - axis: Optional[Var] + input: _VarInfo + dft_length: Optional[_VarInfo] + axis: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("DFT", "", 20) @@ -450,11 +450,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("Gelu", "", 20) @@ -472,12 +472,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - grid: Var + X: _VarInfo + grid: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GridSample", "", 20) @@ -493,11 +493,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - encoded_stream: Var + encoded_stream: _VarInfo @dataclass class Outputs(BaseOutputs): - image: Var + image: _VarInfo op_type = OpType("ImageDecoder", "", 20) @@ -514,11 +514,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("IsInf", "", 20) @@ -534,11 +534,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("IsNaN", "", 20) @@ -555,12 +555,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMax", "", 20) @@ -577,12 +577,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - reduced: Var + reduced: _VarInfo op_type = OpType("ReduceMin", "", 20) @@ -598,11 +598,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("RegexFullMatch", "", 20) @@ -618,12 +618,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - Y: Var + X: _VarInfo + Y: _VarInfo @dataclass class Outputs(BaseOutputs): - Z: Var + Z: _VarInfo op_type = OpType("StringConcat", "", 20) @@ -640,12 +640,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - 
Y: Var - Z: Var + Y: _VarInfo + Z: _VarInfo op_type = OpType("StringSplit", "", 20) @@ -733,15 +733,24 @@ def affine_grid( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int64)` """ - return _AffineGrid( - _AffineGrid.Attributes( - align_corners=AttrInt64(align_corners, name="align_corners"), - ), - _AffineGrid.Inputs( - theta=theta, - size=size, - ), - ).outputs.grid + input_prop_values = create_prop_dict( + theta=theta, + size=size, + ) + output_vars = ( + _AffineGrid( + _AffineGrid.Attributes( + align_corners=AttrInt64(align_corners, name="align_corners"), + ), + _AffineGrid.Inputs( + theta=unwrap_vars(theta), + size=unwrap_vars(size), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .grid + ) + return output_vars # type: ignore def constant_of_shape( @@ -781,14 +790,22 @@ def constant_of_shape( - T1: `tensor(int64)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ConstantOfShape( - _ConstantOfShape.Attributes( - value=AttrTensor.maybe(value, name="value"), - ), - _ConstantOfShape.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _ConstantOfShape( + _ConstantOfShape.Attributes( + value=AttrTensor.maybe(value, name="value"), + ), + _ConstantOfShape.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def dft( @@ -881,17 +898,27 @@ def dft( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` - T2: `tensor(int32)`, `tensor(int64)` """ - return _DFT( - _DFT.Attributes( - inverse=AttrInt64(inverse, name="inverse"), - onesided=AttrInt64(onesided, name="onesided"), - ), - _DFT.Inputs( - input=input, - dft_length=dft_length, - axis=axis, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + dft_length=dft_length, + axis=axis, + ) + output_vars = ( + _DFT( + _DFT.Attributes( + inverse=AttrInt64(inverse, name="inverse"), + onesided=AttrInt64(onesided, name="onesided"), + ), + _DFT.Inputs( + input=unwrap_vars(input), + dft_length=unwrap_vars(dft_length), + axis=unwrap_vars(axis), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def gelu( @@ -932,14 +959,22 @@ def gelu( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _Gelu( - _Gelu.Attributes( - approximate=AttrString(approximate, name="approximate"), - ), - _Gelu.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _Gelu( + _Gelu.Attributes( + approximate=AttrString(approximate, name="approximate"), + ), + _Gelu.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def grid_sample( @@ -1044,17 +1079,26 @@ def grid_sample( - T1: `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` - T2: 
`tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GridSample( - _GridSample.Attributes( - align_corners=AttrInt64(align_corners, name="align_corners"), - mode=AttrString(mode, name="mode"), - padding_mode=AttrString(padding_mode, name="padding_mode"), - ), - _GridSample.Inputs( - X=X, - grid=grid, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + grid=grid, + ) + output_vars = ( + _GridSample( + _GridSample.Attributes( + align_corners=AttrInt64(align_corners, name="align_corners"), + mode=AttrString(mode, name="mode"), + padding_mode=AttrString(padding_mode, name="padding_mode"), + ), + _GridSample.Inputs( + X=unwrap_vars(X), + grid=unwrap_vars(grid), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def image_decoder( @@ -1115,14 +1159,22 @@ def image_decoder( - T1: `tensor(uint8)` - T2: `tensor(uint8)` """ - return _ImageDecoder( - _ImageDecoder.Attributes( - pixel_format=AttrString(pixel_format, name="pixel_format"), - ), - _ImageDecoder.Inputs( - encoded_stream=encoded_stream, - ), - ).outputs.image + input_prop_values = create_prop_dict( + encoded_stream=encoded_stream, + ) + output_vars = ( + _ImageDecoder( + _ImageDecoder.Attributes( + pixel_format=AttrString(pixel_format, name="pixel_format"), + ), + _ImageDecoder.Inputs( + encoded_stream=unwrap_vars(encoded_stream), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .image + ) + return output_vars # type: ignore def isinf( @@ -1164,15 +1216,23 @@ def isinf( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)` - T2: `tensor(bool)` """ - return _IsInf( - _IsInf.Attributes( - detect_negative=AttrInt64(detect_negative, name="detect_negative"), - detect_positive=AttrInt64(detect_positive, name="detect_positive"), - ), - _IsInf.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _IsInf( + _IsInf.Attributes( + detect_negative=AttrInt64(detect_negative, name="detect_negative"), + detect_positive=AttrInt64(detect_positive, name="detect_positive"), + ), + _IsInf.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def isnan( @@ -1201,12 +1261,20 @@ def isnan( - T1: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)` - T2: `tensor(bool)` """ - return _IsNaN( - _IsNaN.Attributes(), - _IsNaN.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _IsNaN( + _IsNaN.Attributes(), + _IsNaN.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def reduce_max( @@ -1267,18 +1335,27 @@ def reduce_max( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMax( - _ReduceMax.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceMax( + _ReduceMax.Attributes( + 
keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceMax.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceMax.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def reduce_min( @@ -1338,18 +1415,27 @@ def reduce_min( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint32)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ReduceMin( - _ReduceMin.Attributes( - keepdims=AttrInt64(keepdims, name="keepdims"), - noop_with_empty_axes=AttrInt64( - noop_with_empty_axes, name="noop_with_empty_axes" + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _ReduceMin( + _ReduceMin.Attributes( + keepdims=AttrInt64(keepdims, name="keepdims"), + noop_with_empty_axes=AttrInt64( + noop_with_empty_axes, name="noop_with_empty_axes" + ), + ), + _ReduceMin.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), ), - ), - _ReduceMin.Inputs( - data=data, - axes=axes, - ), - ).outputs.reduced + ) + .get_output_vars(input_prop_values=input_prop_values) + .reduced + ) + return output_vars # type: ignore def regex_full_match( @@ -1388,14 +1474,22 @@ def regex_full_match( - T1: `tensor(string)` - T2: `tensor(bool)` """ - return _RegexFullMatch( - _RegexFullMatch.Attributes( - pattern=AttrString.maybe(pattern, name="pattern"), - ), - _RegexFullMatch.Inputs( - X=X, - ), - ).outputs.Y + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _RegexFullMatch( + _RegexFullMatch.Attributes( + pattern=AttrString.maybe(pattern, name="pattern"), + ), + _RegexFullMatch.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def string_concat( @@ -1428,13 +1522,22 @@ def string_concat( Type constraints: - T: `tensor(string)` """ - return _StringConcat( - _StringConcat.Attributes(), - _StringConcat.Inputs( - X=X, - Y=Y, - ), - ).outputs.Z + input_prop_values = create_prop_dict( + X=X, + Y=Y, + ) + output_vars = ( + _StringConcat( + _StringConcat.Attributes(), + _StringConcat.Inputs( + X=unwrap_vars(X), + Y=unwrap_vars(Y), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Z + ) + return output_vars # type: ignore def string_split( @@ -1506,15 +1609,23 @@ def string_split( - T2: `tensor(string)` - T3: `tensor(int64)` """ - return _StringSplit( - _StringSplit.Attributes( - delimiter=AttrString.maybe(delimiter, name="delimiter"), - maxsplit=AttrInt64.maybe(maxsplit, name="maxsplit"), - ), - _StringSplit.Inputs( - X=X, - ), - ).outputs._unpack_to_any() + input_prop_values = create_prop_dict( + X=X, + ) + output_vars = ( + _StringSplit( + _StringSplit.Attributes( + delimiter=AttrString.maybe(delimiter, name="delimiter"), + maxsplit=AttrInt64.maybe(maxsplit, name="maxsplit"), + ), + _StringSplit.Inputs( + X=unwrap_vars(X), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + ._unpack_to_any() + ) + return output_vars # type: ignore def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: diff --git a/src/spox/opset/ai/onnx/v21.py b/src/spox/opset/ai/onnx/v21.py index f4f027cc..cc5cb1dd 100644 --- a/src/spox/opset/ai/onnx/v21.py +++ b/src/spox/opset/ai/onnx/v21.py @@ -29,8 +29,8 @@ from spox._node 
import OpType from spox._standard import StandardNode from spox._type_system import Tensor, Type -from spox._value_prop import PropValueType -from spox._var import Var +from spox._value_prop import PropDict, PropValueType +from spox._var import Var, _VarInfo, create_prop_dict, unwrap_vars from spox.opset.ai.onnx.v20 import ( _DFT, _GRU, @@ -385,11 +385,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Cast", "", 21) @@ -405,12 +405,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var - target_type: Var + input: _VarInfo + target_type: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("CastLike", "", 21) @@ -434,9 +434,9 @@ class Attributes(BaseAttributes): @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: ((key, raw),) = ( (k, v.value) for k, v in self.attrs.get_fields().items() if v is not None ) @@ -476,11 +476,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("ConstantOfShape", "", 21) @@ -497,13 +497,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - x_scale: Var - x_zero_point: Optional[Var] + x: _VarInfo + x_scale: _VarInfo + x_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("DequantizeLinear", "", 21) @@ -519,11 +519,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Flatten", "", 21) @@ -541,13 +541,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var - scale: Var - bias: Var + X: _VarInfo + scale: _VarInfo + bias: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("GroupNormalization", "", 21) @@ -563,11 +563,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - input: Var + input: _VarInfo @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Identity", "", 21) @@ -584,11 +584,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - cond: Var + cond: _VarInfo @dataclass class Outputs(BaseOutputs): - outputs: Sequence[Var] + outputs: Sequence[_VarInfo] op_type = OpType("If", "", 21) @@ -604,13 +604,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - M: Optional[Var] - cond: Optional[Var] - v_initial: Sequence[Var] + M: Optional[_VarInfo] + cond: Optional[_VarInfo] + v_initial: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - v_final_and_scan_outputs: Sequence[Var] + v_final_and_scan_outputs: Sequence[_VarInfo] op_type = OpType("Loop", "", 21) @@ -626,14 +626,14 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - pads: Var - constant_value: Optional[Var] - axes: Optional[Var] + data: _VarInfo + pads: _VarInfo + constant_value: Optional[_VarInfo] + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - output: Var + output: _VarInfo op_type = OpType("Pad", "", 21) @@ 
-649,18 +649,18 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - a: Var - a_scale: Var - a_zero_point: Var - b: Var - b_scale: Var - b_zero_point: Var - y_scale: Var - y_zero_point: Var + a: _VarInfo + a_scale: _VarInfo + a_zero_point: _VarInfo + b: _VarInfo + b_scale: _VarInfo + b_zero_point: _VarInfo + y_scale: _VarInfo + y_zero_point: _VarInfo @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QLinearMatMul", "", 21) @@ -679,13 +679,13 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - x: Var - y_scale: Var - y_zero_point: Optional[Var] + x: _VarInfo + y_scale: _VarInfo + y_zero_point: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - y: Var + y: _VarInfo op_type = OpType("QuantizeLinear", "", 21) @@ -701,12 +701,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - shape: Var + data: _VarInfo + shape: _VarInfo @dataclass class Outputs(BaseOutputs): - reshaped: Var + reshaped: _VarInfo op_type = OpType("Reshape", "", 21) @@ -727,11 +727,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - initial_state_and_scan_inputs: Sequence[Var] + initial_state_and_scan_inputs: Sequence[_VarInfo] @dataclass class Outputs(BaseOutputs): - final_state_and_scan_outputs: Sequence[Var] + final_state_and_scan_outputs: Sequence[_VarInfo] op_type = OpType("Scan", "", 21) @@ -748,11 +748,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - shape: Var + shape: _VarInfo op_type = OpType("Shape", "", 21) @@ -768,11 +768,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - size: Var + size: _VarInfo op_type = OpType("Size", "", 21) @@ -788,12 +788,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Optional[Var] + data: _VarInfo + axes: Optional[_VarInfo] @dataclass class Outputs(BaseOutputs): - squeezed: Var + squeezed: _VarInfo op_type = OpType("Squeeze", "", 21) @@ -809,11 +809,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - transposed: Var + transposed: _VarInfo op_type = OpType("Transpose", "", 21) @@ -829,12 +829,12 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var - axes: Var + data: _VarInfo + axes: _VarInfo @dataclass class Outputs(BaseOutputs): - expanded: Var + expanded: _VarInfo op_type = OpType("Unsqueeze", "", 21) @@ -964,15 +964,23 @@ def cast( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Cast( - _Cast.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - 
to=AttrDtype(to, name="to"), - ), - _Cast.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Cast( + _Cast.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + to=AttrDtype(to, name="to"), + ), + _Cast.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def cast_like( @@ -1018,15 +1026,24 @@ def cast_like( - T1: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _CastLike( - _CastLike.Attributes( - saturate=AttrInt64(saturate, name="saturate"), - ), - _CastLike.Inputs( - input=input, - target_type=target_type, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + target_type=target_type, + ) + output_vars = ( + _CastLike( + _CastLike.Attributes( + saturate=AttrInt64(saturate, name="saturate"), + ), + _CastLike.Inputs( + input=unwrap_vars(input), + target_type=unwrap_vars(target_type), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def constant( @@ -1084,18 +1101,24 @@ def constant( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Constant( - _Constant.Attributes( - value=AttrTensor.maybe(value, name="value"), - value_float=AttrFloat32.maybe(value_float, name="value_float"), - value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), - value_int=AttrInt64.maybe(value_int, name="value_int"), - value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), - value_string=AttrString.maybe(value_string, name="value_string"), - value_strings=AttrStrings.maybe(value_strings, name="value_strings"), - ), - _Constant.Inputs(), - ).outputs.output + input_prop_values = create_prop_dict() + output_vars = ( + _Constant( + _Constant.Attributes( + value=AttrTensor.maybe(value, name="value"), + value_float=AttrFloat32.maybe(value_float, name="value_float"), + value_floats=AttrFloat32s.maybe(value_floats, name="value_floats"), + value_int=AttrInt64.maybe(value_int, name="value_int"), + value_ints=AttrInt64s.maybe(value_ints, name="value_ints"), + value_string=AttrString.maybe(value_string, name="value_string"), + value_strings=AttrStrings.maybe(value_strings, name="value_strings"), + ), + _Constant.Inputs(), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def 
constant_of_shape( @@ -1135,14 +1158,22 @@ def constant_of_shape( - T1: `tensor(int64)` - T2: `tensor(bfloat16)`, `tensor(bool)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _ConstantOfShape( - _ConstantOfShape.Attributes( - value=AttrTensor.maybe(value, name="value"), - ), - _ConstantOfShape.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _ConstantOfShape( + _ConstantOfShape.Attributes( + value=AttrTensor.maybe(value, name="value"), + ), + _ConstantOfShape.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def dequantize_linear( @@ -1213,17 +1244,27 @@ def dequantize_linear( - T1: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int8)`, `tensor(uint16)`, `tensor(uint4)`, `tensor(uint8)` - T2: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)` """ - return _DequantizeLinear( - _DequantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - block_size=AttrInt64(block_size, name="block_size"), - ), - _DequantizeLinear.Inputs( - x=x, - x_scale=x_scale, - x_zero_point=x_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + x_scale=x_scale, + x_zero_point=x_zero_point, + ) + output_vars = ( + _DequantizeLinear( + _DequantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + block_size=AttrInt64(block_size, name="block_size"), + ), + _DequantizeLinear.Inputs( + x=unwrap_vars(x), + x_scale=unwrap_vars(x_scale), + x_zero_point=unwrap_vars(x_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def flatten( @@ -1265,14 +1306,22 @@ def flatten( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Flatten( - _Flatten.Attributes( - axis=AttrInt64(axis, name="axis"), - ), - _Flatten.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Flatten( + _Flatten.Attributes( + axis=AttrInt64(axis, name="axis"), + ), + _Flatten.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def group_normalization( @@ -1354,18 +1403,28 @@ def group_normalization( Type constraints: - T: `tensor(bfloat16)`, `tensor(double)`, `tensor(float)`, `tensor(float16)` """ - return _GroupNormalization( - _GroupNormalization.Attributes( - epsilon=AttrFloat32(epsilon, name="epsilon"), - num_groups=AttrInt64(num_groups, name="num_groups"), - stash_type=AttrInt64(stash_type, name="stash_type"), - ), - _GroupNormalization.Inputs( - X=X, - scale=scale, - bias=bias, - ), - ).outputs.Y + input_prop_values 
= create_prop_dict( + X=X, + scale=scale, + bias=bias, + ) + output_vars = ( + _GroupNormalization( + _GroupNormalization.Attributes( + epsilon=AttrFloat32(epsilon, name="epsilon"), + num_groups=AttrInt64(num_groups, name="num_groups"), + stash_type=AttrInt64(stash_type, name="stash_type"), + ), + _GroupNormalization.Inputs( + X=unwrap_vars(X), + scale=unwrap_vars(scale), + bias=unwrap_vars(bias), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .Y + ) + return output_vars # type: ignore def identity( @@ -1393,12 +1452,20 @@ def identity( Type constraints: - V: `optional(seq(tensor(bool)))`, `optional(seq(tensor(complex128)))`, `optional(seq(tensor(complex64)))`, `optional(seq(tensor(double)))`, `optional(seq(tensor(float)))`, `optional(seq(tensor(float16)))`, `optional(seq(tensor(int16)))`, `optional(seq(tensor(int32)))`, `optional(seq(tensor(int64)))`, `optional(seq(tensor(int8)))`, `optional(seq(tensor(string)))`, `optional(seq(tensor(uint16)))`, `optional(seq(tensor(uint32)))`, `optional(seq(tensor(uint64)))`, `optional(seq(tensor(uint8)))`, `optional(tensor(bool))`, `optional(tensor(complex128))`, `optional(tensor(complex64))`, `optional(tensor(double))`, `optional(tensor(float))`, `optional(tensor(float16))`, `optional(tensor(int16))`, `optional(tensor(int32))`, `optional(tensor(int64))`, `optional(tensor(int8))`, `optional(tensor(string))`, `optional(tensor(uint16))`, `optional(tensor(uint32))`, `optional(tensor(uint64))`, `optional(tensor(uint8))`, `seq(tensor(bool))`, `seq(tensor(complex128))`, `seq(tensor(complex64))`, `seq(tensor(double))`, `seq(tensor(float))`, `seq(tensor(float16))`, `seq(tensor(int16))`, `seq(tensor(int32))`, `seq(tensor(int64))`, `seq(tensor(int8))`, `seq(tensor(string))`, `seq(tensor(uint16))`, `seq(tensor(uint32))`, `seq(tensor(uint64))`, `seq(tensor(uint8))`, `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Identity( - _Identity.Attributes(), - _Identity.Inputs( - input=input, - ), - ).outputs.output + input_prop_values = create_prop_dict( + input=input, + ) + output_vars = ( + _Identity( + _Identity.Attributes(), + _Identity.Inputs( + input=unwrap_vars(input), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def if_( @@ -1455,16 +1522,24 @@ def if_( """ _else_branch_subgraph: Graph = subgraph((), else_branch) _then_branch_subgraph: Graph = subgraph((), then_branch) - return _If( - _If.Attributes( - else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), - then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), - ), - _If.Inputs( - cond=cond, - ), - out_variadic=len(_else_branch_subgraph.requested_results), - ).outputs.outputs + input_prop_values = create_prop_dict( + cond=cond, + ) + output_vars = ( + _If( + _If.Attributes( + else_branch=AttrGraph(_else_branch_subgraph, name="else_branch"), + then_branch=AttrGraph(_then_branch_subgraph, name="then_branch"), + ), + _If.Inputs( + cond=unwrap_vars(cond), + ), + out_variadic=len(_else_branch_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .outputs + ) + return output_vars # type: ignore 
def loop( @@ -1648,17 +1723,27 @@ def loop( + [var.unwrap_type() for var in v_initial], body, ) - return _Loop( - _Loop.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - ), - _Loop.Inputs( - M=M, - cond=cond, - v_initial=v_initial, - ), - out_variadic=len(_body_subgraph.requested_results) - 1, - ).outputs.v_final_and_scan_outputs + input_prop_values = create_prop_dict( + M=M, + cond=cond, + v_initial=v_initial, + ) + output_vars = ( + _Loop( + _Loop.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + ), + _Loop.Inputs( + M=unwrap_vars(M), + cond=unwrap_vars(cond), + v_initial=unwrap_vars(v_initial), + ), + out_variadic=len(_body_subgraph.requested_results) - 1, + ) + .get_output_vars(input_prop_values=input_prop_values) + .v_final_and_scan_outputs + ) + return output_vars # type: ignore def pad( @@ -1821,17 +1906,28 @@ def pad( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - Tind: `tensor(int32)`, `tensor(int64)` """ - return _Pad( - _Pad.Attributes( - mode=AttrString(mode, name="mode"), - ), - _Pad.Inputs( - data=data, - pads=pads, - constant_value=constant_value, - axes=axes, - ), - ).outputs.output + input_prop_values = create_prop_dict( + data=data, + pads=pads, + constant_value=constant_value, + axes=axes, + ) + output_vars = ( + _Pad( + _Pad.Attributes( + mode=AttrString(mode, name="mode"), + ), + _Pad.Inputs( + data=unwrap_vars(data), + pads=unwrap_vars(pads), + constant_value=unwrap_vars(constant_value), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .output + ) + return output_vars # type: ignore def qlinear_matmul( @@ -1907,19 +2003,34 @@ def qlinear_matmul( - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` - T3: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int8)`, `tensor(uint8)` """ - return _QLinearMatMul( - _QLinearMatMul.Attributes(), - _QLinearMatMul.Inputs( - a=a, - a_scale=a_scale, - a_zero_point=a_zero_point, - b=b, - b_scale=b_scale, - b_zero_point=b_zero_point, - y_scale=y_scale, - y_zero_point=y_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + a=a, + a_scale=a_scale, + a_zero_point=a_zero_point, + b=b, + b_scale=b_scale, + b_zero_point=b_zero_point, + y_scale=y_scale, + y_zero_point=y_zero_point, + ) + output_vars = ( + _QLinearMatMul( + _QLinearMatMul.Attributes(), + _QLinearMatMul.Inputs( + a=unwrap_vars(a), + a_scale=unwrap_vars(a_scale), + a_zero_point=unwrap_vars(a_zero_point), + b=unwrap_vars(b), + b_scale=unwrap_vars(b_scale), + b_zero_point=unwrap_vars(b_zero_point), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def quantize_linear( @@ -2029,19 +2140,29 @@ def quantize_linear( - T1: `tensor(bfloat16)`, `tensor(float)`, `tensor(float16)`, `tensor(int32)` - T2: `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int4)`, `tensor(int8)`, `tensor(uint16)`, 
`tensor(uint4)`, `tensor(uint8)` """ - return _QuantizeLinear( - _QuantizeLinear.Attributes( - axis=AttrInt64(axis, name="axis"), - block_size=AttrInt64(block_size, name="block_size"), - output_dtype=AttrInt64(output_dtype, name="output_dtype"), - saturate=AttrInt64(saturate, name="saturate"), - ), - _QuantizeLinear.Inputs( - x=x, - y_scale=y_scale, - y_zero_point=y_zero_point, - ), - ).outputs.y + input_prop_values = create_prop_dict( + x=x, + y_scale=y_scale, + y_zero_point=y_zero_point, + ) + output_vars = ( + _QuantizeLinear( + _QuantizeLinear.Attributes( + axis=AttrInt64(axis, name="axis"), + block_size=AttrInt64(block_size, name="block_size"), + output_dtype=AttrInt64(output_dtype, name="output_dtype"), + saturate=AttrInt64(saturate, name="saturate"), + ), + _QuantizeLinear.Inputs( + x=unwrap_vars(x), + y_scale=unwrap_vars(y_scale), + y_zero_point=unwrap_vars(y_zero_point), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .y + ) + return output_vars # type: ignore def reshape( @@ -2095,15 +2216,24 @@ def reshape( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Reshape( - _Reshape.Attributes( - allowzero=AttrInt64(allowzero, name="allowzero"), - ), - _Reshape.Inputs( - data=data, - shape=shape, - ), - ).outputs.reshaped + input_prop_values = create_prop_dict( + data=data, + shape=shape, + ) + output_vars = ( + _Reshape( + _Reshape.Attributes( + allowzero=AttrInt64(allowzero, name="allowzero"), + ), + _Reshape.Inputs( + data=unwrap_vars(data), + shape=unwrap_vars(shape), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .reshaped + ) + return output_vars # type: ignore def scan( @@ -2327,26 +2457,38 @@ def scan( ], body, ) - return _Scan( - _Scan.Attributes( - body=AttrGraph(_body_subgraph, name="body"), - num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), - scan_input_axes=AttrInt64s.maybe(scan_input_axes, name="scan_input_axes"), - scan_input_directions=AttrInt64s.maybe( - scan_input_directions, name="scan_input_directions" - ), - scan_output_axes=AttrInt64s.maybe( - scan_output_axes, name="scan_output_axes" + input_prop_values = create_prop_dict( + initial_state_and_scan_inputs=initial_state_and_scan_inputs, + ) + output_vars = ( + _Scan( + _Scan.Attributes( + body=AttrGraph(_body_subgraph, name="body"), + num_scan_inputs=AttrInt64(num_scan_inputs, name="num_scan_inputs"), + scan_input_axes=AttrInt64s.maybe( + scan_input_axes, name="scan_input_axes" + ), + scan_input_directions=AttrInt64s.maybe( + scan_input_directions, name="scan_input_directions" + ), + scan_output_axes=AttrInt64s.maybe( + scan_output_axes, name="scan_output_axes" + ), + scan_output_directions=AttrInt64s.maybe( + scan_output_directions, name="scan_output_directions" + ), ), - scan_output_directions=AttrInt64s.maybe( - scan_output_directions, name="scan_output_directions" + _Scan.Inputs( + initial_state_and_scan_inputs=unwrap_vars( + initial_state_and_scan_inputs + ), ), - ), - _Scan.Inputs( - initial_state_and_scan_inputs=initial_state_and_scan_inputs, - ), - out_variadic=len(_body_subgraph.requested_results), - ).outputs.final_state_and_scan_outputs + 
out_variadic=len(_body_subgraph.requested_results), + ) + .get_output_vars(input_prop_values=input_prop_values) + .final_state_and_scan_outputs + ) + return output_vars # type: ignore def shape( @@ -2425,15 +2567,23 @@ def shape( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Shape( - _Shape.Attributes( - end=AttrInt64.maybe(end, name="end"), - start=AttrInt64(start, name="start"), - ), - _Shape.Inputs( - data=data, - ), - ).outputs.shape + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _Shape( + _Shape.Attributes( + end=AttrInt64.maybe(end, name="end"), + start=AttrInt64(start, name="start"), + ), + _Shape.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .shape + ) + return output_vars # type: ignore def size( @@ -2463,12 +2613,20 @@ def size( - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` - T1: `tensor(int64)` """ - return _Size( - _Size.Attributes(), - _Size.Inputs( - data=data, - ), - ).outputs.size + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _Size( + _Size.Attributes(), + _Size.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .size + ) + return output_vars # type: ignore def squeeze( @@ -2506,13 +2664,22 @@ def squeeze( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Squeeze( - _Squeeze.Attributes(), - _Squeeze.Inputs( - data=data, - axes=axes, - ), - ).outputs.squeezed + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _Squeeze( + _Squeeze.Attributes(), + _Squeeze.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .squeezed + ) + return output_vars # type: ignore def transpose( @@ -2549,14 +2716,22 @@ def transpose( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Transpose( - _Transpose.Attributes( - perm=AttrInt64s.maybe(perm, 
name="perm"), - ), - _Transpose.Inputs( - data=data, - ), - ).outputs.transposed + input_prop_values = create_prop_dict( + data=data, + ) + output_vars = ( + _Transpose( + _Transpose.Attributes( + perm=AttrInt64s.maybe(perm, name="perm"), + ), + _Transpose.Inputs( + data=unwrap_vars(data), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .transposed + ) + return output_vars # type: ignore def unsqueeze( @@ -2604,13 +2779,22 @@ def unsqueeze( Type constraints: - T: `tensor(bfloat16)`, `tensor(bool)`, `tensor(complex128)`, `tensor(complex64)`, `tensor(double)`, `tensor(float)`, `tensor(float16)`, `tensor(float8e4m3fn)`, `tensor(float8e4m3fnuz)`, `tensor(float8e5m2)`, `tensor(float8e5m2fnuz)`, `tensor(int16)`, `tensor(int32)`, `tensor(int4)`, `tensor(int64)`, `tensor(int8)`, `tensor(string)`, `tensor(uint16)`, `tensor(uint32)`, `tensor(uint4)`, `tensor(uint64)`, `tensor(uint8)` """ - return _Unsqueeze( - _Unsqueeze.Attributes(), - _Unsqueeze.Inputs( - data=data, - axes=axes, - ), - ).outputs.expanded + input_prop_values = create_prop_dict( + data=data, + axes=axes, + ) + output_vars = ( + _Unsqueeze( + _Unsqueeze.Attributes(), + _Unsqueeze.Inputs( + data=unwrap_vars(data), + axes=unwrap_vars(axes), + ), + ) + .get_output_vars(input_prop_values=input_prop_values) + .expanded + ) + return output_vars # type: ignore def const(value: npt.ArrayLike, dtype: npt.DTypeLike = None) -> Var: diff --git a/tests/test_adapt.py b/tests/test_adapt.py index e552110c..2a6a2450 100644 --- a/tests/test_adapt.py +++ b/tests/test_adapt.py @@ -19,6 +19,7 @@ from spox._graph import arguments, results from spox._node import OpType from spox._standard import StandardNode +from spox._var import _VarInfo @pytest.fixture @@ -83,11 +84,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - data: Var + data: _VarInfo @dataclass class Outputs(BaseOutputs): - squeezed: Var + squeezed: _VarInfo op_type = OpType("Squeeze", "", 11) @@ -97,9 +98,14 @@ class Outputs(BaseOutputs): def squeeze11(_data: Var, _axes: Iterable[int]): - return Squeeze11( - Squeeze11.Attributes(AttrInt64s(_axes, "axes")), Squeeze11.Inputs(_data) - ).outputs.squeezed + return ( + Squeeze11( + Squeeze11.Attributes(AttrInt64s(_axes, "axes")), + Squeeze11.Inputs(_data._var_info), + ) + .get_output_vars() + .squeezed + ) @pytest.fixture diff --git a/tests/test_constructors.py b/tests/test_constructors.py index 5dc3c641..55f0de1f 100644 --- a/tests/test_constructors.py +++ b/tests/test_constructors.py @@ -34,7 +34,10 @@ def test_variadic_no_input_list_mutation(onnx_helper): ins = [a, b] concat = op.concat(ins, axis=0) ins[1] = b - assert list(concat._op.inputs) == [a, b] + assert list(concat._op.inputs.get_var_infos().values()) == [ + a._var_info, + b._var_info, + ] def test_variadic_no_attr_mutation_array(onnx_helper): diff --git a/tests/test_custom_operator.py b/tests/test_custom_operator.py index 1c3c195c..66b369d0 100644 --- a/tests/test_custom_operator.py +++ b/tests/test_custom_operator.py @@ -19,6 +19,7 @@ from spox._graph import arguments, results from spox._node import Node, OpType from spox._type_system import Tensor, Type +from spox._var import _VarInfo # Define the Node for this operator - need to know the attributes, inputs and outputs statically @@ -32,18 +33,18 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo # This is optional, but is useful when defining the inference functions below. 
attrs: Attributes inputs: Inputs outputs: Outputs - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values) -> dict[str, Type]: # This is technically optional, but using an operator without type inference may be inconvenient. if self.inputs.X.type is None: return {} @@ -54,19 +55,23 @@ def infer_output_types(self) -> dict[str, Type]: ) return {"Y": t} - def propagate_values(self) -> dict[str, np.ndarray]: + def propagate_values(self, initializers) -> dict[str, np.ndarray]: # This is optional and implements value propagation ('partial data propagation' in ONNX). # In essence constant folding carried through for purposes of type inference. return ( - {"Y": np.linalg.inv(self.inputs.X._get_value())} - if self.inputs.X._value is not None + {"Y": np.linalg.inv(initializers["X"].value)} + if initializers["X"] is not None else {} ) # Define the operator constructor which is actually used def inverse(matrix: Var) -> Var: - return Inverse(Inverse.Attributes(), Inverse.Inputs(matrix)).outputs.Y + return ( + Inverse(Inverse.Attributes(), Inverse.Inputs(matrix._var_info)) + .get_output_vars(input_prop_values={"X": matrix._value}) + .Y + ) # Test the correct runtime behaviour with ORT diff --git a/tests/test_function.py b/tests/test_function.py index fd03d1b1..9933f611 100644 --- a/tests/test_function.py +++ b/tests/test_function.py @@ -14,12 +14,12 @@ import spox.opset.ai.onnx.v17 as op from spox._attributes import Attr, AttrFloat32, _Ref -from spox._fields import BaseAttributes, BaseInputs, BaseOutputs +from spox._fields import BaseAttributes, BaseInputs, BaseOutputs, BaseVars from spox._function import Function, to_function from spox._graph import arguments, results from spox._node import OpType from spox._type_system import Tensor -from spox._var import Var +from spox._var import Var, _VarInfo @pytest.fixture @@ -32,11 +32,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LinearFunction", "spox.test", 0) @@ -44,7 +44,7 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: + def constructor(self, attrs: dict[str, Attr], inputs: BaseVars) -> Outputs: # FIXME: At some point, attribute references should be properly type-hinted. 
a = op.constant( value_float=_Ref( @@ -57,18 +57,22 @@ def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: ) # type: ignore ) x = inputs.X - return self.Outputs(op.add(op.mul(a, x), b)) + return self.Outputs(op.add(op.mul(a, x), b)._var_info) def linear_inner( x: Var, a: Union[float, _Ref[float]], b: Union[float, _Ref[float]] ) -> Var: - return LinearFunction( - LinearFunction.Attributes( - slope_outer=AttrFloat32(a, "slope_outer"), - shift_outer=AttrFloat32(b, "shift_outer"), - ), - LinearFunction.Inputs(x), - ).outputs.Y + return ( + LinearFunction( + LinearFunction.Attributes( + slope_outer=AttrFloat32(a, "slope_outer"), + shift_outer=AttrFloat32(b, "shift_outer"), + ), + LinearFunction.Inputs(x._var_info), + ) + .get_output_vars(input_prop_values={"x": x._value}) + .Y + ) return linear_inner @@ -83,11 +87,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("LinearFunction2", "spox.test", 0) @@ -95,24 +99,29 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: + def constructor(self, attrs: dict[str, Attr], inputs: BaseVars) -> Outputs: return self.Outputs( linear( inputs.X, _Ref(attrs["slope1"], outer_name="slope1", name="slope_outer"), _Ref(attrs["shift1"], outer_name="shift1", name="shift_outer"), - ) + )._var_info ) def linear_inner( x: Var, a: Union[float, _Ref[float]], b: Union[float, _Ref[float]] ) -> Var: - return LinearFunction2( - LinearFunction2.Attributes( - slope1=AttrFloat32(a, name="slope1"), shift1=AttrFloat32(b, "shift1") - ), - LinearFunction2.Inputs(x), - ).outputs.Y + return ( + LinearFunction2( + LinearFunction2.Attributes( + slope1=AttrFloat32(a, name="slope1"), + shift1=AttrFloat32(b, "shift1"), + ), + LinearFunction2.Inputs(x._var_info), + ) + .get_output_vars({"X": x._value}) + .Y + ) return linear_inner @@ -129,11 +138,11 @@ class Attributes(BaseAttributes): @dataclass class Inputs(BaseInputs): - X: Var + X: _VarInfo @dataclass class Outputs(BaseOutputs): - Y: Var + Y: _VarInfo op_type = OpType("CubicFunction", "spox.test.extra", 0) @@ -141,7 +150,7 @@ class Outputs(BaseOutputs): inputs: Inputs outputs: Outputs - def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: + def constructor(self, attrs: dict[str, Attr], inputs: BaseVars) -> Outputs: x = inputs.X a = op.mul( linear( @@ -165,18 +174,22 @@ def constructor(self, attrs: dict[str, Attr], inputs: Inputs) -> Outputs: ), ) y = op.add(a, b) - return self.Outputs(y) + return self.Outputs(y._var_info) def cubic_inner(x: Var, a3: float, a2: float, a1: float, a0: float) -> Var: - return CubicFunction( - CubicFunction.Attributes( - a3=AttrFloat32(a3, name="a3"), - a2=AttrFloat32(a2, name="a2"), - a1=AttrFloat32(a1, name="a1"), - a0=AttrFloat32(a0, name="a0"), - ), - CubicFunction.Inputs(X=x), - ).outputs.Y + return ( + CubicFunction( + CubicFunction.Attributes( + a3=AttrFloat32(a3, name="a3"), + a2=AttrFloat32(a2, name="a2"), + a1=AttrFloat32(a1, name="a1"), + a0=AttrFloat32(a0, name="a0"), + ), + CubicFunction.Inputs(X=x._var_info), + ) + .get_output_vars() + .Y + ) return cubic_inner diff --git a/tests/test_value_propagation.py b/tests/test_value_propagation.py index c26d4de1..e33cb581 100644 --- a/tests/test_value_propagation.py +++ b/tests/test_value_propagation.py @@ -13,6 +13,7 @@ from spox._graph import arguments, results from spox._shape import Shape from 
spox._value_prop import ORTValue, PropValue +from spox._var import _VarInfo @pytest.fixture( @@ -27,7 +28,7 @@ def value_prop_backend(request): def dummy_var(typ=None, value=None): """Function for creating a ``var`` without an operator but with a type and value.""" - return Var(None, typ, value) # type: ignore + return Var(_VarInfo(None, typ), value) # type: ignore def assert_equal_value(var: Var, expected: ORTValue): @@ -121,6 +122,15 @@ def test_empty_optional_has_no_element(): ) +@pytest.mark.parametrize("min", [None, 2]) +def test_optional_clip(min): + min_var = None if min is None else op.const(min) + assert_equal_value( + op.clip(op.const([1, 2, 3]), min=min_var, max=op.const(3)), + np.clip([1, 2, 3], a_min=min, a_max=3), + ) + + def test_sequence_empty(): assert_equal_value(op.sequence_empty(dtype=np.float32), []) @@ -132,6 +142,13 @@ def test_sequence_append(): ) +def test_variadict_max(): + a = op.const([2, 1, 4]) + b = op.const(3) + c = op.const([0]) + assert_equal_value(op.max([a, b, c]), [3, 3, 4]) + + def test_with_reconstruct(): a, b = arguments( a=_type_system.Tensor(np.int64, ()), diff --git a/tools/templates/class.jinja2 b/tools/templates/class.jinja2 index b2553675..d3367d2b 100644 --- a/tools/templates/class.jinja2 +++ b/tools/templates/class.jinja2 @@ -14,11 +14,11 @@ class _{{ schema.name }}(StandardNode): {% for input in schema.inputs %} {{ input.name }}: {% if is_optional(input) - %}Optional[Var]{% + %}Optional[_VarInfo]{% elif is_variadic(input) - %}Sequence[Var]{% + %}Sequence[_VarInfo]{% else - %}Var{% + %}_VarInfo{% endif %} {% endfor %} @@ -33,11 +33,11 @@ class _{{ schema.name }}(StandardNode): {% for output in schema.outputs %} {{ output.name }}: {% if is_optional(output) - %}Optional[Var]{% + %}Optional[_VarInfo]{% elif is_variadic(output) - %}Sequence[Var]{% + %}Sequence[_VarInfo]{% else - %}Var{% + %}_VarInfo{% endif %} {% endfor %} @@ -47,14 +47,14 @@ class _{{ schema.name }}(StandardNode): {% endif %} {% if type_inference %} - def infer_output_types(self) -> dict[str, Type]: + def infer_output_types(self, input_prop_values: PropDict) -> dict[str, Type]: {% filter indent(width=8) %} {%+ include type_inference %} {% endfilter %} {% endif %} {% if value_propagation %} - def propagate_values(self) -> dict[str, PropValueType]: + def propagate_values(self, input_prop_values: PropDict) -> dict[str, PropValueType]: {% filter indent(width=8) %} {%+ include value_propagation %} {% endfilter %} diff --git a/tools/templates/construct.jinja2 b/tools/templates/construct.jinja2 index 53f76989..8f159634 100644 --- a/tools/templates/construct.jinja2 +++ b/tools/templates/construct.jinja2 @@ -14,7 +14,12 @@ _{{ attr.name }}_subgraph: Graph = subgraph( ) {% endif %} {% endfor %} -return _{{ schema.name }}( +input_prop_values = create_prop_dict( +{% for param in schema.inputs + %}{{param.name}}={{param.name}}, {% +endfor %} + ) +output_vars = _{{ schema.name }}( _{{ schema.name }}.Attributes( {% for attr in attributes %} {% if attr.constructor_type_hint.startswith("Optional[") and attr.constructor_type_hint.endswith("]") %} @@ -31,13 +36,16 @@ return _{{ schema.name }}( {% endfor %} ), _{{ schema.name }}.Inputs( {% for param in schema.inputs - %}{{param.name}}={{param.name}}, {% + %}{{param.name}}=unwrap_vars({{param.name}}), {% endfor %} ), {% if schema.outputs and is_variadic(schema.outputs[-1]) %}out_variadic={{ out_variadic_solution if out_variadic_solution else "{}_count".format(schema.outputs[-1].name) }}, {% -endif %}).outputs{% +endif %} + 
).get_output_vars(input_prop_values=input_prop_values){% if schema.outputs | length <= 1 %}.{{ schema.outputs[0].name }}{% else %}._unpack_to_any(){% endif %} + +return output_vars # type: ignore diff --git a/tools/templates/preamble.jinja2 b/tools/templates/preamble.jinja2 index e4e320f3..b0dd5d63 100644 --- a/tools/templates/preamble.jinja2 +++ b/tools/templates/preamble.jinja2 @@ -2,12 +2,11 @@ import typing import warnings from dataclasses import dataclass +from collections.abc import Iterable, Sequence from typing import ( Any, Callable, - Iterable, Optional, - Sequence, Union, ) from typing import cast as typing_cast @@ -15,7 +14,7 @@ from typing import cast as typing_cast import numpy as np import numpy.typing as npt -from spox._var import Var, result_type +from spox._var import Var, _VarInfo, result_type, unwrap_vars, get_value, create_prop_dict from spox._fields import BaseAttributes, BaseInputs, BaseOutputs from spox._attributes import ( AttrDtype, @@ -34,4 +33,4 @@ from spox._internal_op import intro from spox._node import OpType from spox._standard import InferenceError, StandardNode from spox._type_system import Tensor, Type, Sequence as SpoxSequence -from spox._value_prop import PropValueType +from spox._value_prop import PropValueType, PropDict diff --git a/tools/templates/type_inference/compress11.jinja2 b/tools/templates/type_inference/compress11.jinja2 index 4fe26383..a2f9b24f 100644 --- a/tools/templates/type_inference/compress11.jinja2 +++ b/tools/templates/type_inference/compress11.jinja2 @@ -1,4 +1,4 @@ -self.infer_output_types_onnx() +self.infer_output_types_onnx(input_prop_values) inp, cond = self.inputs.input.unwrap_tensor(), self.inputs.condition.unwrap_tensor() if not inp.shape: return {'output': Tensor(inp.dtype, None)} @@ -14,4 +14,4 @@ if self.attrs.axis is not None: shape[axis] = None else: shape = [None] -return {'output': Tensor(inp.dtype, tuple(shape))} \ No newline at end of file +return {'output': Tensor(inp.dtype, tuple(shape))} diff --git a/tools/templates/type_inference/loop16-fix.jinja2 b/tools/templates/type_inference/loop16-fix.jinja2 index 775e9d57..b797693c 100644 --- a/tools/templates/type_inference/loop16-fix.jinja2 +++ b/tools/templates/type_inference/loop16-fix.jinja2 @@ -1,9 +1,9 @@ -output_types = super().infer_output_types() +output_types = super().infer_output_types({}) body = self.attrs.body.value n = len(body.requested_arguments) - 2 -carried_names = list(self.outputs.get_vars())[:n] +carried_names = list(self.outputs.get_var_infos())[:n] carried_types = [v.type for v in list(body.requested_results.values())[1:][:n]] for name, typ in zip(carried_names, carried_types):