From 78cd014692f8b26628a69bbbd94acf2f9169998f Mon Sep 17 00:00:00 2001 From: "Rickert, Jonas" Date: Thu, 30 Jan 2025 08:39:24 +0000 Subject: [PATCH] Bump various ops to opset 22, adding bf16 support Bumped ops: Acos Acosh Asin Asinh Atan Atanh AveragePool Bernoulli Conv ConvTranspose Cos Cosh DeformConv Det Dropout Elu EyeLike GRU GlobalAveragePool GlobalMaxPool HardSigmoid HardSwish InstanceNormalization LSTM LpNormalization LpPool MaxPool MaxRoiPool MaxUnpool Mish Multinomial NegativeLogLikelihoodLoss RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike RoiAlign Round Selu Sin Sinh Softplus Softsign Tan ThresholdedRelu Signed-off-by: Rickert, Jonas --- docs/Dialects/onnx.md | 755 +++++++++++++++++--------------- src/Builder/OpBuildTable.inc | 92 ++-- src/Dialect/ONNX/ONNXOps.td.inc | 280 ++++++------ utils/gen_onnx_mlir.py | 92 ++-- 4 files changed, 631 insertions(+), 588 deletions(-) diff --git a/docs/Dialects/onnx.md b/docs/Dialects/onnx.md index 3996ad35d6..18a59bd4cc 100644 --- a/docs/Dialects/onnx.md +++ b/docs/Dialects/onnx.md @@ -7,7 +7,7 @@ Absolute takes one input data (Tensor) and produces one output data (Tensor) where absolute value, y = abs(x), is applied to the tensor elementwise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -31,7 +31,7 @@ _ONNX Acos operation_ Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -41,13 +41,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Acosh` (ONNXAcoshOp) @@ -55,7 +55,7 @@ _ONNX Acosh operation_ Calculates the hyperbolic arccosine of the given input tensor element-wise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -65,13 +65,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Adagrad` (ONNXAdagradOp) @@ -128,7 +128,7 @@ Compute one iteration of ADAGRAD, a stochastic gradient based optimization In that reference paper, this operator is a special case of the Figure 1's composite mirror descent update. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -223,7 +223,7 @@ Compute one iteration of Adam, a stochastic gradient based optimization If there are multiple inputs to be optimized, the pseudo code will be applied independently to each of them. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -264,7 +264,7 @@ This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; fo (Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<14>`, `SameOperandsAndResultElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -292,7 +292,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<7>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -323,7 +323,7 @@ is selected if the max appears more than once in the input. Otherwise the index first occurrence is selected. The type of the output tensor is integer. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -362,7 +362,7 @@ is selected if the min appears more than once in the input. Otherwise the index first occurrence is selected. The type of the output tensor is integer. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -396,7 +396,7 @@ _ONNX ArrayFeatureExtractor operation_ Select elements of the input tensor based on the indices passed.
The indices are applied to the last axes of the tensor. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -421,7 +421,7 @@ _ONNX Asin operation_ Calculates the arcsine (inverse of sine) of the given input tensor, element-wise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -431,13 +431,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Asinh` (ONNXAsinhOp) @@ -445,7 +445,7 @@ _ONNX Asinh operation_ Calculates the hyperbolic arcsine of the given input tensor element-wise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -455,13 +455,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Atan` (ONNXAtanOp) @@ -469,7 +469,7 @@ _ONNX Atan operation_ Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -479,13 +479,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Atanh` (ONNXAtanhOp) @@ -493,7 +493,7 @@ _ONNX Atanh operation_ Calculates the hyperbolic arctangent of the given input tensor element-wise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -503,13 +503,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.AveragePool` (ONNXAveragePoolOp) @@ -529,7 +529,7 @@ AveragePool consumes an input tensor X and applies average pooling across ``` output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) ``` - if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. + if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored. `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled: ``` @@ -548,7 +548,7 @@ AveragePool consumes an input tensor X and applies average pooling across The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero). 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -571,13 +571,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.BatchNormalizationInferenceMode` (ONNXBatchNormalizationInferenceModeOp) @@ -600,7 +600,7 @@ by an argument that is present) may also be simply omitted. This operation is not part of the standard and was added to assist onnx-mlir. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<15>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -674,7 +674,7 @@ For previous (depreciated) non-spatial cases, implementors are suggested to flatten the input shape to (N x C * D1 * D2 * ... * Dn) before a BatchNormalization Op. This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<15>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -707,6 +707,55 @@ Effects: `MemoryEffects::Effect{}` | `running_mean` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values or none type | `running_var` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values or none type +### `onnx.BatchNormalizationV9` (ONNXBatchNormalizationV9Op) + +_ONNX BatchNormalization operation_ + +Carries out batch normalization as described in the paper +https://arxiv.org/abs/1502.03167. Depending on the mode it is being run, +there are multiple cases for the number of outputs, which we list below: + +Output case #1: Y, mean, var, saved_mean, saved_var (training mode) +Output case #2: Y (test mode) + +For previous (depreciated) non-spatial cases, implementors are suggested +to flatten the input shape to (N x C*D1*D2 ..*Dn) before a BatchNormalization Op. +This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. + +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<9>` + +Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` + +Effects: `MemoryEffects::Effect{}` + +#### Attributes: + + + + + +
AttributeMLIR TypeDescription
epsilon::mlir::FloatAttr32-bit float attribute
momentum::mlir::FloatAttr32-bit float attribute
+ +#### Operands: + +| Operand | Description | +| :-----: | ----------- | +| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `scale` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `B` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `mean` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `var` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values + +#### Results: + +| Result | Description | +| :----: | ----------- | +| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `out_mean` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `out_var` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `saved_mean` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `saved_var` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type + ### `onnx.Bernoulli` (ONNXBernoulliOp) _ONNX Bernoulli operation_ @@ -718,7 +767,7 @@ where an output of 1 is produced with probability p and an output of 0 is produc This operator is non-deterministic and may not produce the same values in different implementations (even if a seed is specified). 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -736,13 +785,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values or tensor of 8-bit unsigned integer values or tensor of 16-bit unsigned integer values or tensor of 32-bit unsigned integer values or tensor of 64-bit unsigned integer values or tensor of 8-bit signless integer values or tensor of 16-bit signless integer values or tensor of 32-bit signless integer values or tensor of 64-bit signless integer values or tensor of 1-bit signless integer values +| `output` | tensor of 8-bit unsigned integer values or tensor of 16-bit unsigned integer values or tensor of 32-bit unsigned integer values or tensor of 64-bit unsigned integer values or tensor of 8-bit signless integer values or tensor of 16-bit signless integer values or tensor of 32-bit signless integer values or tensor of 64-bit signless integer values or tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of 1-bit signless integer values ### `onnx.Binarizer` (ONNXBinarizerOp) @@ -750,7 +799,7 @@ _ONNX Binarizer operation_ Maps the values of the input tensor to either 0 or 1, element-wise, based on the outcome of a comparison against a threshold value. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -792,7 +841,7 @@ Because this operator supports Numpy-style broadcasting, X's and Y's shapes are not necessarily identical. This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -827,7 +876,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -852,7 +901,7 @@ _ONNX BitwiseNot operation_ Returns the bitwise not of the input tensor element-wise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -879,7 +928,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -907,7 +956,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -932,7 +981,7 @@ _ONNX BlackmanWindow operation_ Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<17>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -966,7 +1015,7 @@ The operator casts the elements of a given input tensor (the first input) to the same data type as the elements of the second input tensor. See documentation of the Cast operator for further details. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1000,7 +1049,7 @@ Converts a map to a tensor.
The map key must be an int64 and the values will in ascending order based on this key.
The operator supports dense packing or sparse packing. If using sparse packing, the key cannot exceed the max_map-1 value. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1096,7 +1145,7 @@ The rules then become: | [x] < -FLT_MAX | NaN | NaN | -Inf | NaN | | else | RNE | RNE | RNE | RNE | -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ResultTypeInferenceOpInterface`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1135,7 +1184,7 @@ Converts strings to integers and vice versa.
If the string default value is set, it will convert integers to strings. If the int default value is set, it will convert strings to integers. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1171,7 +1220,7 @@ Ceil takes one input data (Tensor) and produces one output data (Tensor) where the ceil is, y = ceil(x), is applied to the tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is returned. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1201,7 +1250,7 @@ using formula: max(0,x) + min(0,alpha*(exp(x/alpha)-1)) ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<12>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1218,13 +1267,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 32-bit float values +| `X` | tensor of 32-bit float values or tensor of bfloat16 type values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 32-bit float values +| `Y` | tensor of 32-bit float values or tensor of bfloat16 type values ### `onnx.CenterCropPad` (ONNXCenterCropPadOp) @@ -1239,7 +1288,7 @@ If the input dimensions are bigger than the crop shape, a centered cropping wind If the input dimensions are smaller than the crop shape, the input is padded on each side equally, so that the input is centered in the output. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1273,7 +1322,7 @@ Clip operator limits the given input within an interval. The interval is specified by the inputs 'min' and 'max'. They default to numeric_limits::lowest() and numeric_limits::max(), respectively. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1301,7 +1350,7 @@ Clip operator limits the given input within an interval. The interval is specified by the inputs 'min' and 'max'. They default to numeric_limits::lowest() and numeric_limits::max(), respectively. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1329,7 +1378,7 @@ Clip operator limits the given input within an interval. The interval is specified by the inputs 'min' and 'max'. They default to numeric_limits::lowest() and numeric_limits::max(), respectively. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<12>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1357,7 +1406,7 @@ Clip operator limits the given input within an interval. The interval is specified with arguments 'min' and 'max'. They default to numeric_limits::lowest() and numeric_limits::max() respectively. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<6>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1398,7 +1447,7 @@ NOTE: convolution formulas, it is required as input for more advanced scenarios as explained at PyTorch's implementation (https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/Col2Im.cpp#L10) -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1436,7 +1485,7 @@ Selects slices from an input tensor along a given axis where condition evaluates Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1471,7 +1520,7 @@ All input tensors must have the same shape, except for the dimension size of the By default 'new_axis' is 0, the behavior is similar to numpy.concatenate. When 'new_axis' is 1, the behavior is similar to numpy.stack. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1503,7 +1552,7 @@ _ONNX Concat operation_ Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1574,7 +1623,7 @@ _ONNX ConstantOfShape operation_ Generate a tensor with given value and shape. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<20>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ResultTypeInferenceOpInterface`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1606,7 +1655,7 @@ _ONNX Constant operation_ This operator produces a constant tensor. Exactly one of the provided attributes, either value, sparse_value, or value_* must be specified. -Traits: `AlwaysSpeculatableImplTrait`, `ConstantLike` +Traits: `AlwaysSpeculatableImplTrait`, `ConstantLike`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ResultTypeInferenceOpInterface`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1639,7 +1688,7 @@ _ONNX ConvInteger operation_ The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point, and computes the output. The production MUST never overflow. The accumulation may overflow if and only if in 32 bits. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<10>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1679,7 +1728,7 @@ _ONNX Conv operation_ The convolution operator consumes an input tensor and a filter, and computes the output. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1701,15 +1750,15 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `W` | tensor of 16-bit float values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `B` | tensor of 16-bit float values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `W` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `B` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.ConvTranspose` (ONNXConvTransposeOp) @@ -1730,7 +1779,7 @@ output_shape can also be explicitly specified in which case pads values are auto -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1754,15 +1803,15 @@ Effects: `MemoryEffects::Effect{}` | 
Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `W` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `B` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `W` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `B` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Cos` (ONNXCosOp) @@ -1770,7 +1819,7 @@ _ONNX Cos operation_ Calculates the cosine of the given input tensor, element-wise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1780,13 +1829,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Cosh` (ONNXCoshOp) @@ -1794,7 +1843,7 @@ _ONNX Cosh operation_ Calculates the hyperbolic cosine of the given input tensor element-wise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1804,13 +1853,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.CumSum` (ONNXCumSumOp) @@ -1837,7 +1886,7 @@ output = [5, 3, 0] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<14>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -1970,7 +2019,7 @@ The actual shape of the output is specified in the \"output\" section. Reference: https://docs.scipy.org/doc/scipy/tutorial/fft.html -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<20>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2004,7 +2053,7 @@ _ONNX DFT operation_ Computes the discrete Fourier transform of input. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<17>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2039,7 +2088,7 @@ _ONNX DeformConv operation_ Performs deformable convolution as described in https://arxiv.org/abs/1703.06211 and https://arxiv.org/abs/1811.11168. This operator specification supports the general N-D case. Note that most common use cases have 2D or 3D data. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2061,17 +2110,17 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `W` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `offset` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `B` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type -| `mask` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `W` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `offset` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `B` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none 
type +| `mask` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.DepthToSpace` (ONNXDepthToSpaceOp) @@ -2101,7 +2150,7 @@ tmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3]) y = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize]) ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2139,7 +2188,7 @@ there's no zero point (zero point is supposed to be 0). `zero-point` is usually not used in the case of float8e4m3fn, float8e4m3fnuz, float8e5m2, float8e5m2fnuz quantization, but the dequantization formula remains the same for consistency and 'x_scale' still determines the output type. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2176,7 +2225,7 @@ and the inner-most 2 dimensions form square matrices. The output is a tensor of shape `[*]`, containing the determinants of all input submatrices. e.g., When the input is 2-D, the output is a scalar(shape is empty: `[]`). 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2186,13 +2235,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.DictVectorizer` (ONNXDictVectorizerOp) @@ -2211,7 +2260,7 @@ Uses an index mapping to convert a dictionary to an array.
then an input of ``{\"a\": 4, \"c\": 8}`` will produce an output of ``[4, 8, 0, 0]``. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2322,7 +2371,7 @@ This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; fo (Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<14>`, `SameOperandsAndResultElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2358,7 +2407,7 @@ scale = 1. / (1. - ratio). ``` This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2375,15 +2424,15 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `data` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values -| `ratio` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `data` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of f8E4M3FN type values or tensor of f8E4M3FNUZ type values or tensor of f8E5M2 type values or tensor of f8E5M2FNUZ type values +| `ratio` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of f8E4M3FN type values or tensor of f8E4M3FNUZ type values or tensor of f8E5M2 type values or tensor of f8E5M2FNUZ type values or none type | `training_mode` | tensor of 1-bit signless integer values or none type #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of f8E4M3FN type values or tensor of f8E4M3FNUZ type values or tensor of f8E5M2 type values or tensor of f8E5M2FNUZ type values | `mask` | tensor of 1-bit signless integer values or none type ### `onnx.DynamicQuantizeLinear` (ONNXDynamicQuantizeLinearOp) @@ -2418,7 +2467,7 @@ y = saturate (round (x / y_scale) + y_zero_point) * for saturation, it saturates to [0, 255] if 
it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.
* rounding to nearest ties to even.

-Traits: `AlwaysSpeculatableImplTrait`
+Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>`

Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface`

@@ -2468,7 +2517,7 @@ Specifically, every occurrence of ellipsis in the equation must represent the sa
The right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the beginning of the output. The equation string may contain space (U+0020) character.

-Traits: `AlwaysSpeculatableImplTrait`
+Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<12>`

Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface`

@@ -2502,7 +2551,7 @@ Elu takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the function `f(x) = alpha * (exp(x) - 1.) for x < 0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2519,13 +2568,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.EntryPoint` (ONNXEntryPointOp) @@ -2551,7 +2600,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>`, `SameOperandsElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2576,7 +2625,7 @@ _ONNX Erf operation_ Computes the error function of the given input tensor element-wise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2600,7 +2649,7 @@ _ONNX Exp operation_ Calculates the exponential of the given input tensor, element-wise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2631,7 +2680,7 @@ but the major difference is numpy.broadcast_to() does not allow shape to be smal It is possible that the output.shape is not equal to shape, when some dimensions in shape is equal to 1, or the shape.ndim < input.shape.ndim. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2662,9 +2711,9 @@ is populated with ones, but attribute 'k' can be used to populate upper or lower The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the TensorProto message and be valid as an output type. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` -Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` +Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ResultTypeInferenceOpInterface`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` Effects: `MemoryEffects::Effect{}` @@ -2680,13 +2729,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of 8-bit signless integer values or tensor of 16-bit signless integer values or tensor of 32-bit signless integer values or tensor of 64-bit signless integer values or tensor of 8-bit unsigned integer values or tensor of 16-bit unsigned integer values or tensor of 32-bit unsigned integer values or tensor of 64-bit unsigned integer values or 
tensor of 1-bit signless integer values +| `input` | tensor of 8-bit unsigned integer values or tensor of 16-bit unsigned integer values or tensor of 32-bit unsigned integer values or tensor of 64-bit unsigned integer values or tensor of 8-bit signless integer values or tensor of 16-bit signless integer values or tensor of 32-bit signless integer values or tensor of 64-bit signless integer values or tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of 1-bit signless integer values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of 8-bit signless integer values or tensor of 16-bit signless integer values or tensor of 32-bit signless integer values or tensor of 64-bit signless integer values or tensor of 8-bit unsigned integer values or tensor of 16-bit unsigned integer values or tensor of 32-bit unsigned integer values or tensor of 64-bit unsigned integer values or tensor of 1-bit signless integer values +| `output` | tensor of 8-bit unsigned integer values or tensor of 16-bit unsigned integer values or tensor of 32-bit unsigned integer values or tensor of 64-bit unsigned integer values or tensor of 8-bit signless integer values or tensor of 16-bit signless integer values or tensor of 32-bit signless integer values or tensor of 64-bit signless integer values or tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of 1-bit signless integer values ### `onnx.FeatureVectorizer` (ONNXFeatureVectorizerOp) @@ -2697,7 +2746,7 @@ Concatenates input tensors into one continuous output.
Inputs are copied to the output maintaining the order of the input arguments.
All inputs must be integers or floats, while the output will be all floating point values. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2730,7 +2779,7 @@ Flattens the input tensor into a 2D matrix. If input tensor has shape (d_0, d_1, ... d_n) then the output will have shape (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2763,7 +2812,7 @@ Floor takes one input data (Tensor) and produces one output data (Tensor) where the floor is, y = floor(x), is applied to the tensor elementwise. If x is integral, +0, -0, NaN, or infinite, x itself is returned. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2833,7 +2882,7 @@ Equations (Default: f=Sigmoid, g=Tanh): * Ht = (1 - zt) (.) ht + zt (.) Ht-1 This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -2857,19 +2906,19 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `W` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `R` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `B` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `W` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `R` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `B` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type | `sequence_lens` | tensor of 32-bit signless integer values or none type -| `initial_h` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `initial_h` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type -| `Y_h` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float 
values or none type +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `Y_h` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type ### `onnx.GatherElements` (ONNXGatherElementsOp) @@ -2927,7 +2976,7 @@ output = [ ] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3042,7 +3091,7 @@ indices = [[1],[0]] # indices_shape = [2, 1] output = [[2,3],[4,5]] # output_shape = [2, 2] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3121,7 +3170,7 @@ output = [ ] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3159,7 +3208,7 @@ $y = 0.5 * x * (1 + Tanh(sqrt(2/\pi) * (x + 0.044715 * x^3)))$ is used and appli to the tensor elementwise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<20>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3201,7 +3250,7 @@ computation if attribute transA is non-zero, same for B and transB. This operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](Broadcasting.md). This operator has **optional** inputs/outputs. 
See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3239,7 +3288,7 @@ GlobalAveragePool consumes an input tensor X and applies average pooling across the values in the same channel. This is equivalent to AveragePool with kernel size equal to the spatial dimension of input tensor. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3249,13 +3298,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.GlobalLpPool` (ONNXGlobalLpPoolOp) @@ -3265,7 +3314,7 @@ GlobalLpPool consumes an input tensor X and applies lp pool pooling across the values in the same channel. This is equivalent to LpPool with kernel size equal to the spatial dimension of input tensor. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<2>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3298,7 +3347,7 @@ GlobalMaxPool consumes an input tensor X and applies max pooling across the values in the same channel. This is equivalent to MaxPool with kernel size equal to the spatial dimension of input tensor. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3308,13 +3357,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Gradient` (ONNXGradientOp) @@ -3444,7 +3493,7 @@ forward pass can be reused if the gradient is computed via reverse-mode auto-differentiation. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3480,7 +3529,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>`, `SameOperandsElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3508,7 +3557,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<16>`, `SameOperandsElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3545,7 +3594,7 @@ They are used to interpolate output values of `Y[N, C, H_out, W_out]`. The GridSample operator is often used in doing grid generator and sampler in the [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025). See also in [torch.nn.functional.grid_sample](https://pytorch.org/docs/master/generated/torch.nn.functional.grid_sample.html#torch-nn-functional-grid-sample). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<16>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3601,7 +3650,7 @@ When the number of groups is the same as the number of channels, this operator i equivalent to InstanceNormalization. When there is only one group, this operator is equivalent to LayerNormalization. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<21>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3650,7 +3699,7 @@ When the number of groups is the same as the number of channels, this operator i equivalent to InstanceNormalization. When there is only one group, this operator is equivalent to LayerNormalization. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3684,7 +3733,7 @@ _ONNX HammingWindow operation_ Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<17>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3716,7 +3765,7 @@ _ONNX HannWindow operation_ Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<17>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3750,7 +3799,7 @@ HardSigmoid takes one input data (Tensor) and produces one output data (Tensor) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), is applied to the tensor elementwise. 
-Traits: `AlwaysSpeculatableImplTrait`
+Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>`

Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface`

@@ -3768,13 +3817,13 @@ Effects: `MemoryEffects::Effect{}`

| Operand | Description |
| :-----: | ----------- |
-| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values
+| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values

#### Results:

| Result | Description |
| :----: | ----------- |
-| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values
+| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values

### `onnx.HardSwish` (ONNXHardSwishOp)

_ONNX HardSwish operation_

HardSwish takes one input data (Tensor<T>) and produces one output data (Tensor<T>) where the HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid<alpha, beta>(x), where alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3794,13 +3843,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Hardmax` (ONNXHardmaxOp) @@ -3814,7 +3863,7 @@ The \"axis\" attribute indicates the dimension along which Hardmax will be performed. The output tensor has the same shape and contains the Hardmax values of the corresponding input. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3845,7 +3894,7 @@ _ONNX Identity operation_ Identity operator -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3869,7 +3918,7 @@ _ONNX If operation_ If conditional -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `HasOnnxSubgraphOpInterface`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ResultTypeInferenceOpInterface`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3900,7 +3949,7 @@ Replaces inputs that equal one value with another, leaving all other elements al which one depends on whether floats or integers are being processed.
The imputed_value attribute length can be 1 element, or it can have one element per input feature.
In other words, if the input tensor has the shape [*,F], then the length of the attribute array may be 1 or F. If it is 1, then it is broadcast along the last dimension and applied to each feature. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3939,7 +3988,7 @@ y = scale * (x - mean) / sqrt(variance + epsilon) + B, where mean and variance are computed per instance per channel. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -3956,15 +4005,15 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `scale` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `B` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `scale` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `B` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### 
`onnx.IsInf` (ONNXIsInfOp) @@ -3972,7 +4021,7 @@ _ONNX IsInf operation_ Map infinity to true and other values to false. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<20>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4004,7 +4053,7 @@ _ONNX IsNaN operation_ Returns which elements of the input are NaN. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<20>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4037,7 +4086,7 @@ where `max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) `Y[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4120,7 +4169,7 @@ Equations (Default: f=Sigmoid, g=Tanh, h=Tanh): * Ht = ot (.) h(Ct) This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4144,10 +4193,10 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `W` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `R` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `B` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `W` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `R` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `B` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type | `sequence_lens` | tensor of 32-bit signless integer values or none type | `initial_h` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type | `initial_c` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type @@ -4157,9 +4206,9 @@ Effects: `MemoryEffects::Effect{}` | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type -| `Y_h` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit 
float values or none type -| `Y_c` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `Y_h` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `Y_c` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type ### `onnx.LabelEncoder` (ONNXLabelEncoderOp) @@ -4183,7 +4232,7 @@ Maps each element in the input tensor to another value.
For key look-up, bit-wise comparison is used so even a float NaN can be mapped to a value in 'values_*' attribute.
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<2>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4262,7 +4311,7 @@ This is layer normalization defined in ONNX as function. (tensors `Scale` and `B` should be unidirectional broadcastable to tensor `X`); for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<17>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4344,7 +4393,7 @@ LeakyRelu takes input data (Tensor) and an argument alpha, and produces one output data (Tensor) where the function `f(x) = alpha * x for x < 0`, `f(x) = x for x >= 0`, is applied to the data tensor elementwise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<16>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4378,7 +4427,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>`, `SameOperandsElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4406,7 +4455,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<16>`, `SameOperandsElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4431,7 +4480,7 @@ _ONNX LinearClassifier operation_ Linear classifier -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4473,7 +4522,7 @@ Generalized linear regression evaluation.
The coefficients array is of length n, and the coefficients for each target are contiguous. Intercepts are optional but if provided must match the number of targets. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4507,7 +4556,7 @@ _ONNX Log operation_ Calculates the natural log of the given input tensor, element-wise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4537,7 +4586,7 @@ The \"axis\" attribute indicates the dimension along which LogSoftmax will be performed. The output tensor has the same shape and contains the LogSoftmax values of the corresponding input. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4702,7 +4751,7 @@ point-wise operators (e.g. dropout, residual connections, linear layer). The input/output of subgraph (produced by loop node) matching is based on order instead of name. The implementation will figure out the names based on this order. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `HasOnnxSubgraphOpInterface`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ResultTypeInferenceOpInterface`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4728,7 +4777,7 @@ _ONNX LpNormalization operation_ Given a matrix, apply Lp-normalization along the provided axis. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4746,13 +4795,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.LpPool` (ONNXLpPoolOp) @@ -4782,7 +4831,7 @@ LpPool consumes an input tensor X and applies Lp pooling across pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + {kernelSpatialShape} - input_spatial_shape[i] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4805,13 +4854,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values 
or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.MatMulInteger` (ONNXMatMulIntegerOp) @@ -4820,7 +4869,7 @@ _ONNX MatMulInteger operation_ Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html). The production MUST never overflow. The accumulation may overflow if and only if in 32 bits. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<10>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4847,7 +4896,7 @@ _ONNX MatMul operation_ Matrix product that behaves like [numpy.matmul](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4874,7 +4923,7 @@ Element-wise max of each of the input tensors (with Numpy-style broadcasting sup All inputs and outputs must have the same data type. This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>`, `SameOperandsAndResultElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4910,7 +4959,7 @@ MaxPool consumes an input tensor X and applies max pooling across ``` output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) ``` - if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. + if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. 
Sliding windows that would start in the right padded region are ignored. `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled: ``` @@ -4929,7 +4978,7 @@ MaxPool consumes an input tensor X and applies max pooling across The output of each pooling window is maximum number of elements exclude pad. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -4952,13 +5001,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of 8-bit signless integer values or tensor of 8-bit unsigned integer values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of 8-bit signless integer values or tensor of 8-bit unsigned integer values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of 8-bit signless integer values or tensor of 8-bit unsigned integer values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of 8-bit signless integer values or tensor of 8-bit unsigned integer values | `Indices` | tensor of 64-bit signless integer values or none type ### `onnx.MaxPoolSingleOut` (ONNXMaxPoolSingleOutOp) @@ -4970,7 +5019,7 @@ See ONNXMaxPoolOp for a full description of the MaxPool semantics. This operation is not part of the standard and was added to assist onnx-mlir. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<12>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5009,7 +5058,7 @@ ROI max pool consumes an input tensor X and region of interests (RoIs) to apply max pooling across each RoI, to produce output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5027,14 +5076,14 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `rois` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `rois` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.MaxUnpool` (ONNXMaxUnpoolOp) @@ -5059,7 +5108,7 @@ In addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape which define the exact unpooling op. The attributes typically have the same values as the corresponding pooling op that the unpooling op is trying to invert. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5078,7 +5127,7 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values | `I` | tensor of 64-bit signless integer values | `output_shape` | tensor of 64-bit signless integer values or none type @@ -5086,7 +5135,7 @@ Effects: `MemoryEffects::Effect{}` | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Mean` (ONNXMeanOp) @@ -5096,7 +5145,7 @@ Element-wise mean of each of the input tensors (with Numpy-style broadcasting su All inputs and outputs must have the same data type. This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5121,7 +5170,7 @@ _ONNX MeanVarianceNormalization operation_ A MeanVarianceNormalization Function: Perform mean variance normalization on the input tensor X using formula: `(X-EX)/sqrt(E(X-EX)^2)` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5159,7 +5208,7 @@ In the returned matrix, all the triangles (filterbanks) have a peak value of 1.0 The returned MelWeightMatrix can be used to right-multiply a spectrogram S of shape [frames, num_spectrogram_bins] of linear scale spectrum values (e.g. STFT magnitudes) to generate a \"mel spectrogram\" M of shape [frames, num_mel_bins]. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<17>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5196,7 +5245,7 @@ Element-wise min of each of the input tensors (with Numpy-style broadcasting sup All inputs and outputs must have the same data type. This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>`, `SameOperandsAndResultElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5226,7 +5275,7 @@ Perform the linear unit element-wise on the input tensor X using formula: mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x})) ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5236,13 +5285,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Mod` (ONNXModOp) @@ -5262,7 +5311,7 @@ Performs element-wise binary modulus (with Numpy-style broadcasting support). This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>`, `SameOperandsAndResultElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5353,7 +5402,7 @@ Compute one iteration of stochastic gradient update with momentum. 
concatenation of \"X_1\" and \"X_2\" (of course, their gradient and accumulate gradient should be concatenated too) and then our pseudo code becomes applicable. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5393,7 +5442,7 @@ This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; fo (Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<14>`, `SameOperandsAndResultElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5419,7 +5468,7 @@ _ONNX Multinomial operation_ Generate a tensor of samples from a multinomial distribution according to the probabilities of each of the possible outcomes. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5438,7 +5487,7 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values #### Results: @@ -5454,7 +5503,7 @@ Neg takes one input data (Tensor) and produces one output data (Tensor) where each element flipped sign, y = -x, is applied to the tensor elementwise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5578,7 +5627,7 @@ loss = np.sum(loss) / weight_total // -1.57 ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5596,15 +5645,15 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values | `target` | tensor of 32-bit signless integer values or tensor of 64-bit signless integer values -| `weight` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `weight` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type #### Results: | Result | Description | | :----: | ----------- | -| `loss` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `loss` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.NonMaxSuppression` (ONNXNonMaxSuppressionOp) @@ -5618,7 +5667,7 @@ result in the same boxes being selected by the algorithm. The selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. 
The bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5657,7 +5706,7 @@ Returns the indices of the elements that are non-zero https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html, but for scalar input, NonZero produces output shape (0, N) instead of (1, N), which is different from Numpy's behavior. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5724,7 +5773,7 @@ Normalize the input. There are three normalization modes, which have the corres For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row of the batch is normalized independently. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5755,7 +5804,7 @@ _ONNX Not operation_ Returns the negation of the input tensor element-wise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5786,7 +5835,7 @@ Replace each input element with an array of ones and zeros, where a single If the input is a tensor of float, int32, or double, the data will be cast to integers and the cats_int64s category list will be used for the lookups. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5837,7 +5886,7 @@ Produces a one-hot tensor based on inputs. output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5872,7 +5921,7 @@ If the input is a tensor or sequence type, it returns the input. If the input is an optional type, it outputs the element in the input. It is an error if the input is an empty optional-type (i.e. does not have an element) and the behavior is undefined in this case. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5898,7 +5947,7 @@ Returns true if (1) the input is an optional-type and contains an element, or, (2) the input is a tensor or sequence type. If the input is not provided or is an empty optional-type, this op returns false. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5923,7 +5972,7 @@ _ONNX Optional operation_ Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, or a non-empty value containing the input element. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<15>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5957,7 +6006,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<7>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -5985,7 +6034,7 @@ output data (Tensor) where the function `f(x) = slope * x for x < 0`, `f(x) = x for x >= 0`., is applied to the data tensor elementwise. This operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check [the doc](Broadcasting.md). 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<16>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6109,7 +6158,7 @@ output = [ ] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6217,7 +6266,7 @@ Example 3 (`edge` mode): ] -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6324,7 +6373,7 @@ Example 3 (`edge` mode): ] -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6431,7 +6480,7 @@ output = [ ] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6480,7 +6529,7 @@ Example: ], ] -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<2>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6516,7 +6565,7 @@ produces one output data (Tensor) where the function `f(x) = x^exponent`, is applied to the data tensor elementwise. This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<15>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6569,7 +6618,7 @@ Each input or output and its related zero point must have same type. When bias is present it must be quantized using scale = input scale * weight scale and zero point as 0. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<10>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6623,7 +6672,7 @@ for per column quantization. If the input is N-D tensor with shape [D1, D2, M, K have shape [D1, D2, M, 1] for per row quantization and shape [D1, D2, 1, K] for per column quantization. Production must never overflow, and accumulation may overflow if and only if in 32 bits. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<10>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6662,7 +6711,7 @@ For (x / y_scale), it's rounding to the nearest even. Refer to https://en.wikipe but the quantization formula remains the same for consistency and the type of the attribute 'y_zero_point' still determines the quantization type. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6807,7 +6856,7 @@ Equations (Default: f=Tanh): * Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. 
An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6830,19 +6879,19 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `W` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `R` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values -| `B` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `X` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `W` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `R` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `B` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type | `sequence_lens` | tensor of 32-bit signless integer values or none type -| `initial_h` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `initial_h` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type #### Results: | Result | Description | | :----: | ----------- | -| `Y` | 
tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type -| `Y_h` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `Y` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type +| `Y_h` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or none type ### `onnx.RandomNormalLike` (ONNXRandomNormalLikeOp) @@ -6856,7 +6905,7 @@ The data type is specified by the 'dtype' argument, or copied from the input ten The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the TensorProto message, and be valid as an output type. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6876,13 +6925,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 8-bit unsigned integer values or tensor of 16-bit unsigned integer values or tensor of 32-bit unsigned integer values or tensor of 64-bit unsigned integer values or tensor of 8-bit signless integer values or tensor of 16-bit signless integer values or tensor of 32-bit signless integer values or tensor of 64-bit signless integer values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of string type values or tensor of 1-bit signless integer values or tensor of complex type with 32-bit float elements values or tensor of complex type with 64-bit float elements values +| `input` | tensor of 8-bit unsigned integer values or tensor of 16-bit unsigned integer values or tensor of 32-bit unsigned integer values or tensor of 
64-bit unsigned integer values or tensor of 8-bit signless integer values or tensor of 16-bit signless integer values or tensor of 32-bit signless integer values or tensor of 64-bit signless integer values or tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of string type values or tensor of 1-bit signless integer values or tensor of complex type with 32-bit float elements values or tensor of complex type with 64-bit float elements values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.RandomNormal` (ONNXRandomNormalOp) @@ -6896,7 +6945,7 @@ The data type is specified by the 'dtype' argument. The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the TensorProto message. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ResultTypeInferenceOpInterface`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6917,7 +6966,7 @@ Effects: `MemoryEffects::Effect{}` | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.RandomUniformLike` (ONNXRandomUniformLikeOp) @@ -6931,7 +6980,7 @@ The data type is specified by the 'dtype' argument, or copied from the input ten The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the TensorProto message and be valid as an output type. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6970,7 +7019,7 @@ The data type is specified by the 'dtype' argument. The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the TensorProto message. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -6991,7 +7040,7 @@ Effects: `MemoryEffects::Effect{}` | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of bfloat16 type values or tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values ### `onnx.Range` (ONNXRangeOp) @@ -7028,7 +7077,7 @@ Inputs: start = 10, limit = 4, delta = -2 Output: [10, 8, 6] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7056,7 +7105,7 @@ Reciprocal takes one input data (Tensor) and produces one output data (Tensor) where the reciprocal is, y = 1/x, is applied to the tensor elementwise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7087,7 +7136,7 @@ valid. Reduction over an empty set of values yields 0. The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7127,7 +7176,7 @@ valid. Reduction over an empty set of values yields 0. 
The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7166,7 +7215,7 @@ valid. Reduction over an empty set of values yields 0. The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7206,7 +7255,7 @@ valid. Reduction over an empty set of values yields 0. The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7245,7 +7294,7 @@ valid. Reduction over an empty set of values yields minus infinity (if supported The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7285,7 +7334,7 @@ valid. Reduction over an empty set of values yields minus infinity (if supported The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7324,7 +7373,7 @@ valid. Reduction over an empty set of values yields minus infinity (if supported The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7364,7 +7413,7 @@ valid. Reduction over an empty set of values yields minus infinity (if supported The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7405,7 +7454,7 @@ If the input data type is Boolean, the comparison should consider `False < True` The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<20>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7445,7 +7494,7 @@ valid. Reduction over an empty set of values yields minus infinity (if supported The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7484,7 +7533,7 @@ valid. Reduction over an empty set of values yields minus infinity (if supported The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7524,7 +7573,7 @@ valid. Reduction over an empty set of values yields undefined. The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7564,7 +7613,7 @@ valid. Reduction over an empty set of values yields undefined. The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7605,7 +7654,7 @@ If the input data type is Boolean, the comparison should consider `False < True` The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<20>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7645,7 +7694,7 @@ valid. Reduction over an empty set of values yields plus infinity (if supported The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7684,7 +7733,7 @@ valid. Reduction over an empty set of values yields plus infinity (if supported The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7724,7 +7773,7 @@ valid. Reduction over an empty set of values yields 1. The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7764,7 +7813,7 @@ valid. Reduction over an empty set of values yields 1. The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7803,7 +7852,7 @@ valid. Reduction over an empty set of values yields 0. The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7843,7 +7892,7 @@ valid. Reduction over an empty set of values yields 0. The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7883,7 +7932,7 @@ valid. Reduction over an empty set of values yields 0. The above behavior is similar to numpy, with the exception that numpy defaults `keepdims` to `False` instead of `True`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7920,7 +7969,7 @@ the resulted tensor have the reduced dimension pruned. The above behavior is similar to numpy, with the exception that numpy defaults keepdims to False instead of True. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7954,7 +8003,7 @@ Relu takes one input data (Tensor) and produces one output data (Tensor) where the rectified linear function, y = max(0, x), is applied to the tensor elementwise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<14>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -7990,7 +8039,7 @@ If the attribute 'allowzero' is set, it is invalid for the specified shape to contain both a zero value and -1, as the value of the dimension corresponding to -1 cannot be determined uniquely. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8027,7 +8076,7 @@ output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) ``` if input \\"sizes\\" is not specified. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8071,7 +8120,7 @@ Resize the input tensor. Each dimension value of the output tensor is: output_dimension = floor(input_dimension * scale). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<10>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8105,7 +8154,7 @@ Resize the input tensor. 
In general, it calculates every value in the output ten Each dimension value of the output tensor is: output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input \\"sizes\\" is not specified. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8146,7 +8195,7 @@ Resize the input tensor. In general, it calculates every value in the output ten Each dimension value of the output tensor is: output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input \\"sizes\\" is not specified. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8188,7 +8237,7 @@ Each dimension value of the output tensor is:
`output_dimension = floor(input_dimension * (roi_end - roi_start) * scale)`
if input \\"sizes\\" is not specified. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8295,7 +8344,7 @@ Example 2: [10.0, 9.0, 8.0, 11.0], [15.0, 14.0, 13.0, 12.0]] -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<10>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8338,7 +8387,7 @@ map and from feature map into RoI feature; in each ROI bin, the value of the sampled locations are computed directly through bilinear interpolation. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<16>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8389,7 +8438,7 @@ round([1.5]) = [2.0] round([-4.5]) = [-4.0] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8399,13 +8448,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values ### `onnx.STFT` 
(ONNXSTFTOp) @@ -8413,7 +8462,7 @@ _ONNX STFT operation_ Computes the Short-time Fourier Transform of the signal. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<17>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8447,7 +8496,7 @@ _ONNX SVMClassifier operation_ Support Vector Machine classifier -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8489,7 +8538,7 @@ _ONNX SVMRegressor operation_ Support Vector Machine regression prediction and one-class SVM anomaly detection. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8527,7 +8576,7 @@ _ONNX Scaler operation_ Rescale input data, for example to standardize features by removing the mean and scaling to unit variance. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8679,7 +8728,7 @@ values are computed in the outer graph, they need to be passed in as extra state } -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `HasOnnxSubgraphOpInterface`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ResultTypeInferenceOpInterface`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8775,7 +8824,7 @@ axis = 1 output = [[1.0, 1.1, 3.0, 2.1, 5.0]] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8883,7 +8932,7 @@ output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -8968,7 +9017,7 @@ Example 2: output = [[1.0, 1.1, 3.0, 2.1, 5.0]] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9004,7 +9053,7 @@ Selu takes one input data (Tensor) and produces one output data `y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`, is applied to the tensor elementwise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9022,13 +9071,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values ### `onnx.SequenceAt` (ONNXSequenceAtOp) _ONNX SequenceAt operation_ Outputs a tensor copy from the tensor at 'position' in 'input_sequence'. Accepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'. Negative value means counting positions from the back. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9064,7 +9113,7 @@ _ONNX SequenceConstruct operation_ Construct a tensor sequence containing 'inputs' tensors. All tensors in 'inputs' must have the same data type. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9088,7 +9137,7 @@ _ONNX SequenceEmpty operation_ Construct an empty tensor sequence, with given data type. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9116,7 +9165,7 @@ Accepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of te Negative value means counting positions from the back. 'position' is optional, by default it erases the last tensor from 'input_sequence'. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9145,7 +9194,7 @@ Accepted range for 'position' is in `[-n, n]`, where `n` is the number of tensor Negative value means counting positions from the back. 'position' is optional, by default it inserts 'tensor' to the back of 'input_sequence'. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9171,7 +9220,7 @@ _ONNX SequenceLength operation_ Produces a scalar(tensor of empty shape) containing the number of tensors in 'input_sequence'. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9208,7 +9257,7 @@ the input. This operator assumes that processing each sample is independent and could executed in parallel or in any order. Users cannot expect any specific ordering in which each subgraph is computed. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<17>` Interfaces: `ConditionallySpeculatable`, `HasOnnxSubgraphOpInterface`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9270,7 +9319,7 @@ end: 2 Output: [3] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9365,7 +9414,7 @@ having same datatype and shape with input. It has two attributes, lambd and bias. The formula of this operator is: If x < -lambd, y = x + bias; If x > lambd, y = x - bias; Otherwise, y = 0. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<9>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9399,7 +9448,7 @@ Sigmoid takes one input data (Tensor) and produces one output data (Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the tensor elementwise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9424,7 +9473,7 @@ _ONNX Sign operation_ Calculate the sign of the given input tensor element-wise. If input > 0, output 1. if input < 0, output -1. if input == 0, output 0. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9448,7 +9497,7 @@ _ONNX Sin operation_ Calculates the sine of the given input tensor, element-wise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9458,13 +9507,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `input` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values #### Results: | Result | Description | | :----: | ----------- | -| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `output` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values ### `onnx.Sinh` (ONNXSinhOp) _ONNX Sinh operation_ Calculates the hyperbolic sine of the given input tensor element-wise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9496,7 +9545,7 @@ _ONNX Size operation_ Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<19>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9581,7 +9630,7 @@ result = [ ] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9648,7 +9697,7 @@ Finally, L is optionally reduced: * If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: `ReduceSum(L) / ReduceSum(W)`, where tensor W is of shape `(N, D1, D2, ..., Dk)` and `W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]`. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9689,7 +9738,7 @@ The \"axis\" attribute indicates the dimension along which Softmax will be performed. The output tensor has the same shape and contains the Softmax values of the corresponding input. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9733,7 +9782,7 @@ Each of these dimensions must be matched correctly, or else the operator will throw errors. The output tensor has the same shape and contains the softmax values of the corresponding input. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9766,7 +9815,7 @@ Softplus takes one input data (Tensor) and produces one output data (Tensor) where the softplus function, y = ln(exp(x) + 1), is applied to the tensor elementwise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9776,13 +9825,13 @@ Effects: `MemoryEffects::Effect{}` | Operand | Description | | :-----: | ----------- | -| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `X` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values #### Results: | Result | Description | | :----: | ----------- | -| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values +| `Y` | tensor of 16-bit float values or tensor of 32-bit float values or tensor of 64-bit float values or tensor of bfloat16 type values ### `onnx.Softsign` (ONNXSoftsignOp) _ONNX Softsign operation_ Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9816,7 +9865,7 @@ SpaceToDepth rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9851,7 +9900,7 @@ If the attribute 'num_outputs' is specified, then the tensor is split into equal If the tensor is not evenly splittable into `num_outputs`, the last chunk will be smaller. If the input 'split' is specified, it indicates the sizes of each output in the split. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<18>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9895,7 +9944,7 @@ If 'split' is a 1-dimensional tensor, the input tensor is split into 'size(split with lengths of the parts on 'axis' specified in 'split'. In this scenario, the sum of entries in 'split' must be equal to the dimension size of input tensor on 'axis'. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9930,7 +9979,7 @@ Split a tensor into a list of tensors, along the specified 'axis'. Lengths of the parts can be specified using argument 'split'. Otherwise, the tensor is split to equal sized parts. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9964,7 +10013,7 @@ Split a tensor into a list of tensors, along the specified 'axis'. Lengths of the parts can be specified using input 'split'. Otherwise, the tensor is split to equal sized parts. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -9998,7 +10047,7 @@ Square root takes one input data (Tensor) and produces one output data (Tensor) where the square root is, y = x^0.5, is applied to the tensor elementwise. If x is negative, then it will return NaN. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10025,7 +10074,7 @@ Takes an input `axes` with a list of axes to squeeze. If `axes` is not provided, all the single dimensions will be removed from the shape. If an axis is selected with shape entry not equal to one, an error is raised. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10053,7 +10102,7 @@ Takes a parameter `axes` with a list of axes to squeeze. If `axes` is not provided, all the single dimensions will be removed from the shape. If an axis is selected with shape entry not equal to one, an error is raised. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10092,7 +10141,7 @@ This operator only accepts [C]- and [1, C]-tensor. If all elements in X are dropped, the output will be the empty value of string tensor with shape [1] if input shape is [C] and shape [1, 1] if input shape is [1, C]. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<10>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10130,7 +10179,7 @@ This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; fo (Opset 14 change): Extend supported types to include uint8, int8, uint16, and int16. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<14>`, `SameOperandsAndResultElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10157,7 +10206,7 @@ Element-wise sum of each of the input tensors (with Numpy-style broadcasting sup All inputs and outputs must have the same data type. This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>`, `SameOperandsAndResultElementType` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10181,7 +10230,7 @@ _ONNX Tan operation_ Calculates the tangent of the given input tensor, element-wise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10205,7 +10254,7 @@ _ONNX Tanh operation_ Calculates the hyperbolic tangent of the given input tensor element-wise. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10255,7 +10304,7 @@ this operator first computes the counts of all n-grams and then scale them by th Only one of pool_strings and pool_int64s can be set. If pool_int64s is set, the input should be an integer tensor. If pool_strings is set, the input must be a string tensor. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<9>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10296,7 +10345,7 @@ ThresholdedRelu takes one input data (Tensor) and produces one output data (Tensor) where the rectified linear function, y = x for x > alpha, y = 0 otherwise, is applied to the tensor elementwise. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<22>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10329,7 +10378,7 @@ Constructs a tensor by tiling a given tensor. This is the same as function `tile` in Numpy, but no broadcast. For example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]] -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10368,7 +10417,7 @@ shape [a_0, a_1, ..., a_{n-1\}\] and integer argument k, return two outputs: Given two equivalent values, this operator uses the indices along the axis as a tiebreaker. That is, the element with the lower index will appear first. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10405,7 +10454,7 @@ Transpose the input tensor similar to numpy.transpose. For example, when perm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape will be (2, 1, 3). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10444,7 +10493,7 @@ Tree Ensemble classifier. Returns the top class for each of N inputs.
One and only one of classlabels_strings or classlabels_int64s will be defined. The class_ids are indices into this list. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10501,7 +10550,7 @@ Tree Ensemble regressor. Returns the regressed values for each input in N.
All trees must have their node ids start at 0 and increment by 1.
Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10559,7 +10608,7 @@ A negative k value retains the main diagonal and |k| diagonals below it. If upper is set to false, a positive k retains the lower triangular matrix including the main diagonal and k diagonals above it. A negative k value excludes the main diagonal and (|k|-1) diagonals below it. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<14>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10687,7 +10736,7 @@ output_counts: [2, 1, 1] ``` -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10731,7 +10780,7 @@ The rank of the output tensor (`output_rank`) is the rank of the input tensor (` Each value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1]. The order of values in `axes` does not matter and can come in any order. -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<13>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10767,7 +10816,7 @@ Each value in `axes` should be within the (inclusive) range [-output_rank , outp The order of values in `axes` does not matter and can come in any order. 
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<11>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10800,7 +10849,7 @@ Upsample the input tensor. Each dimension value of the output tensor is: output_dimension = floor(input_dimension * scale). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<9>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10834,7 +10883,7 @@ Upsample the input tensor. Each dimension value of the output tensor is: output_dimension = floor(input_dimension * scale). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<7>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10871,7 +10920,7 @@ with three parameters. This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<16>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10900,7 +10949,7 @@ elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting supp This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). -Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<7>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` @@ -10957,7 +11006,7 @@ Creates a map from the input and the attributes.
Must provide keys in either classlabels_strings or classlabels_int64s (but not both).
The columns of the tensor correspond one-by-one to the keys specified by the attributes. There must be as many columns as keys.
-Traits: `AlwaysSpeculatableImplTrait` +Traits: `AlwaysSpeculatableImplTrait`, `OpVersionTrait<1>` Interfaces: `ConditionallySpeculatable`, `NoMemoryEffect (MemoryEffectOpInterface)`, `ShapeHelperOpInterface`, `ShapeInferenceOpInterface` diff --git a/src/Builder/OpBuildTable.inc b/src/Builder/OpBuildTable.inc index 0a63f65ff7..2a779228da 100644 --- a/src/Builder/OpBuildTable.inc +++ b/src/Builder/OpBuildTable.inc @@ -5,8 +5,8 @@ //******************************************************** op_dialect_version_map_["Abs"] = {13}; -op_dialect_version_map_["Acos"] = {7}; -op_dialect_version_map_["Acosh"] = {9}; +op_dialect_version_map_["Acos"] = {22}; +op_dialect_version_map_["Acosh"] = {22}; op_dialect_version_map_["Adagrad"] = {1}; op_dialect_version_map_["Adam"] = {1}; op_dialect_version_map_["Add"] = {14}; @@ -14,13 +14,13 @@ op_dialect_version_map_["And"] = {7}; op_dialect_version_map_["ArgMax"] = {13}; op_dialect_version_map_["ArgMin"] = {13}; op_dialect_version_map_["ArrayFeatureExtractor"] = {1}; -op_dialect_version_map_["Asin"] = {7}; -op_dialect_version_map_["Asinh"] = {9}; -op_dialect_version_map_["Atan"] = {7}; -op_dialect_version_map_["Atanh"] = {9}; -op_dialect_version_map_["AveragePool"] = {19}; +op_dialect_version_map_["Asin"] = {22}; +op_dialect_version_map_["Asinh"] = {22}; +op_dialect_version_map_["Atan"] = {22}; +op_dialect_version_map_["Atanh"] = {22}; +op_dialect_version_map_["AveragePool"] = {22}; op_dialect_version_map_["BatchNormalization"] = {15, 9}; -op_dialect_version_map_["Bernoulli"] = {15}; +op_dialect_version_map_["Bernoulli"] = {22}; op_dialect_version_map_["Binarizer"] = {1}; op_dialect_version_map_["BitShift"] = {11}; op_dialect_version_map_["BitwiseAnd"] = {18}; @@ -41,41 +41,41 @@ op_dialect_version_map_["Concat"] = {13}; op_dialect_version_map_["ConcatFromSequence"] = {11}; op_dialect_version_map_["Constant"] = {19}; op_dialect_version_map_["ConstantOfShape"] = {20}; -op_dialect_version_map_["Conv"] = {11}; 
+op_dialect_version_map_["Conv"] = {22}; op_dialect_version_map_["ConvInteger"] = {10}; -op_dialect_version_map_["ConvTranspose"] = {11}; -op_dialect_version_map_["Cos"] = {7}; -op_dialect_version_map_["Cosh"] = {9}; +op_dialect_version_map_["ConvTranspose"] = {22}; +op_dialect_version_map_["Cos"] = {22}; +op_dialect_version_map_["Cosh"] = {22}; op_dialect_version_map_["Col2Im"] = {18}; op_dialect_version_map_["CumSum"] = {14}; -op_dialect_version_map_["DeformConv"] = {19}; +op_dialect_version_map_["DeformConv"] = {22}; op_dialect_version_map_["DepthToSpace"] = {13}; op_dialect_version_map_["DequantizeLinear"] = {19}; -op_dialect_version_map_["Det"] = {11}; +op_dialect_version_map_["Det"] = {22}; op_dialect_version_map_["DFT"] = {20, 17}; op_dialect_version_map_["DictVectorizer"] = {1}; op_dialect_version_map_["Div"] = {14}; -op_dialect_version_map_["Dropout"] = {13}; +op_dialect_version_map_["Dropout"] = {22}; op_dialect_version_map_["DynamicQuantizeLinear"] = {11}; op_dialect_version_map_["Einsum"] = {12}; -op_dialect_version_map_["Elu"] = {6}; +op_dialect_version_map_["Elu"] = {22}; op_dialect_version_map_["Equal"] = {19}; op_dialect_version_map_["Erf"] = {13}; op_dialect_version_map_["Exp"] = {13}; op_dialect_version_map_["Expand"] = {13}; -op_dialect_version_map_["EyeLike"] = {9}; +op_dialect_version_map_["EyeLike"] = {22}; op_dialect_version_map_["FeatureVectorizer"] = {1}; op_dialect_version_map_["Flatten"] = {13}; op_dialect_version_map_["Floor"] = {13}; -op_dialect_version_map_["GRU"] = {14}; +op_dialect_version_map_["GRU"] = {22}; op_dialect_version_map_["Gather"] = {13}; op_dialect_version_map_["GatherElements"] = {13}; op_dialect_version_map_["GatherND"] = {13}; op_dialect_version_map_["Gelu"] = {20}; op_dialect_version_map_["Gemm"] = {13}; -op_dialect_version_map_["GlobalAveragePool"] = {1}; +op_dialect_version_map_["GlobalAveragePool"] = {22}; op_dialect_version_map_["GlobalLpPool"] = {2}; -op_dialect_version_map_["GlobalMaxPool"] = {1}; 
+op_dialect_version_map_["GlobalMaxPool"] = {22}; op_dialect_version_map_["Gradient"] = {1}; op_dialect_version_map_["Greater"] = {13}; op_dialect_version_map_["GreaterOrEqual"] = {16}; @@ -83,18 +83,18 @@ op_dialect_version_map_["GridSample"] = {16}; op_dialect_version_map_["GroupNormalization"] = {21, 18}; op_dialect_version_map_["HammingWindow"] = {17}; op_dialect_version_map_["HannWindow"] = {17}; -op_dialect_version_map_["HardSigmoid"] = {6}; +op_dialect_version_map_["HardSigmoid"] = {22}; op_dialect_version_map_["Hardmax"] = {13}; -op_dialect_version_map_["HardSwish"] = {14}; +op_dialect_version_map_["HardSwish"] = {22}; op_dialect_version_map_["Identity"] = {19}; op_dialect_version_map_["If"] = {19}; op_dialect_version_map_["Imputer"] = {1}; -op_dialect_version_map_["InstanceNormalization"] = {6}; +op_dialect_version_map_["InstanceNormalization"] = {22}; op_dialect_version_map_["IsInf"] = {20}; op_dialect_version_map_["IsNaN"] = {20}; op_dialect_version_map_["LayerNormalization"] = {17}; op_dialect_version_map_["LRN"] = {13}; -op_dialect_version_map_["LSTM"] = {14}; +op_dialect_version_map_["LSTM"] = {22}; op_dialect_version_map_["LabelEncoder"] = {2}; op_dialect_version_map_["LeakyRelu"] = {16}; op_dialect_version_map_["Less"] = {13}; @@ -104,25 +104,25 @@ op_dialect_version_map_["LinearRegressor"] = {1}; op_dialect_version_map_["Log"] = {13}; op_dialect_version_map_["LogSoftmax"] = {13}; op_dialect_version_map_["Loop"] = {19}; -op_dialect_version_map_["LpNormalization"] = {1}; -op_dialect_version_map_["LpPool"] = {18}; +op_dialect_version_map_["LpNormalization"] = {22}; +op_dialect_version_map_["LpPool"] = {22}; op_dialect_version_map_["MatMul"] = {13}; op_dialect_version_map_["MatMulInteger"] = {10}; op_dialect_version_map_["Max"] = {13}; -op_dialect_version_map_["MaxPool"] = {12}; -op_dialect_version_map_["MaxRoiPool"] = {1}; -op_dialect_version_map_["MaxUnpool"] = {11}; +op_dialect_version_map_["MaxPool"] = {22}; +op_dialect_version_map_["MaxRoiPool"] = 
{22}; +op_dialect_version_map_["MaxUnpool"] = {22}; op_dialect_version_map_["Mean"] = {13}; op_dialect_version_map_["MeanVarianceNormalization"] = {13}; op_dialect_version_map_["MelWeightMatrix"] = {17}; op_dialect_version_map_["Min"] = {13}; -op_dialect_version_map_["Mish"] = {18}; +op_dialect_version_map_["Mish"] = {22}; op_dialect_version_map_["Mod"] = {13}; op_dialect_version_map_["Momentum"] = {1}; op_dialect_version_map_["Mul"] = {14}; -op_dialect_version_map_["Multinomial"] = {7}; +op_dialect_version_map_["Multinomial"] = {22}; op_dialect_version_map_["Neg"] = {13}; -op_dialect_version_map_["NegativeLogLikelihoodLoss"] = {13}; +op_dialect_version_map_["NegativeLogLikelihoodLoss"] = {22}; op_dialect_version_map_["NonMaxSuppression"] = {11}; op_dialect_version_map_["NonZero"] = {13}; op_dialect_version_map_["Normalizer"] = {1}; @@ -139,11 +139,11 @@ op_dialect_version_map_["Pow"] = {15}; op_dialect_version_map_["QLinearConv"] = {10}; op_dialect_version_map_["QLinearMatMul"] = {10}; op_dialect_version_map_["QuantizeLinear"] = {19}; -op_dialect_version_map_["RNN"] = {14}; -op_dialect_version_map_["RandomNormal"] = {1}; -op_dialect_version_map_["RandomNormalLike"] = {1}; -op_dialect_version_map_["RandomUniform"] = {1}; -op_dialect_version_map_["RandomUniformLike"] = {1}; +op_dialect_version_map_["RNN"] = {22}; +op_dialect_version_map_["RandomNormal"] = {22}; +op_dialect_version_map_["RandomNormalLike"] = {22}; +op_dialect_version_map_["RandomUniform"] = {22}; +op_dialect_version_map_["RandomUniformLike"] = {22}; op_dialect_version_map_["Range"] = {11}; op_dialect_version_map_["Reciprocal"] = {13}; op_dialect_version_map_["ReduceL1"] = {18, 13}; @@ -160,8 +160,8 @@ op_dialect_version_map_["Relu"] = {14}; op_dialect_version_map_["Reshape"] = {19}; op_dialect_version_map_["Resize"] = {19, 18, 13, 11, 10}; op_dialect_version_map_["ReverseSequence"] = {10}; -op_dialect_version_map_["RoiAlign"] = {16}; -op_dialect_version_map_["Round"] = {11}; 
+op_dialect_version_map_["RoiAlign"] = {22}; +op_dialect_version_map_["Round"] = {22}; op_dialect_version_map_["SVMClassifier"] = {1}; op_dialect_version_map_["SVMRegressor"] = {1}; op_dialect_version_map_["Scaler"] = {1}; @@ -169,7 +169,7 @@ op_dialect_version_map_["Scan"] = {19}; op_dialect_version_map_["Scatter"] = {11}; op_dialect_version_map_["ScatterElements"] = {18}; op_dialect_version_map_["ScatterND"] = {18}; -op_dialect_version_map_["Selu"] = {6}; +op_dialect_version_map_["Selu"] = {22}; op_dialect_version_map_["SequenceAt"] = {11}; op_dialect_version_map_["SequenceConstruct"] = {11}; op_dialect_version_map_["SequenceEmpty"] = {11}; @@ -181,14 +181,14 @@ op_dialect_version_map_["Shape"] = {19}; op_dialect_version_map_["Shrink"] = {9}; op_dialect_version_map_["Sigmoid"] = {13}; op_dialect_version_map_["Sign"] = {13}; -op_dialect_version_map_["Sin"] = {7}; -op_dialect_version_map_["Sinh"] = {9}; +op_dialect_version_map_["Sin"] = {22}; +op_dialect_version_map_["Sinh"] = {22}; op_dialect_version_map_["Size"] = {19}; op_dialect_version_map_["Slice"] = {13}; op_dialect_version_map_["Softmax"] = {13, 11}; op_dialect_version_map_["SoftmaxCrossEntropyLoss"] = {13}; -op_dialect_version_map_["Softplus"] = {1}; -op_dialect_version_map_["Softsign"] = {1}; +op_dialect_version_map_["Softplus"] = {22}; +op_dialect_version_map_["Softsign"] = {22}; op_dialect_version_map_["SpaceToDepth"] = {13}; op_dialect_version_map_["Split"] = {18, 13, 11}; op_dialect_version_map_["SplitToSequence"] = {11}; @@ -198,10 +198,10 @@ op_dialect_version_map_["StringNormalizer"] = {10}; op_dialect_version_map_["STFT"] = {17}; op_dialect_version_map_["Sub"] = {14}; op_dialect_version_map_["Sum"] = {13}; -op_dialect_version_map_["Tan"] = {7}; +op_dialect_version_map_["Tan"] = {22}; op_dialect_version_map_["Tanh"] = {13}; op_dialect_version_map_["TfIdfVectorizer"] = {9}; -op_dialect_version_map_["ThresholdedRelu"] = {10}; +op_dialect_version_map_["ThresholdedRelu"] = {22}; 
op_dialect_version_map_["Tile"] = {13}; op_dialect_version_map_["TopK"] = {11}; op_dialect_version_map_["Transpose"] = {13}; diff --git a/src/Dialect/ONNX/ONNXOps.td.inc b/src/Dialect/ONNX/ONNXOps.td.inc index 0516cd5f3e..a12da38c75 100644 --- a/src/Dialect/ONNX/ONNXOps.td.inc +++ b/src/Dialect/ONNX/ONNXOps.td.inc @@ -46,13 +46,13 @@ def ONNXAbsOp:ONNX_Op<"Abs", } def ONNXAcosOp:ONNX_Op<"Acos", - [Pure, OpVersionTrait<7>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Acos operation"; let description = [{ Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise. }]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -75,13 +75,13 @@ def ONNXAcosOp:ONNX_Op<"Acos", } def ONNXAcoshOp:ONNX_Op<"Acosh", - [Pure, OpVersionTrait<9>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Acosh operation"; let description = [{ Calculates the hyperbolic arccosine of the given input tensor element-wise. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -293,13 +293,13 @@ def ONNXArgMinOp:ONNX_Op<"ArgMin", } def ONNXAsinOp:ONNX_Op<"Asin", - [Pure, OpVersionTrait<7>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Asin operation"; let description = [{ Calculates the arcsine (inverse of sine) of the given input tensor, element-wise. }]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -322,13 +322,13 @@ def ONNXAsinOp:ONNX_Op<"Asin", } def ONNXAsinhOp:ONNX_Op<"Asinh", - [Pure, OpVersionTrait<9>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Asinh operation"; let description = [{ Calculates the hyperbolic arcsine of the given input tensor element-wise. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -351,13 +351,13 @@ def ONNXAsinhOp:ONNX_Op<"Asinh", } def ONNXAtanOp:ONNX_Op<"Atan", - [Pure, OpVersionTrait<7>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Atan operation"; let description = [{ Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise. }]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -380,13 +380,13 @@ def ONNXAtanOp:ONNX_Op<"Atan", } def ONNXAtanhOp:ONNX_Op<"Atanh", - [Pure, OpVersionTrait<9>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Atanh operation"; let description = [{ Calculates the hyperbolic arctangent of the given input tensor element-wise. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -409,7 +409,7 @@ def ONNXAtanhOp:ONNX_Op<"Atanh", } def ONNXAveragePoolOp:ONNX_Op<"AveragePool", - [Pure, OpVersionTrait<19>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX AveragePool operation"; let description = [{ AveragePool consumes an input tensor X and applies average pooling across @@ -426,7 +426,7 @@ def ONNXAveragePoolOp:ONNX_Op<"AveragePool", ``` output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) ``` - if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. + if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored. `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled: ``` @@ -445,8 +445,7 @@ def ONNXAveragePoolOp:ONNX_Op<"AveragePool", The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero). }]; - // FIXME(FXML-4136): Remove manual modification of BF16 type support and update upstream operation definition. 
- let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, DefaultValuedStrAttr:$auto_pad, DefaultValuedAttr:$ceil_mode, DefaultValuedAttr:$count_include_pad, @@ -454,7 +453,7 @@ def ONNXAveragePoolOp:ONNX_Op<"AveragePool", I64ArrayAttr:$kernel_shape, OptionalAttr:$pads, OptionalAttr:$strides); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -602,7 +601,7 @@ def ONNXBatchNormalizationV9Op:ONNX_Op<"BatchNormalizationV9", } def ONNXBernoulliOp:ONNX_Op<"Bernoulli", - [Pure, OpVersionTrait<15>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Bernoulli operation"; let description = [{ Draws binary random numbers (0 or 1) from a Bernoulli distribution. The input tensor should be a tensor @@ -612,10 +611,10 @@ def ONNXBernoulliOp:ONNX_Op<"Bernoulli", This operator is non-deterministic and may not produce the same values in different implementations (even if a seed is specified). 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input, OptionalAttr:$dtype, OptionalAttr:$seed); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[I1]>]>:$output); + let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I1]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -1444,23 +1443,22 @@ def ONNXConstantOfShapeOp:ONNX_Op<"ConstantOfShape", } def ONNXConvOp:ONNX_Op<"Conv", - [Pure, OpVersionTrait<11>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Conv operation"; let description = [{ The convolution operator consumes an input tensor and a filter, and computes the output. }]; - // FIXME(FXML-4136): Remove manual modification of BF16 type support and update upstream operation definition. 
- let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$W, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>, NoneType]>:$B, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, DefaultValuedStrAttr:$auto_pad, OptionalAttr:$dilations, DefaultValuedAttr:$group, OptionalAttr:$kernel_shape, OptionalAttr:$pads, OptionalAttr:$strides); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let builders = [ OpBuilder<(ins "Value":$X, "Value":$W, "Value":$B, "StringAttr":$auto_pad, "ArrayAttr":$dilations, "IntegerAttr":$group, "ArrayAttr":$kernel_shape, "ArrayAttr":$pads, "ArrayAttr":$strides), [{ auto resultType = UnrankedTensorType::get(mlir::cast(X.getType()).getElementType()); @@ -1533,7 +1531,7 @@ def ONNXConvIntegerOp:ONNX_Op<"ConvInteger", } def ONNXConvTransposeOp:ONNX_Op<"ConvTranspose", - [Pure, OpVersionTrait<11>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ConvTranspose operation"; let description = [{ The convolution transpose operator consumes an input tensor and a filter, @@ -1551,9 +1549,9 @@ def ONNXConvTransposeOp:ONNX_Op<"ConvTranspose", }]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, + let 
arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, DefaultValuedStrAttr:$auto_pad, OptionalAttr:$dilations, DefaultValuedAttr:$group, @@ -1562,7 +1560,7 @@ def ONNXConvTransposeOp:ONNX_Op<"ConvTranspose", OptionalAttr:$output_shape, OptionalAttr:$pads, OptionalAttr:$strides); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 3; @@ -1586,13 +1584,13 @@ def ONNXConvTransposeOp:ONNX_Op<"ConvTranspose", } def ONNXCosOp:ONNX_Op<"Cos", - [Pure, OpVersionTrait<7>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Cos operation"; let description = [{ Calculates the cosine of the given input tensor, element-wise. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -1615,13 +1613,13 @@ def ONNXCosOp:ONNX_Op<"Cos", } def ONNXCoshOp:ONNX_Op<"Cosh", - [Pure, OpVersionTrait<9>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Cosh operation"; let description = [{ Calculates the hyperbolic cosine of the given input tensor element-wise. }]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -1777,24 +1775,24 @@ def ONNXDFTV17Op:ONNX_Op<"DFTV17", } def ONNXDeformConvOp:ONNX_Op<"DeformConv", - [Pure, OpVersionTrait<19>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX DeformConv operation"; let description = [{ Performs deformable convolution as described in https://arxiv.org/abs/1703.06211 and https://arxiv.org/abs/1811.11168. This operator specification supports the general N-D case. Note that most common use cases have 2D or 3D data. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$offset, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$mask, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$offset, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$mask, OptionalAttr:$dilations, DefaultValuedAttr:$group, OptionalAttr:$kernel_shape, DefaultValuedAttr:$offset_group, OptionalAttr:$pads, OptionalAttr:$strides); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 5; @@ -1912,7 +1910,7 @@ def ONNXDequantizeLinearOp:ONNX_Op<"DequantizeLinear", } def ONNXDetOp:ONNX_Op<"Det", - [Pure, OpVersionTrait<11>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Det operation"; let description = [{ Det calculates determinant of a square matrix or batches of square matrices. @@ -1921,8 +1919,8 @@ def ONNXDetOp:ONNX_Op<"Det", The output is a tensor of shape `[*]`, containing the determinants of all input submatrices. e.g., When the input is 2-D, the output is a scalar(shape is empty: `[]`). 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -2001,7 +1999,7 @@ def ONNXDivOp:ONNX_Op<"Div", } def ONNXDropoutOp:ONNX_Op<"Dropout", - [Pure, OpVersionTrait<13>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let hasCanonicalizer = 1; let summary = "ONNX Dropout operation"; let description = [{ @@ -2018,11 +2016,11 @@ def ONNXDropoutOp:ONNX_Op<"Dropout", ``` This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$data, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$ratio, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>]>:$data, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>, NoneType]>:$ratio, AnyTypeOf<[TensorOf<[I1]>, NoneType]>:$training_mode, OptionalAttr:$seed); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output, + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[F8E4M3FN]>, TensorOf<[F8E4M3FNUZ]>, TensorOf<[F8E5M2]>, TensorOf<[F8E5M2FNUZ]>]>:$output, AnyTypeOf<[TensorOf<[I1]>, NoneType]>:$mask); let extraClassDeclaration = [{ static int getNumberOfOperands() { @@ -2158,7 +2156,7 @@ def ONNXEinsumOp:ONNX_Op<"Einsum", } def ONNXEluOp:ONNX_Op<"Elu", - [Pure, OpVersionTrait<6>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Elu operation"; let description = [{ Elu takes one input data (Tensor) and produces one output data @@ -2166,10 +2164,9 @@ def ONNXEluOp:ONNX_Op<"Elu", 0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise. 
}]; - // FIXME(FXML-4138): Remove manual modification of BF16 support when the operation definition is updated upstream - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, DefaultValuedAttr:$alpha); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -2355,7 +2352,7 @@ def ONNXExpandOp:ONNX_Op<"Expand", } def ONNXEyeLikeOp:ONNX_Op<"EyeLike", - [Pure, OpVersionTrait<9>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX EyeLike operation"; let description = [{ Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D @@ -2366,10 +2363,10 @@ def ONNXEyeLikeOp:ONNX_Op<"EyeLike", The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the TensorProto message and be valid as an output type. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>]>:$input, + let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I1]>]>:$input, OptionalAttr:$dtype, DefaultValuedAttr:$k); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>]>:$output); + let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I1]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -2458,7 +2455,7 @@ def ONNXFloorOp:ONNX_Op<"Floor", } def ONNXGRUOp:ONNX_Op<"GRU", - [Pure, OpVersionTrait<14>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let hasCanonicalizer = 1; let summary = "ONNX GRU operation"; let description = [{ @@ -2510,12 +2507,12 @@ def ONNXGRUOp:ONNX_Op<"GRU", * Ht = (1 - zt) (.) ht + zt (.) Ht-1 This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$R, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$R, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, AnyTypeOf<[TensorOf<[I32]>, NoneType]>:$sequence_lens, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$initial_h, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$initial_h, OptionalAttr:$activation_alpha, OptionalAttr:$activation_beta, OptionalAttr:$activations, @@ -2524,8 +2521,8 @@ def ONNXGRUOp:ONNX_Op<"GRU", OptionalAttr:$hidden_size, DefaultValuedAttr:$layout, DefaultValuedAttr:$linear_before_reset); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_h); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_h); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 6; @@ -2907,7 +2904,7 @@ def ONNXGemmOp:ONNX_Op<"Gemm", } def ONNXGlobalAveragePoolOp:ONNX_Op<"GlobalAveragePool", - [Pure, OpVersionTrait<1>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let hasCanonicalizer = 1; let summary = "ONNX GlobalAveragePool 
operation"; let description = [{ @@ -2915,9 +2912,8 @@ def ONNXGlobalAveragePoolOp:ONNX_Op<"GlobalAveragePool", the values in the same channel. This is equivalent to AveragePool with kernel size equal to the spatial dimension of input tensor. }]; - // FIXME(FXML-4136): Remove manual modification of BF16 type support and update upstream operation definition. - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -2972,7 +2968,7 @@ def ONNXGlobalLpPoolOp:ONNX_Op<"GlobalLpPool", } def ONNXGlobalMaxPoolOp:ONNX_Op<"GlobalMaxPool", - [Pure, OpVersionTrait<1>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let hasCanonicalizer = 1; let summary = "ONNX GlobalMaxPool operation"; let description = [{ @@ -2980,8 +2976,8 @@ def ONNXGlobalMaxPoolOp:ONNX_Op<"GlobalMaxPool", the values in the same channel. This is equivalent to MaxPool with kernel size equal to the spatial dimension of input tensor. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -3163,7 +3159,7 @@ def ONNXGridSampleOp:ONNX_Op<"GridSample", } def ONNXGroupNormalizationOp:ONNX_Op<"GroupNormalization", - [Pure, OpVersionTrait<18>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<21>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GroupNormalization operation"; let description = [{ A GroupNormalization function. Carries out group normalization as described in @@ -3219,7 +3215,7 @@ def ONNXGroupNormalizationOp:ONNX_Op<"GroupNormalization", } def ONNXGroupNormalizationV18Op:ONNX_Op<"GroupNormalizationV18", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<18>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GroupNormalization operation"; let description = [{ A GroupNormalization function. Carries out group normalization as described in @@ -3329,18 +3325,17 @@ def ONNXHannWindowOp:ONNX_Op<"HannWindow", } def ONNXHardSigmoidOp:ONNX_Op<"HardSigmoid", - [Pure, OpVersionTrait<6>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX HardSigmoid operation"; let description = [{ HardSigmoid takes one input data (Tensor) and produces one output data (Tensor) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)), is applied to the tensor elementwise. 
}]; - // FIXME(FXML-4138): Remove manual modification of BF16 support when the operation definition is updated upstream - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, DefaultValuedAttr:$alpha, DefaultValuedAttr:$beta); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -3363,15 +3358,15 @@ def ONNXHardSigmoidOp:ONNX_Op<"HardSigmoid", } def ONNXHardSwishOp:ONNX_Op<"HardSwish", - [Pure, OpVersionTrait<14>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX HardSwish operation"; let description = [{ HardSwish takes one input data (Tensor) and produces one output data (Tensor) where the HardSwish function, y = x * max(0, min(1, alpha * x + beta)) = x * HardSigmoid(x), where alpha = 1/6 and beta = 0.5, is applied to the tensor elementwise. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -3508,7 +3503,7 @@ def ONNXIfOp:ONNX_Op<"If", } def ONNXInstanceNormalizationOp:ONNX_Op<"InstanceNormalization", - [Pure, OpVersionTrait<6>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX InstanceNormalization operation"; let description = [{ Carries out instance normalization as described in the paper @@ -3518,11 +3513,11 @@ def ONNXInstanceNormalizationOp:ONNX_Op<"InstanceNormalization", where mean and variance are computed per instance per channel. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$scale, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$B, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$scale, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$B, DefaultValuedAttr:$epsilon); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 3; @@ -3649,7 +3644,7 @@ def ONNXLRNOp:ONNX_Op<"LRN", } def ONNXLSTMOp:ONNX_Op<"LSTM", - [Pure, OpVersionTrait<14>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let hasCanonicalizer = 1; let summary = "ONNX LSTM operation"; let description = [{ @@ -3704,10 +3699,10 @@ def ONNXLSTMOp:ONNX_Op<"LSTM", * Ht = ot (.) h(Ct) This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$R, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$R, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, AnyTypeOf<[TensorOf<[I32]>, NoneType]>:$sequence_lens, AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$initial_h, AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$initial_c, @@ -3720,9 +3715,9 @@ def ONNXLSTMOp:ONNX_Op<"LSTM", OptionalAttr:$hidden_size, DefaultValuedAttr:$input_forget, DefaultValuedAttr:$layout); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_h, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_c); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_h, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_c); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 8; @@ -4207,15 +4202,15 @@ def ONNXLoopOp:ONNX_Op<"Loop", } def ONNXLpNormalizationOp:ONNX_Op<"LpNormalization", - [Pure, OpVersionTrait<1>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let 
summary = "ONNX LpNormalization operation"; let description = [{ Given a matrix, apply Lp-normalization along the provided axis. }]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input, DefaultValuedAttr:$axis, DefaultValuedAttr:$p); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -4238,7 +4233,7 @@ def ONNXLpNormalizationOp:ONNX_Op<"LpNormalization", } def ONNXLpPoolOp:ONNX_Op<"LpPool", - [Pure, OpVersionTrait<18>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LpPool operation"; let description = [{ LpPool consumes an input tensor X and applies Lp pooling across @@ -4265,7 +4260,7 @@ def ONNXLpPoolOp:ONNX_Op<"LpPool", pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + {kernelSpatialShape} - input_spatial_shape[i] ``` }]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, DefaultValuedStrAttr:$auto_pad, DefaultValuedAttr:$ceil_mode, OptionalAttr:$dilations, @@ -4273,7 +4268,7 @@ def ONNXLpPoolOp:ONNX_Op<"LpPool", DefaultValuedAttr:$p, OptionalAttr:$pads, OptionalAttr:$strides); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -4392,7 +4387,7 @@ def ONNXMaxOp:ONNX_Op<"Max", } 
def ONNXMaxPoolOp:ONNX_Op<"MaxPool", - [Pure, OpVersionTrait<12>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX MaxPool operation"; let description = [{ MaxPool consumes an input tensor X and applies max pooling across @@ -4409,7 +4404,7 @@ def ONNXMaxPoolOp:ONNX_Op<"MaxPool", ``` output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - dilation[i] * (kernel_shape[i] - 1) - 1) / strides_spatial_shape[i] + 1) ``` - if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. + if ceil_mode is enabled. `pad_shape[i]` is the sum of pads along axis `i`. Sliding windows that would start in the right padded region are ignored. `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following when ceil_mode is enabled: ``` @@ -4428,8 +4423,7 @@ def ONNXMaxPoolOp:ONNX_Op<"MaxPool", The output of each pooling window is maximum number of elements exclude pad. }]; - // FIXME(FXML-4136): Remove manual modification of BF16 type support and update upstream operation definition. 
- let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[BF16]>]>:$X, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[UI8]>]>:$X, DefaultValuedStrAttr:$auto_pad, DefaultValuedAttr:$ceil_mode, OptionalAttr:$dilations, @@ -4437,7 +4431,7 @@ def ONNXMaxPoolOp:ONNX_Op<"MaxPool", OptionalAttr:$pads, DefaultValuedAttr:$storage_order, OptionalAttr:$strides); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[UI8]>, TensorOf<[BF16]>]>:$Y, + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[UI8]>]>:$Y, AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$Indices); let extraClassDeclaration = [{ static int getNumberOfOperands() { @@ -4461,18 +4455,18 @@ def ONNXMaxPoolOp:ONNX_Op<"MaxPool", } def ONNXMaxRoiPoolOp:ONNX_Op<"MaxRoiPool", - [Pure, OpVersionTrait<1>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX MaxRoiPool operation"; let description = [{ ROI max pool consumes an input tensor X and region of interests (RoIs) to apply max pooling across each RoI, to produce output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]). 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$rois, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$rois, I64ArrayAttr:$pooled_shape, DefaultValuedAttr:$spatial_scale); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 2; @@ -4495,7 +4489,7 @@ def ONNXMaxRoiPoolOp:ONNX_Op<"MaxRoiPool", } def ONNXMaxUnpoolOp:ONNX_Op<"MaxUnpool", - [Pure, OpVersionTrait<11>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX MaxUnpool operation"; let description = [{ MaxUnpool essentially computes the partial inverse of the MaxPool op. @@ -4517,13 +4511,13 @@ def ONNXMaxUnpoolOp:ONNX_Op<"MaxUnpool", which define the exact unpooling op. The attributes typically have the same values as the corresponding pooling op that the unpooling op is trying to invert. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, TensorOf<[I64]>:$I, AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$output_shape, I64ArrayAttr:$kernel_shape, OptionalAttr:$pads, OptionalAttr:$strides); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 3; @@ -4682,7 +4676,7 @@ def ONNXMinOp:ONNX_Op<"Min", } def ONNXMishOp:ONNX_Op<"Mish", - [Pure, OpVersionTrait<18>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, SameOperandsAndResultElementType]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Mish operation"; let description = [{ Mish: A Self Regularized Non-Monotonic Neural Activation Function. 
@@ -4693,8 +4687,8 @@ def ONNXMishOp:ONNX_Op<"Mish", mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x})) ``` }]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -4817,13 +4811,13 @@ def ONNXMulOp:ONNX_Op<"Mul", } def ONNXMultinomialOp:ONNX_Op<"Multinomial", - [Pure, OpVersionTrait<7>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Multinomial operation"; let description = [{ Generate a tensor of samples from a multinomial distribution according to the probabilities of each of the possible outcomes. }]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input, DefaultValuedAttr:$dtype, DefaultValuedAttr:$sample_size, OptionalAttr:$seed); @@ -4891,7 +4885,7 @@ def ONNXNegOp:ONNX_Op<"Neg", } def ONNXNegativeLogLikelihoodLossOp:ONNX_Op<"NegativeLogLikelihoodLoss", - [Pure, OpVersionTrait<13>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX NegativeLogLikelihoodLoss operation"; let description = [{ A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. 
@@ -4996,12 +4990,12 @@ def ONNXNegativeLogLikelihoodLossOp:ONNX_Op<"NegativeLogLikelihoodLoss", // -1.57 ``` }]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input, AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>]>:$target, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$weight, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$weight, OptionalAttr:$ignore_index, DefaultValuedStrAttr:$reduction); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$loss); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$loss); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 3; @@ -6063,7 +6057,7 @@ def ONNXQuantizeLinearOp:ONNX_Op<"QuantizeLinear", } def ONNXRNNOp:ONNX_Op<"RNN", - [Pure, OpVersionTrait<14>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let hasCanonicalizer = 1; let summary = "ONNX RNN operation"; let description = [{ @@ -6108,12 +6102,12 @@ def ONNXRNNOp:ONNX_Op<"RNN", * Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) This operator has **optional** inputs/outputs. See [the doc](IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$R, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, + let arguments = (ins AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$W, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$R, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$B, AnyTypeOf<[TensorOf<[I32]>, NoneType]>:$sequence_lens, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$initial_h, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$initial_h, OptionalAttr:$activation_alpha, OptionalAttr:$activation_beta, DefaultValuedAttr:$activations, @@ -6121,8 +6115,8 @@ def ONNXRNNOp:ONNX_Op<"RNN", DefaultValuedStrAttr:$direction, OptionalAttr:$hidden_size, DefaultValuedAttr:$layout); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y, - AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_h); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y, + AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_h); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 6; @@ -6145,7 +6139,7 @@ def ONNXRNNOp:ONNX_Op<"RNN", } def ONNXRandomNormalOp:ONNX_Op<"RandomNormal", - [Pure, OpVersionTrait<1>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX 
RandomNormal operation"; let description = [{ Generate a tensor with random values drawn from a normal distribution. The shape @@ -6161,7 +6155,7 @@ def ONNXRandomNormalOp:ONNX_Op<"RandomNormal", DefaultValuedAttr:$scale, OptionalAttr:$seed, I64ArrayAttr:$shape); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 0; @@ -6184,7 +6178,7 @@ def ONNXRandomNormalOp:ONNX_Op<"RandomNormal", } def ONNXRandomNormalLikeOp:ONNX_Op<"RandomNormalLike", - [Pure, OpVersionTrait<1>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX RandomNormalLike operation"; let description = [{ Generate a tensor with random values drawn from a normal distribution. @@ -6195,12 +6189,12 @@ def ONNXRandomNormalLikeOp:ONNX_Op<"RandomNormalLike", The 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the TensorProto message, and be valid as an output type. 
}]; - let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$input, + let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$input, OptionalAttr:$dtype, DefaultValuedAttr:$mean, DefaultValuedAttr:$scale, OptionalAttr:$seed); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 1; @@ -6224,7 +6218,7 @@ def ONNXRandomNormalLikeOp:ONNX_Op<"RandomNormalLike", } def ONNXRandomUniformOp:ONNX_Op<"RandomUniform", - [Pure, OpVersionTrait<1>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, OpVersionTrait<22>, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX RandomUniform operation"; let description = [{ Generate a tensor with random values drawn from a uniform distribution. 
The shape @@ -6239,7 +6233,7 @@ def ONNXRandomUniformOp:ONNX_Op<"RandomUniform", DefaultValuedAttr:$low, OptionalAttr:$seed, I64ArrayAttr:$shape); - let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); + let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ static int getNumberOfOperands() { return 0; diff --git a/utils/gen_onnx_mlir.py b/utils/gen_onnx_mlir.py index 45e879b0e1..525b1043b3 100755 --- a/utils/gen_onnx_mlir.py +++ b/utils/gen_onnx_mlir.py @@ -86,8 +86,8 @@ version_dict = { "Abs": [13], - "Acos": [7], - "Acosh": [9], + "Acos": [22], + "Acosh": [22], "Adagrad": [1], "Adam": [1], "Add": [14], @@ -95,13 +95,13 @@ "ArgMax": [13], "ArgMin": [13], "ArrayFeatureExtractor": [1], - "Asin": [7], - "Asinh": [9], - "Atan": [7], - "Atanh": [9], - "AveragePool": [19], + "Asin": [22], + "Asinh": [22], + "Atan": [22], + "Atanh": [22], + "AveragePool": [22], "BatchNormalization": [15, 9], - "Bernoulli": [15], + "Bernoulli": [22], "Binarizer": [1], "BitShift": [11], "BitwiseAnd": [18], @@ -122,41 +122,41 @@ "ConcatFromSequence": [11], "Constant": [19], "ConstantOfShape": [20], - "Conv": [11], + "Conv": [22], "ConvInteger": [10], - "ConvTranspose": [11], - "Cos": [7], - "Cosh": [9], + "ConvTranspose": [22], + "Cos": [22], + "Cosh": [22], "Col2Im": [18], "CumSum": [14], - "DeformConv": [19], + "DeformConv": [22], "DepthToSpace": [13], "DequantizeLinear": [19], - "Det": [11], + "Det": [22], "DFT": [20, 17], "DictVectorizer": [1], "Div": [14], - "Dropout": [13], + "Dropout": [22], "DynamicQuantizeLinear": [11], "Einsum": [12], - "Elu": [6], + "Elu": [22], "Equal": [19], "Erf": [13], "Exp": [13], "Expand": [13], - "EyeLike": [9], + "EyeLike": [22], "FeatureVectorizer": [1], "Flatten": [13], "Floor": [13], - "GRU": [14], + "GRU": [22], "Gather": [13], "GatherElements": [13], "GatherND": [13], "Gelu": [20], "Gemm": [13], - "GlobalAveragePool": [1], + 
"GlobalAveragePool": [22], "GlobalLpPool": [2], - "GlobalMaxPool": [1], + "GlobalMaxPool": [22], "Gradient": [1], "Greater": [13], "GreaterOrEqual": [16], @@ -164,18 +164,18 @@ "GroupNormalization": [21, 18], "HammingWindow": [17], "HannWindow": [17], - "HardSigmoid": [6], + "HardSigmoid": [22], "Hardmax": [13], - "HardSwish": [14], + "HardSwish": [22], "Identity": [19], "If": [19], "Imputer": [1], - "InstanceNormalization": [6], + "InstanceNormalization": [22], "IsInf": [20], "IsNaN": [20], "LayerNormalization": [17], "LRN": [13], - "LSTM": [14], + "LSTM": [22], "LabelEncoder": [2], "LeakyRelu": [16], "Less": [13], @@ -185,25 +185,25 @@ "Log": [13], "LogSoftmax": [13], "Loop": [19], - "LpNormalization": [1], - "LpPool": [18], + "LpNormalization": [22], + "LpPool": [22], "MatMul": [13], "MatMulInteger": [10], "Max": [13], - "MaxPool": [12], - "MaxRoiPool": [1], - "MaxUnpool": [11], + "MaxPool": [22], + "MaxRoiPool": [22], + "MaxUnpool": [22], "Mean": [13], "MeanVarianceNormalization": [13], "MelWeightMatrix": [17], "Min": [13], - "Mish": [18], + "Mish": [22], "Mod": [13], "Momentum": [1], "Mul": [14], - "Multinomial": [7], + "Multinomial": [22], "Neg": [13], - "NegativeLogLikelihoodLoss": [13], + "NegativeLogLikelihoodLoss": [22], "NonMaxSuppression": [11], "NonZero": [13], "Normalizer": [1], @@ -220,11 +220,11 @@ "QLinearConv": [10], "QLinearMatMul": [10], "QuantizeLinear": [19], - "RNN": [14], - "RandomNormal": [1], - "RandomNormalLike": [1], - "RandomUniform": [1], - "RandomUniformLike": [1], + "RNN": [22], + "RandomNormal": [22], + "RandomNormalLike": [22], + "RandomUniform": [22], + "RandomUniformLike": [22], "Range": [11], "Reciprocal": [13], "ReduceL1": [18, 13], @@ -241,8 +241,8 @@ "Reshape": [19], "Resize": [19, 18, 13, 11, 10], "ReverseSequence": [10], - "RoiAlign": [16], - "Round": [11], + "RoiAlign": [22], + "Round": [22], "SVMClassifier": [1], "SVMRegressor": [1], "Scaler": [1], @@ -250,7 +250,7 @@ "Scatter": [11], "ScatterElements": [18], "ScatterND": 
[18], - "Selu": [6], + "Selu": [22], "SequenceAt": [11], "SequenceConstruct": [11], "SequenceEmpty": [11], @@ -262,14 +262,14 @@ "Shrink": [9], "Sigmoid": [13], "Sign": [13], - "Sin": [7], - "Sinh": [9], + "Sin": [22], + "Sinh": [22], "Size": [19], "Slice": [13], "Softmax": [13, 11], "SoftmaxCrossEntropyLoss": [13], - "Softplus": [1], - "Softsign": [1], + "Softplus": [22], + "Softsign": [22], "SpaceToDepth": [13], "Split": [18, 13, 11], "SplitToSequence": [11], @@ -279,10 +279,10 @@ "STFT": [17], "Sub": [14], "Sum": [13], - "Tan": [7], + "Tan": [22], "Tanh": [13], "TfIdfVectorizer": [9], - "ThresholdedRelu": [10], + "ThresholdedRelu": [22], "Tile": [13], "TopK": [11], "Transpose": [13],