diff --git a/compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/Passes.h b/compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/Passes.h
index 7349acb3870b..04c1e8f68c39 100644
--- a/compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/Passes.h
+++ b/compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/Passes.h
@@ -7,7 +7,6 @@
 #ifndef IREE_COMPILER_DIALECT_LINALGEXT_TRANSFORMS_PASSES_H_
 #define IREE_COMPILER_DIALECT_LINALGEXT_TRANSFORMS_PASSES_H_
 
-#include
 #include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtOps.h"
 #include "iree/compiler/Dialect/LinalgExt/Utils/Utils.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
diff --git a/compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/test/conv2d_to_im2col.mlir b/compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/test/conv2d_to_im2col.mlir
index f8d8478e1b39..e827a7f3cfe4 100644
--- a/compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/test/conv2d_to_im2col.mlir
+++ b/compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/test/conv2d_to_im2col.mlir
@@ -1,16 +1,16 @@
-// RUN: iree-opt --split-input-file --pass-pipeline="builtin.module(func.func(iree-linalg-ext-convert-conv2d-to-im2col-op),canonicalize,cse)" %s | FileCheck %s
+// RUN: iree-opt --split-input-file --pass-pipeline="builtin.module(util.func(iree-linalg-ext-convert-conv2d-to-im2col-op))" %s | FileCheck %s
 
-func.func public @conv_2d_nhwc_hwcf(%arg0: tensor<1x16x16x4xf32>, %arg1: tensor<3x3x4x16xf32>, %arg2: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32> {
+util.func public @conv_2d_nhwc_hwcf(%arg0: tensor<1x16x16x4xf32>, %arg1: tensor<3x3x4x16xf32>, %arg2: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32> {
     %0 = linalg.conv_2d_nhwc_hwcf
       {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> }
       ins(%arg0, %arg1: tensor<1x16x16x4xf32>, tensor<3x3x4x16xf32>)
      outs(%arg2: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32>
-    return %0 : tensor<1x14x14x16xf32>
+    util.return %0 : tensor<1x14x14x16xf32>
 }
 // CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
 // CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-// CHECK: func.func public @conv_2d_nhwc_hwcf(
+// CHECK: util.func public @conv_2d_nhwc_hwcf(
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<1x16x16x4xf32>
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<3x3x4x16xf32>
 // CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor<1x14x14x16xf32>
@@ -32,21 +32,21 @@ func.func public @conv_2d_nhwc_hwcf(%arg0: tensor<1x16x16x4xf32>, %arg1: tensor<
 // CHECK: arith.addf
 // CHECK: } -> tensor<1x196x16xf32>
 // CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[MATMUL]] {{\[}}[0], [1, 2], [3]] output_shape [1, 14, 14, 16] : tensor<1x196x16xf32> into tensor<1x14x14x16xf32>
-// CHECK: return %[[EXPANDED]] : tensor<1x14x14x16xf32>
+// CHECK: util.return %[[EXPANDED]] : tensor<1x14x14x16xf32>
 
 // -----
 
-func.func public @conv_2d_nchw_fchw(%arg0: tensor<1x4x16x16xf32>, %arg1: tensor<16x4x3x3xf32>, %arg2: tensor<1x16x14x14xf32>) -> tensor<1x16x14x14xf32> {
+util.func public @conv_2d_nchw_fchw(%arg0: tensor<1x4x16x16xf32>, %arg1: tensor<16x4x3x3xf32>, %arg2: tensor<1x16x14x14xf32>) -> tensor<1x16x14x14xf32> {
     %0 = linalg.conv_2d_nchw_fchw
       {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> }
       ins(%arg0, %arg1: tensor<1x4x16x16xf32>, tensor<16x4x3x3xf32>)
      outs(%arg2: tensor<1x16x14x14xf32>) -> tensor<1x16x14x14xf32>
-    return %0 : tensor<1x16x14x14xf32>
+    util.return %0 : tensor<1x16x14x14xf32>
 }
 // CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d1, d3)>
 // CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d2, d3)>
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-// CHECK: func.func public @conv_2d_nchw_fchw(
+// CHECK: util.func public @conv_2d_nchw_fchw(
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<1x4x16x16xf32>
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<16x4x3x3xf32>
 // CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor<1x16x14x14xf32>
@@ -68,21 +68,21 @@ func.func public @conv_2d_nchw_fchw(%arg0: tensor<1x4x16x16xf32>, %arg1: tensor<
 // CHECK: arith.addf
 // CHECK: } -> tensor<1x16x196xf32>
 // CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[MATMUL]] {{\[}}[0], [1], [2, 3]] output_shape [1, 16, 14, 14] : tensor<1x16x196xf32> into tensor<1x16x14x14xf32>
-// CHECK: return %[[EXPANDED]] : tensor<1x16x14x14xf32>
+// CHECK: util.return %[[EXPANDED]] : tensor<1x16x14x14xf32>
 
 // -----
 
-func.func public @conv_mixed_types(%arg0: tensor<1x16x16x4xf16>, %arg1: tensor<3x3x4x16xf16>, %arg2: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32> {
+util.func public @conv_mixed_types(%arg0: tensor<1x16x16x4xf16>, %arg1: tensor<3x3x4x16xf16>, %arg2: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32> {
     %0 = linalg.conv_2d_nhwc_hwcf
       {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64> }
       ins(%arg0, %arg1: tensor<1x16x16x4xf16>, tensor<3x3x4x16xf16>)
      outs(%arg2: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32>
-    return %0 : tensor<1x14x14x16xf32>
+    util.return %0 : tensor<1x14x14x16xf32>
 }
 // CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
 // CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-// CHECK: func.func public @conv_mixed_types(
+// CHECK: util.func public @conv_mixed_types(
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<1x16x16x4xf16>
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<3x3x4x16xf16>
 // CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor<1x14x14x16xf32>
@@ -106,21 +106,21 @@ func.func public @conv_mixed_types(%arg0: tensor<1x16x16x4xf16>, %arg1: tensor<3
 // CHECK: arith.addf
 // CHECK: } -> tensor<1x196x16xf32>
 // CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[MATMUL]] {{\[}}[0], [1, 2], [3]] output_shape [1, 14, 14, 16] : tensor<1x196x16xf32> into tensor<1x14x14x16xf32>
-// CHECK: return %[[EXPANDED]] : tensor<1x14x14x16xf32>
+// CHECK: util.return %[[EXPANDED]] : tensor<1x14x14x16xf32>
 
 // -----
 
-func.func public @conv_strided(%arg0: tensor<1x16x16x4xf16>, %arg1: tensor<3x3x4x16xf16>, %arg2: tensor<1x7x7x16xf32>) -> tensor<1x7x7x16xf32> {
+util.func public @conv_strided(%arg0: tensor<1x16x16x4xf16>, %arg1: tensor<3x3x4x16xf16>, %arg2: tensor<1x7x7x16xf32>) -> tensor<1x7x7x16xf32> {
    %0 = linalg.conv_2d_nhwc_hwcf
      {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64> }
      ins(%arg0, %arg1: tensor<1x16x16x4xf16>, tensor<3x3x4x16xf16>)
     outs(%arg2: tensor<1x7x7x16xf32>) -> tensor<1x7x7x16xf32>
-    return %0 : tensor<1x7x7x16xf32>
+    util.return %0 : tensor<1x7x7x16xf32>
 }
 // CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>
 // CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d3, d2)>
 // CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
-// CHECK: func.func public @conv_strided(
+// CHECK: util.func public @conv_strided(
 // CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor<1x16x16x4xf16>
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor<3x3x4x16xf16>
 // CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: tensor<1x7x7x16xf32>
@@ -144,4 +144,4 @@ func.func public @conv_strided(%arg0: tensor<1x16x16x4xf16>, %arg1: tensor<3x3x4
 // CHECK: arith.addf
 // CHECK: } -> tensor<1x49x16xf32>
 // CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[MATMUL]] {{\[}}[0], [1, 2], [3]] output_shape [1, 7, 7, 16] : tensor<1x49x16xf32> into tensor<1x7x7x16xf32>
-// CHECK: return %[[EXPANDED]] : tensor<1x7x7x16xf32>
+// CHECK: util.return %[[EXPANDED]] : tensor<1x7x7x16xf32>