[mlir] Add test-convergence option to Canonicalizer tests

This new option is set to `false` by default. It should be enabled only in Canonicalizer tests, to detect faulty canonicalization patterns, i.e., patterns that prevent the canonicalizer from converging. The canonicalizer should always converge on unit tests as small as the ones in `canonicalize.mlir`. Two faulty canonicalization patterns were detected and fixed with this change.

Differential Revision: https://reviews.llvm.org/D140873
Parent: 8a06b2362a
Commit: e7790fbed3
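For context, the sketch below is not part of this patch; it only illustrates the kind of faulty pattern the new option is meant to expose: a pattern that reports success on every application without making real progress, so the greedy rewrite driver never reaches a fixpoint. The op type arith::AddIOp is used purely as a stand-in.

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Hypothetical non-converging pattern: it always matches and always claims to
// have rewritten something, but merely replaces the op with an identical
// clone. The greedy driver re-enqueues the clone forever, so the canonicalizer
// stops only when it hits max-iterations; with test-convergence=true that now
// becomes a visible pass failure instead of a silent give-up.
struct NeverConverges : public OpRewritePattern<arith::AddIOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(arith::AddIOp op,
                                PatternRewriter &rewriter) const override {
    Operation *clone = rewriter.clone(*op);
    rewriter.replaceOp(op, clone->getResults());
    return success(); // Reported as a change on every iteration.
  }
};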
@@ -39,7 +39,9 @@ def Canonicalizer : Pass<"canonicalize"> {
            /*default=*/"10",
            "Max. iterations between applying patterns / simplifying regions">,
     Option<"maxNumRewrites", "max-num-rewrites", "int64_t", /*default=*/"-1",
-           "Max. number of pattern rewrites within an iteration">
+           "Max. number of pattern rewrites within an iteration">,
+    Option<"testConvergence", "test-convergence", "bool", /*default=*/"false",
+           "Test only: Fail pass on non-convergence to detect cyclic pattern">
   ] # RewritePassUtils.options;
 }
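The TableGen Option added above surfaces as an ordinary pass option member on the generated CanonicalizerBase class, which is what the Canonicalizer.cpp hunk further down reads as testConvergence. Roughly (a sketch of the generated shape, not copied from the generated code):

// Approximate member emitted by mlir-tblgen for the new option.
::mlir::Pass::Option<bool> testConvergence{
    *this, "test-convergence",
    ::llvm::cl::desc(
        "Test only: Fail pass on non-convergence to detect cyclic pattern"),
    ::llvm::cl::init(false)};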
@@ -687,6 +687,8 @@ struct FoldLaunchArguments : public OpRewritePattern<LaunchOp> {
       // Check if size is trivially one.
       if (!matchPattern(size, m_One()))
         return;
+      if (id.getUses().empty())
+        return;
       if (!simplified) {
         // Create a zero value the first time.
         OpBuilder::InsertionGuard guard(rewriter);
@@ -694,7 +696,7 @@ struct FoldLaunchArguments : public OpRewritePattern<LaunchOp> {
         zero =
             rewriter.create<arith::ConstantIndexOp>(op.getLoc(), /*value=*/0);
       }
-      id.replaceAllUsesWith(zero);
+      rewriter.replaceAllUsesWith(id, zero);
       simplified = true;
     };
     constPropIdUses(op.getBlockIds().x, op.getGridSizeX());
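The gpu.launch hunk above fixes two things: the helper no longer claims a rewrite when the block/thread id has no uses, and the use replacement now goes through the rewriter instead of mutating the value directly. The sketch below (generic names, not repository code) summarizes why the second point matters to the greedy driver:

// Replacing uses directly on the Value bypasses the PatternRewriter, so the
// greedy driver is never notified and its worklist can drift out of sync with
// the IR; routing the replacement through the rewriter keeps it informed.
void rewriteUses(Value from, Value to, PatternRewriter &rewriter) {
  // from.replaceAllUsesWith(to);          // silent: the driver is not notified
  rewriter.replaceAllUsesWith(from, to);   // notified: the driver sees the change
}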
@@ -178,16 +178,15 @@ struct RemoveConstantIfCondition : public OpRewritePattern<OpTy> {
     // Early return if there is no condition.
     Value ifCond = op.getIfCond();
     if (!ifCond)
-      return success();
+      return failure();
 
     IntegerAttr constAttr;
-    if (matchPattern(ifCond, m_Constant(&constAttr))) {
-      if (constAttr.getInt())
-        rewriter.updateRootInPlace(op,
-                                   [&]() { op.getIfCondMutable().erase(0); });
-      else
-        rewriter.eraseOp(op);
-    }
+    if (!matchPattern(ifCond, m_Constant(&constAttr)))
+      return failure();
+    if (constAttr.getInt())
+      rewriter.updateRootInPlace(op, [&]() { op.getIfCondMutable().erase(0); });
+    else
+      rewriter.eraseOp(op);
 
     return success();
   }
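Both pattern fixes come down to the same matchAndRewrite contract: the return value tells the greedy driver whether the IR changed. RemoveConstantIfCondition previously returned success() even when there was no condition or the condition was not a constant, so the driver kept assuming progress had been made. A generic skeleton of the contract, with a hypothetical MyOp and placeholder helpers:

// The driver interprets success() as "the IR was modified" and schedules more
// work; returning success() without modifying anything prevents convergence.
LogicalResult matchAndRewrite(MyOp op, PatternRewriter &rewriter) const override {
  if (!canSimplify(op))                  // placeholder predicate
    return failure();                    // nothing done: report failure
  rewriter.updateRootInPlace(op, [&] { simplify(op); });  // placeholder rewrite
  return success();                      // only after an actual modification
}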
@@ -57,8 +57,11 @@ struct Canonicalizer : public impl::CanonicalizerBase<Canonicalizer> {
     config.enableRegionSimplification = enableRegionSimplification;
     config.maxIterations = maxIterations;
     config.maxNumRewrites = maxNumRewrites;
+    LogicalResult converged =
+        applyPatternsAndFoldGreedily(getOperation(), patterns, config);
     // Canonicalization is best-effort. Non-convergence is not a pass failure.
-    (void)applyPatternsAndFoldGreedily(getOperation(), patterns, config);
+    if (testConvergence && failed(converged))
+      signalPassFailure();
   }
 
   FrozenRewritePatternSet patterns;
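With the plumbing above, the option is enabled from the command line as mlir-opt -canonicalize="test-convergence" foo.mlir, which is exactly what the updated RUN lines below do. Other passes that drive applyPatternsAndFoldGreedily can perform the same check; a minimal sketch with assumed pass boilerplate and an assumed `patterns` member:

void MyCleanupPass::runOnOperation() {
  GreedyRewriteConfig config;
  config.maxIterations = 10; // same default the canonicalizer uses
  // applyPatternsAndFoldGreedily returns failure() when the patterns did not
  // converge within config.maxIterations.
  if (failed(applyPatternsAndFoldGreedily(getOperation(), patterns, config)))
    signalPassFailure();
}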
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -split-input-file -canonicalize="test-convergence" | FileCheck %s
 
 // CHECK-LABEL: func @known_oob_load
 func.func @known_oob_load(%arg0: memref<4xf32>) -> f32 {
@@ -1,5 +1,5 @@
-// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -canonicalize | FileCheck %s
-// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -canonicalize="top-down=0" | FileCheck %s --check-prefix=CHECK-BOTTOM-UP
+// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -canonicalize="test-convergence" | FileCheck %s
+// RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -canonicalize="test-convergence top-down=0" | FileCheck %s --check-prefix=CHECK-BOTTOM-UP
 
 // -----
 
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize --split-input-file | FileCheck %s
+// RUN: mlir-opt %s -canonicalize="test-convergence" --split-input-file | FileCheck %s
 
 // CHECK-LABEL: @select_same_val
 // CHECK: return %arg1
@@ -1,5 +1,6 @@
-// RUN: mlir-opt %s -canonicalize --split-input-file \
-// RUN:   -allow-unregistered-dialect |\
+// RUN: mlir-opt %s \
+// RUN:   -canonicalize="test-convergence" \
+// RUN:   --split-input-file -allow-unregistered-dialect | \
 // RUN: FileCheck %s
 
 // Basic folding of to_tensor(to_memref(t)) -> t
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -canonicalize="test-convergence" | FileCheck %s
 
 //===----------------------------------------------------------------------===//
 // UnrealizedConversionCastOp
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -canonicalize="test-convergence" | FileCheck %s
 
 // CHECK-LABEL: func @create_of_real_and_imag
 // CHECK-SAME: (%[[CPLX:.*]]: complex<f32>)
@@ -154,4 +154,4 @@ func.func @complex_sub_zero() -> complex<f32> {
   // CHECK-NEXT: return %[[CPLX:.*]] : complex<f32>
   %sub = complex.sub %complex1, %complex2 : complex<f32>
   return %sub : complex<f32>
 }
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline='builtin.module(func.func(canonicalize))' -split-input-file | FileCheck --dump-input-context 20 %s
+// RUN: mlir-opt %s -allow-unregistered-dialect -pass-pipeline='builtin.module(func.func(canonicalize{test-convergence}))' -split-input-file | FileCheck --dump-input-context 20 %s
 
 /// Test the folding of BranchOp.
 
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize --split-input-file -allow-unregistered-dialect | FileCheck %s
+// RUN: mlir-opt %s -canonicalize="test-convergence" --split-input-file -allow-unregistered-dialect | FileCheck %s
 
 // Fold all the gpu.wait ops as they are redundant.
 // CHECK-LABEL: func @fold_wait_op_test1
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -canonicalize %s -split-input-file | FileCheck %s
+// RUN: mlir-opt -canonicalize="test-convergence" %s -split-input-file | FileCheck %s
 
 // CHECK-LABEL: fold_extractvalue
 llvm.func @fold_extractvalue() -> i32 {
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -canonicalize="test-convergence" -split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @memref_cast(
 func.func @memref_cast(%a: index, %b: index) -> memref<?x?xf32> {
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -canonicalize="test-convergence" | FileCheck %s
 
 // CHECK-LABEL: @ceil_fold
 // CHECK: %[[cst:.+]] = arith.constant 1.000000e+00 : f32
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize --split-input-file -allow-unregistered-dialect | FileCheck %s
+// RUN: mlir-opt %s -canonicalize="test-convergence" --split-input-file -allow-unregistered-dialect | FileCheck %s
 
 // CHECK-LABEL: func @subview_of_size_memcast
 // CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: memref<4x6x16x32xi8>
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -canonicalize="test-convergence" -split-input-file | FileCheck %s
 
 func.func @testenterdataop(%a: memref<10xf32>) -> () {
   %ifCond = arith.constant true
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -canonicalize="test-convergence" -split-input-file | FileCheck %s
 
 func.func @update_no_op(%x : memref<i32>) {
   omp.atomic.update %x : memref<i32> {
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -canonicalize %s | FileCheck %s
+// RUN: mlir-opt -canonicalize="test-convergence" %s | FileCheck %s
 
 pdl.pattern @operation_op : benefit(1) {
   %root = operation "foo.op"
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -pass-pipeline='builtin.module(func.func(canonicalize))' | FileCheck %s
+// RUN: mlir-opt %s -split-input-file -pass-pipeline='builtin.module(func.func(canonicalize{test-convergence}))' | FileCheck %s
 
 // -----
 // CHECK-LABEL: redundant_scast
@@ -1,7 +1,4 @@
-// RUN: mlir-opt %s -pass-pipeline='builtin.module(func.func(canonicalize))' -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline='builtin.module(func.func(canonicalize{test-convergence}))' -split-input-file | FileCheck %s
 
-
-// -----
-
 func.func @single_iteration_some(%A: memref<?x?x?xi32>) {
   %c0 = arith.constant 0 : index
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -pass-pipeline='builtin.module(func.func(canonicalize))' | FileCheck %s
+// RUN: mlir-opt %s -split-input-file -pass-pipeline='builtin.module(func.func(canonicalize{test-convergence}))' | FileCheck %s
 
 //===----------------------------------------------------------------------===//
 // spirv.AccessChain
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -split-input-file -allow-unregistered-dialect -canonicalize %s | FileCheck %s
+// RUN: mlir-opt -split-input-file -allow-unregistered-dialect -canonicalize="test-convergence" %s | FileCheck %s
 
 // CHECK-LABEL: func @f
 func.func @f(%arg0: tensor<2x3x4xf32>) -> tensor<3xindex> {
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -split-input-file -canonicalize="test-convergence" | FileCheck %s
 
 // Checks that NOP casts are removed.
 // CHECK-LABEL: cast_values
@@ -1,4 +1,4 @@
-// RUN: mlir-opt --canonicalize %s | FileCheck %s
+// RUN: mlir-opt -canonicalize="test-convergence" %s | FileCheck %s
 
 // CHECK-LABEL: @argmax_nofold
 func.func @argmax_nofold(%arg0: tensor<?x1xf32>) -> tensor<?x1xf32> {
@@ -1,6 +1,4 @@
-// RUN: mlir-opt %s -pass-pipeline='builtin.module(func.func(canonicalize))' -split-input-file -allow-unregistered-dialect | FileCheck %s
+// RUN: mlir-opt %s -canonicalize="test-convergence" -split-input-file -allow-unregistered-dialect | FileCheck %s
 
-// -----
-
 // CHECK-LABEL: create_vector_mask_to_constant_mask
 func.func @create_vector_mask_to_constant_mask() -> (vector<4x3xi1>) {
@@ -14,7 +14,7 @@ func.func @bar() {
 external_resources: {
   mlir_reproducer: {
     verify_each: true,
-    // CHECK: builtin.module(func.func(cse,canonicalize{ max-iterations=1 max-num-rewrites=-1 region-simplify=false top-down=false}))
+    // CHECK: builtin.module(func.func(cse,canonicalize{ max-iterations=1 max-num-rewrites=-1 region-simplify=false test-convergence=false top-down=false}))
     pipeline: "builtin.module(func.func(cse,canonicalize{max-iterations=1 max-num-rewrites=-1 region-simplify=false top-down=false}))",
     disable_threading: true
   }
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.module(func.func(canonicalize))' -split-input-file | FileCheck %s
+// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.module(func.func(canonicalize{test-convergence}))' -split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @test_subi_zero
 func.func @test_subi_zero(%arg0: i32) -> i32 {