author     Spenser Bauman <sbauman@mathworks.com>    2024-05-11 07:28:52 -0400
committer  GitHub <noreply@github.com>               2024-05-11 07:28:52 -0400
commit     9d66dcaf172cdaa78a6709df989bcaaf90d2ccee (patch)
tree       c6b6428fd2ae23e3ceacf17de2f5cf956ed5e988
parent     31774b6a8a88b435ce79f9ba048ef8bb00e2817e (diff)
[mlir][tosa] Work around GCC bug in tosa-to-tensor (#91521)
GCC 12 and 13 generate incorrect code for a pattern in the
tosa-to-tensor pass responsible for lowering tosa.reshape. This results
in the tosa.reshape lowering producing IR which fails to verify.

I've narrowed down the set of cmake flags needed to reproduce the
issue to this:

    cmake -G Ninja ../llvm \
        -DLLVM_ENABLE_PROJECTS="mlir" \
        -DLLVM_TARGETS_TO_BUILD=host \
        -DLLVM_ENABLE_PROJECTS=mlir \
        -DCMAKE_BUILD_TYPE="Release" \
        -DCMAKE_CXX_FLAGS_RELEASE="-O2" \
        -DCMAKE_CXX_FLAGS="-O2" \
        -DCMAKE_CXX_COMPILER=g++ \
        -DCMAKE_C_COMPILER=gcc

This is the failing test case:

    func.func @fails_in_gcc_12(%arg0: tensor<?xf32>) -> tensor<1x1x1x?xf32> {
      %0 = tosa.reshape %arg0 {new_shape = array<i64: 1, 1, 1, -1>} : (tensor<?xf32>) -> tensor<1x1x1x?xf32>
      return %0 : tensor<1x1x1x?xf32>
    }

This should lower to a tensor.expand_shape operation like so:

    func.func @foo(%arg0: tensor<?xf32>) -> tensor<1x1x1x?xf32> {
      %c0 = arith.constant 0 : index
      %dim = tensor.dim %arg0, %c0 : tensor<?xf32>
      %c1 = arith.constant 1 : index
      %expanded = tensor.expand_shape %arg0 [[0, 1, 2, 3]] output_shape [1, 1, 1, %dim] : tensor<?xf32> into tensor<1x1x1x?xf32>
      return %expanded : tensor<1x1x1x?xf32>
    }

Under GCC 12/13 with the above cmake configuration, the
tensor.expand_shape looks like this:

    %2 = "tensor.expand_shape"(%arg0) <{reassociation = [[0, 1, 2, 3]], static_output_shape = array<i64>}> : (tensor<?xf32>) -> tensor<?x1x1x?xf32>

The key difference is the computed output type of `tensor<?x1x1x?xf32>`
rather than the expected `tensor<1x1x1x?xf32>`. This expand_shape fails
to verify with this error message:

    error: 'tensor.expand_shape' op expected number of static shape dims to be equal to the output rank (4) but found 0 inputs instead

The problematic code calculates the intermediate shape of the
generated tensor.expand_shape operation in the
expand_shape/collapse_shape sequence that implements tosa.reshape:

    // Compute result shape
    bool resultIsStatic = true;
    auto resultShape = llvm::map_to_vector(newShape, [&](int64_t size) {
      // Omitted

      // If we do not know the total size of the tensor, keep this dimension
      // dynamic in the result shape.
      if (!inputIsStatic) {
        resultIsStatic = false;
        return ShapedType::kDynamic;
      }
    });

    if (resultIsStatic) {
      // do something
      return;
    }

    // do something else
    return;

The failure point seems to be the update of the resultIsStatic
variable in the lambda body. The assignment of false is not propagated
to the use in the if-statement, resulting in the branch being taken
when it should not be.

I've found several modifications to the code that get around the bug.
The version I settled on is one which makes the logic a little more
obvious.
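For readers who want to see the miscompiled pattern without an MLIR
checkout, it reduces to a by-reference lambda capture that is written
inside a mapping call and read afterwards. The sketch below is a
hypothetical standalone reduction, not the pass itself: mapToVector and
kDynamic stand in for llvm::map_to_vector and ShapedType::kDynamic, and
the placeholder arithmetic is elided.

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for ShapedType::kDynamic.
    constexpr int64_t kDynamic = INT64_MIN;

    // Hypothetical stand-in for llvm::map_to_vector: apply fn to each
    // element of in and collect the results.
    template <typename Fn>
    std::vector<int64_t> mapToVector(const std::vector<int64_t> &in, Fn fn) {
      std::vector<int64_t> out;
      out.reserve(in.size());
      for (int64_t v : in)
        out.push_back(fn(v));
      return out;
    }

    std::vector<int64_t> inferShape(const std::vector<int64_t> &newShape,
                                    bool inputIsStatic) {
      bool resultIsStatic = true;
      auto resultShape = mapToVector(newShape, [&](int64_t size) -> int64_t {
        if (size >= 0)
          return size;
        if (!inputIsStatic) {
          // Under the reported GCC 12/13 miscompile, this write to the
          // captured flag was not visible at the read below.
          resultIsStatic = false;
          return kDynamic;
        }
        return 1; // Placeholder resolution elided for brevity.
      });

      // Miscompiled builds still saw resultIsStatic == true here, so the
      // leading dimension was never made dynamic.
      if (!inputIsStatic && resultIsStatic)
        resultShape[0] = kDynamic;
      return resultShape;
    }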
-rw-r--r--  mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp       11
-rw-r--r--  mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir   15
2 files changed, 20 insertions(+), 6 deletions(-)
diff --git a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
index cd6da3558246..89f956a5e701 100644
--- a/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
+++ b/mlir/lib/Conversion/TosaToTensor/TosaToTensor.cpp
@@ -55,9 +55,8 @@ TensorType inferReshapeExpandedType(TensorType inputType,
   // Check if the input is static, and if so, get its total size
   bool inputIsStatic = inputType.hasStaticShape();
   int64_t totalSize = inputIsStatic ? inputType.getNumElements() : -1;
-
+
   // Compute result shape
-  bool resultIsStatic = true;
   auto resultShape = llvm::map_to_vector(newShape, [&](int64_t size) -> int64_t {
     // If this is not a placeholder, do not change it
     if (size >= 0)
@@ -65,10 +64,8 @@ TensorType inferReshapeExpandedType(TensorType inputType,

     // If we do not know the total size of the tensor, keep this dimension
     // dynamic in the result shape.
-    if (!inputIsStatic) {
-      resultIsStatic = false;
+    if (!inputIsStatic)
       return ShapedType::kDynamic;
-    }

     // Calculate the product of all elements in 'newShape' except for the -1
     // placeholder, which we discard by negating the result.
@@ -84,12 +81,14 @@ TensorType inferReshapeExpandedType(TensorType inputType,
     return totalSize / totalSizeNoPlaceholder;
   });

+  bool resultIsStatic = !ShapedType::isDynamicShape(resultShape);
+
   // A syntactic restriction in 'tensor.expand_shape' forbids a dynamically
   // shaped input from being reshaped into a statically shaped result. We may
   // simply turn the first result dimension dynamic to address this.
   if (!inputIsStatic && resultIsStatic)
     resultShape[0] = ShapedType::kDynamic;
-
+
   // The 'tensor.expand_shape' op also forbids a statically shaped input from
   // being reshaped into a dynamically shaped result, but the placeholder
   // inference algorithm above guarantees that this will never be the case.
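The shape of the fix is the part worth noting: the lambda is now a pure
function of its argument, and staticness is derived from the finished
shape vector, leaving no cross-call mutation for the optimizer to
mishandle. A minimal sketch of that idea, with a std::none_of scan as a
hypothetical stand-in for ShapedType::isDynamicShape:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    constexpr int64_t kDynamic = INT64_MIN;

    // Stand-in for !ShapedType::isDynamicShape(shape): the result is
    // static iff no dimension holds the dynamic sentinel.
    bool shapeIsStatic(const std::vector<int64_t> &shape) {
      return std::none_of(shape.begin(), shape.end(),
                          [](int64_t dim) { return dim == kDynamic; });
    }

    // Post-fix usage: build the shape first, then compute the flag
    // from the completed vector.
    //   auto resultShape = mapToVector(newShape, /* pure lambda */);
    //   bool resultIsStatic = shapeIsStatic(resultShape);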
diff --git a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
index b8c3d56f21f1..72e7e4cc8408 100644
--- a/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
+++ b/mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir
@@ -394,6 +394,21 @@ func.func @test_reshape_6d_down_s2s_auto(%arg0: tensor<1x2x3x5x7x11xf32>) -> ten
 // -----

+// This test would previously fail on GCC with certain compiler flags.
+// The GCC issue would cause invalid IR after tosa-to-tensor, so this test
+// locks down that the code goes through tosa-to-tensor and verifies.
+//
+// See https://github.com/llvm/llvm-project/pull/91521 for a full description.
+
+// CHECK-LABEL: reshape_bug_fix
+// CHECK: tensor.expand_shape
+func.func @reshape_bug_fix(%arg0: tensor<?xf32>) -> tensor<1x1x1x?xf32> {
+  %0 = tosa.reshape %arg0 {new_shape = array<i64: 1, 1, 1, -1>} : (tensor<?xf32>) -> tensor<1x1x1x?xf32>
+  return %0 : tensor<1x1x1x?xf32>
+}
+
+// -----
+
 // CHECK-LABEL: test_reshape_6d_down_s2s_explicit
 // CHECK-SAME: %[[ARG_0:[a-zA-Z0-9_]+]]: tensor<1x2x3x5x7x11xf32>
 // CHECK: %[[VAL_0:.*]] = tensor.collapse_shape %[[ARG_0]] {{\[\[}}0, 1, 2], [3], [4, 5]] : tensor<1x2x3x5x7x11xf32> into tensor<6x5x77xf32>
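To run the lowering over the new test directly, something like the
following should work; the exact mlir-opt invocation here is an
assumption based on the pass's registered name, not a quote of the test
file's RUN line:

    mlir-opt --split-input-file --tosa-to-tensor \
        mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir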