iree: Failure to lower std.dim operations in shape math
Describe the bug: IREE fails to lower `std.dim` operations that arise from shape math.
To Reproduce
$ cat > repro.mlir <<EOF
module {
func @is_finite__nan_and_inf(%arg0: tensor<?x?xf32> {tf._user_specified_name = "args_0"}) -> tensor<?x?xi1> attributes {iree.module.export, iree.reflection = {abi = "sip", abiv = 1 : i32, sip = "I8!S5!k0_0R3!_0"}, tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf.shape<?x?>]} {
%0 = mhlo.constant dense<0x7F800000> : tensor<f32>
%1 = shapex.get_ranked_shape %arg0 : tensor<?x?xf32> -> !shapex.ranked_shape<[?,?]>
%2 = shapex.get_ranked_shape %arg0 : tensor<?x?xf32> -> !shapex.ranked_shape<[?,?]>
%3 = "shapex.ranked_broadcast_shape"(%1, %2) {lhs_broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>, rhs_broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (!shapex.ranked_shape<[?,?]>, !shapex.ranked_shape<[?,?]>) -> !shapex.ranked_shape<[?,?]>
%4 = "shapex.ranked_broadcast_in_dim"(%arg0, %3) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x?xf32>, !shapex.ranked_shape<[?,?]>) -> tensor<?x?xf32>
%5 = "shapex.ranked_broadcast_in_dim"(%arg0, %3) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x?xf32>, !shapex.ranked_shape<[?,?]>) -> tensor<?x?xf32>
%6 = "mhlo.compare"(%4, %5) {comparison_direction = "EQ"} : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xi1>
%7 = "mhlo.abs"(%arg0) : (tensor<?x?xf32>) -> tensor<?x?xf32>
%8 = shapex.get_ranked_shape %7 : tensor<?x?xf32> -> !shapex.ranked_shape<[?,?]>
%9 = "shapex.ranked_broadcast_in_dim"(%7, %8) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x?xf32>, !shapex.ranked_shape<[?,?]>) -> tensor<?x?xf32>
%10 = "shapex.ranked_broadcast_in_dim"(%0, %8) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>, !shapex.ranked_shape<[?,?]>) -> tensor<?x?xf32>
%11 = "mhlo.compare"(%9, %10) {comparison_direction = "NE"} : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xi1>
%12 = shapex.get_ranked_shape %6 : tensor<?x?xi1> -> !shapex.ranked_shape<[?,?]>
%13 = shapex.get_ranked_shape %11 : tensor<?x?xi1> -> !shapex.ranked_shape<[?,?]>
%14 = "shapex.ranked_broadcast_shape"(%12, %13) {lhs_broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>, rhs_broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (!shapex.ranked_shape<[?,?]>, !shapex.ranked_shape<[?,?]>) -> !shapex.ranked_shape<[?,?]>
%15 = "shapex.ranked_broadcast_in_dim"(%6, %14) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x?xi1>, !shapex.ranked_shape<[?,?]>) -> tensor<?x?xi1>
%16 = "shapex.ranked_broadcast_in_dim"(%11, %14) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x?xi1>, !shapex.ranked_shape<[?,?]>) -> tensor<?x?xi1>
%17 = mhlo.and %15, %16 : tensor<?x?xi1>
return %17 : tensor<?x?xi1>
}
}
EOF
$ iree-opt repro.mlir --iree-hal-target-backends=vmla --iree-transformation-pipeline
(note that it’s not just VMLA. This also happens on the other backends).
repro.mlir:20:11: note: see current operation: %13 = hal.interface.load.tensor @legacy_io::@arg5, offset = %c0 : tensor<?x?xi8>
repro.mlir:20:11: error: 'std.dim' op operand #0 must be any tensor or memref type, but got '!vmla.buffer'
%17 = mhlo.and %15, %16 : tensor<?x?xi1>
Here’s the local reproducer: https://gist.github.com/GMNGeoffrey/822da580266b8f459d78add5a04dc6a7 although I’m not sure if that’s actually the root cause or if dim operations are all supposed to be gone by that point.
Additional context
This comes from a failure in our isfinite integration test, which attempts a reasonable lowering of tf.IsFinite to tf.NotEqual(tf.Abs(x), inf). It’s possible that the bug is further upstream in the pipeline. This seems related to shape math. The input IR for that test is
module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 693 : i32}, tf_saved_model.semantics} {
func @"__inference_<lambda>_870"(%arg0: tensor<?x?xf32> {tf._user_specified_name = "args_0", tf_saved_model.index_path = [0]}) -> (tensor<?x?xi1> {tf_saved_model.index_path = []}) attributes {tf._construction_context = "kEagerRuntime", tf._input_shapes = [#tf.shape<?x?>], tf_saved_model.exported_names = ["is_finite__nan_and_inf"]} {
%0 = tf_executor.graph {
%outputs, %control = tf_executor.island wraps "tf.IsFinite"(%arg0) {device = ""} : (tensor<?x?xf32>) -> tensor<?x?xi1>
%outputs_0, %control_1 = tf_executor.island wraps "tf.Identity"(%outputs) {device = ""} : (tensor<?x?xi1>) -> tensor<?x?xi1>
tf_executor.fetch %outputs_0 : tensor<?x?xi1>
}
return %0 : tensor<?x?xi1>
}
}
About this issue
- Original URL
- State: closed
- Created 3 years ago
- Comments: 15 (9 by maintainers)
What’s the actual failure? We need a print-ir-after-all. I doubt there’s anything shape-related here; more likely there is an unconverted op that is keeping an unfolded dim around, and that is what causes the error.