From ec314b7c83a9f83fbbb7edfda9124ade59a94d25 Mon Sep 17 00:00:00 2001
From: Ferdinand Lemaire
Date: Wed, 2 Oct 2024 21:54:05 +0200
Subject: [PATCH] Bump llvm to 29b92d07746fac26cd64c914bc9c5c3833974f6d (#2962)

* bump llvm to 29b92d07746fac26cd64c914bc9c5c3833974f6d

Signed-off-by: Ferdinand Lemaire

* Revert changes on refs

Signed-off-by: Ferdinand Lemaire

---------

Signed-off-by: Ferdinand Lemaire
Co-authored-by: hamptonm1 <79232909+hamptonm1@users.noreply.github.com>
---
 docs/BuildOnLinuxOSX.md                       |  2 +-
 docs/BuildOnWindows.md                        |  2 +-
 .../onnx_to_krnl/ControlFlow/Loop.mlir        | 31 ++++----
 .../Normalization_O3_SIMD_canonicalize.mlir   |  4 +-
 .../onnx_lowering_depth_to_space_op.mlir      | 75 +++++++++----------
 .../Tensor/onnx_lowering_space_to_depth.mlir  | 72 +++++++++---------
 third_party/stablehlo                         |  2 +-
 utils/clone-mlir.sh                           |  2 +-
 8 files changed, 93 insertions(+), 97 deletions(-)

diff --git a/docs/BuildOnLinuxOSX.md b/docs/BuildOnLinuxOSX.md
index c0571a37fc..fd93b7e69b 100644
--- a/docs/BuildOnLinuxOSX.md
+++ b/docs/BuildOnLinuxOSX.md
@@ -15,7 +15,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
 ``` bash
 git clone -n https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX-MLIR.
-cd llvm-project && git checkout eaa95a1c2bd38332c1a4e634595f29d22b28ffea && cd ..
+cd llvm-project && git checkout 29b92d07746fac26cd64c914bc9c5c3833974f6d && cd ..
 ```
 
 [same-as-file]: <> (utils/build-mlir.sh)
diff --git a/docs/BuildOnWindows.md b/docs/BuildOnWindows.md
index ad7283a53c..7a891c62ef 100644
--- a/docs/BuildOnWindows.md
+++ b/docs/BuildOnWindows.md
@@ -52,7 +52,7 @@ Install MLIR (as a part of LLVM-Project):
 ```shell
 git clone -n https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX-MLIR.
-cd llvm-project && git checkout eaa95a1c2bd38332c1a4e634595f29d22b28ffea && cd ..
+cd llvm-project && git checkout 29b92d07746fac26cd64c914bc9c5c3833974f6d && cd ..
 ```
 
 [same-as-file]: <> (utils/build-mlir.cmd)
diff --git a/test/mlir/conversion/onnx_to_krnl/ControlFlow/Loop.mlir b/test/mlir/conversion/onnx_to_krnl/ControlFlow/Loop.mlir
index 49ed94b610..838e3a4bca 100644
--- a/test/mlir/conversion/onnx_to_krnl/ControlFlow/Loop.mlir
+++ b/test/mlir/conversion/onnx_to_krnl/ControlFlow/Loop.mlir
@@ -41,19 +41,19 @@ func.func private @test_loop_simple_main_graph(%arg0: tensor, %arg1: tensor
 // CHECK-DAG: [[CST_1_2_:%.+]] = arith.constant 1 : index
 // CHECK-DAG: [[CST_1_3_:%.+]] = arith.constant 1 : index
 // CHECK-DAG: [[RES_3_:%.+]] = memref.alloc() {{.*}}: memref<1xi64>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_8_:%.+]] = builtin.unrealized_conversion_cast [[RES_3_]] : memref<1xi64> to tensor<1xi64>
 // CHECK-DAG: [[CST_0_2_:%.+]] = arith.constant 0 : index
 // CHECK-NOT: separator of consecutive DAGs
 // CHECK-DAG: [[LOAD_RES_MEM_:%.+]] = krnl.load [[RES_]]{{.}}[[CST_0_2_]]{{.}} : memref<1xi64>
 // CHECK-DAG: [[LOAD_RES_2_MEM_:%.+]] = krnl.load [[RES_2_]][] : memref
 // CHECK-NOT: separator of consecutive DAGs
-// CHECK-DAG: [[VAR_10_:%.+]] = arith.addi [[LOAD_RES_MEM_]], [[LOAD_RES_2_MEM_]] : i64
+// CHECK-DAG: [[VAR_11_:%.+]] = arith.addi [[LOAD_RES_MEM_]], [[LOAD_RES_2_MEM_]] : i64
 // CHECK-DAG: [[CST_0_3_:%.+]] = arith.constant 0 : index
-// CHECK: krnl.store [[VAR_10_]], [[RES_3_]]{{.}}[[CST_0_3_]]{{.}} : memref<1xi64>
-// CHECK-DAG: [[VAR_11_:%.+]] = builtin.unrealized_conversion_cast [[RES_3_]] : memref<1xi64> to tensor<1xi64>
+// CHECK: krnl.store [[VAR_11_]], [[RES_3_]]{{.}}[[CST_0_3_]]{{.}} : memref<1xi64>
 // CHECK-DAG: [[VAR_12_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_1_]] : memref to memref
-// CHECK-NOT: separator of consecutive DAGs
-// CHECK-DAG: [[VAR_13_:%.+]] = builtin.unrealized_conversion_cast [[VAR_11_]] : tensor<1xi64> to memref<1xi64>
-// CHECK-DAG: [[LOAD_VAR_12_MEM_:%.+]] = krnl.load [[VAR_12_]][] : memref
+// CHECK-DAG: [[VAR_13_:%.+]] = builtin.unrealized_conversion_cast [[VAR_8_]] : tensor<1xi64> to memref<1xi64>
+// CHECK: [[LOAD_VAR_12_MEM_:%.+]] = krnl.load [[VAR_12_]][] : memref
 // CHECK: krnl.store [[LOAD_VAR_12_MEM_]], [[RES_1_]][] : memref
 // CHECK-DAG: [[LOOP_2_:%.+]] = krnl.define_loops 1
 // CHECK-DAG: [[CST_0_4_:%.+]] = arith.constant 0 : index
@@ -111,8 +111,10 @@ func.func @test_loop(%arg0: tensor, %arg1: tensor, %arg2: tensor
 // CHECK-DAG: [[VAR_dim_7_:%.+]] = memref.dim [[PARAM_2_]], [[CST_0_1_]] : memref
 // CHECK-DAG: [[CST_0_2_:%.+]] = arith.constant 0 : index
 // CHECK: [[VAR_dim_9_:%.+]] = memref.dim [[PARAM_2_]], [[CST_0_2_]] : memref
-// CHECK: [[VAR_11_:%.+]] = affine.max [[MAP_0_]]([[VAR_dim_7_]], [[VAR_dim_9_]])
-// CHECK-DAG: [[RES_3_:%.+]] = memref.alloc([[VAR_11_]]) {{.*}}: memref
+// CHECK-DAG: [[VAR_11_:%.+]] = affine.max [[MAP_0_]]([[VAR_dim_7_]], [[VAR_dim_9_]])
+// CHECK-DAG: [[CST_1_1_:%.+]] = arith.constant 1 : index
+// CHECK: [[RES_3_:%.+]] = memref.alloc([[VAR_11_]]) {{.*}}: memref
+// CHECK-DAG: [[VAR_12_:%.+]] = builtin.unrealized_conversion_cast [[RES_3_]] : memref to tensor
 // CHECK-DAG: [[LOOP_1_:%.+]] = krnl.define_loops 1
 // CHECK-DAG: [[CST_0_3_:%.+]] = arith.constant 0 : index
 // CHECK-DAG: [[CST_0_4_:%.+]] = arith.constant 0 : index
@@ -123,11 +125,9 @@ func.func @test_loop(%arg0: tensor, %arg1: tensor, %arg2: tensor
 // CHECK: [[VAR_20_:%.+]] = arith.addf [[LOAD_PARAM_2_MEM_]], [[LOAD_PARAM_2_MEM_1_]] : f32
 // CHECK: krnl.store [[VAR_20_]], [[RES_3_]]{{.}}[[VAR_17_]]{{.}} : memref
 // CHECK: }
-// CHECK-DAG: [[VAR_13_:%.+]] = builtin.unrealized_conversion_cast [[RES_3_]] : memref to tensor
 // CHECK-DAG: [[VAR_14_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_1_]] : memref to memref
-// CHECK-NOT: separator of consecutive DAGs
-// CHECK-DAG: [[VAR_15_:%.+]] = builtin.unrealized_conversion_cast [[VAR_13_]] : tensor to memref
-// CHECK-DAG: [[LOAD_VAR_14_MEM_:%.+]] = krnl.load [[VAR_14_]][] : memref
+// CHECK-DAG: [[VAR_15_:%.+]] = builtin.unrealized_conversion_cast [[VAR_12_]] : tensor to memref
+// CHECK: [[LOAD_VAR_14_MEM_:%.+]] = krnl.load [[VAR_14_]][] : memref
 // CHECK: krnl.store [[LOAD_VAR_14_MEM_]], [[RES_1_]][] : memref
 // CHECK: "krnl.seqstore"([[VAR_15_]], [[RES_]], [[VAR_8_]]) : (memref, memref>, index) -> ()
 // CHECK: }) : () -> ()
@@ -150,11 +150,10 @@ func.func @test_loop(%arg0: tensor, %arg1: tensor, %arg2: tensor
 // CHECK: [[VAR_dim_7_1_:%.+]] = memref.dim [[LOAD_RES_1_MEM_1_]], [[CST_0_9_]] : memref
 // CHECK: krnl.iterate([[LOOP_3_]]) with ([[LOOP_3_]] -> [[I_3_:%.+]] = 0 to [[MAP_2_]]([[VAR_dim_7_1_]])){
 // CHECK: [[VAR_11_1_:%.+]] = krnl.get_induction_var_value([[LOOP_3_]]) : (!krnl.loop) -> index
-// CHECK: [[LOOP_1_:%.+]] = krnl.load [[LOAD_RES_1_MEM_1_]]{{.}}[[VAR_11_1_]]{{.}} : memref
-// CHECK: krnl.store [[LOOP_1_]], [[RES_4_]]{{.}}[[VAR_8_1_]], [[VAR_11_1_]]{{.}} : memref
+// CHECK: [[VAR_12_1_:%.+]] = krnl.load [[LOAD_RES_1_MEM_1_]]{{.}}[[VAR_11_1_]]{{.}} : memref
+// CHECK: krnl.store [[VAR_12_1_]], [[RES_4_]]{{.}}[[VAR_8_1_]], [[VAR_11_1_]]{{.}} : memref
 // CHECK: }
 // CHECK: }) : () -> ()
 // CHECK: }
 // CHECK: return [[RES_4_]] : memref
-// CHECK: }
-}
+}
\ No newline at end of file
diff --git a/test/mlir/conversion/onnx_to_krnl/NN/Normalization_O3_SIMD_canonicalize.mlir b/test/mlir/conversion/onnx_to_krnl/NN/Normalization_O3_SIMD_canonicalize.mlir
index 3a5354b54d..b7a53ef4d7 100644
--- a/test/mlir/conversion/onnx_to_krnl/NN/Normalization_O3_SIMD_canonicalize.mlir
+++ b/test/mlir/conversion/onnx_to_krnl/NN/Normalization_O3_SIMD_canonicalize.mlir
@@ -433,6 +433,7 @@ func.func @layernorm_4D_with_scale_bias_no_SIMD(%arg0: tensor<2x64x31x3xf32>, %a
 // CHECK: }
 // CHECK: }
 // CHECK-DAG: [[RES_41_:%.+]] = memref.alloc() {{.*}}: memref<2x64x31x3xf32>
+// CHECK-DAG: [[VAR_6_:%.+]] = builtin.unrealized_conversion_cast [[RES_41_]] : memref<2x64x31x3xf32> to tensor<2x64x31x3xf32>
 // CHECK-DAG: [[RES_42_:%.+]] = memref.alloc() {{.*}}: memref<3xindex>
 // CHECK: affine.store [[CST_2_]], [[RES_42_]][0] : memref<3xindex>
 // CHECK: affine.store [[CST_64_]], [[RES_42_]][1] : memref<3xindex>
@@ -467,8 +468,7 @@ func.func @layernorm_4D_with_scale_bias_no_SIMD(%arg0: tensor<2x64x31x3xf32>, %a
 // CHECK: krnl.store [[LOAD_VAR_reshape_MEM_6_1_1_1_1_]], [[VAR_reshape_75_]]{{.}}[[VAR_8_5_]]#0, [[VAR_8_5_]]#1, [[VAR_11_12_]]{{.}} : memref<2x64x93xf32>
 // CHECK: }
 // CHECK: }
-// CHECK: [[VAR_7_:%.+]] = builtin.unrealized_conversion_cast [[RES_41_]] : memref<2x64x31x3xf32> to tensor<2x64x31x3xf32>
-// CHECK: onnx.Return [[VAR_7_]] : tensor<2x64x31x3xf32>
+// CHECK: onnx.Return [[VAR_6_]] : tensor<2x64x31x3xf32>
 // CHECK: }
 }
 
diff --git a/test/mlir/conversion/onnx_to_krnl/Tensor/onnx_lowering_depth_to_space_op.mlir b/test/mlir/conversion/onnx_to_krnl/Tensor/onnx_lowering_depth_to_space_op.mlir
index a3abd4a8b2..d3eaec08ff 100644
--- a/test/mlir/conversion/onnx_to_krnl/Tensor/onnx_lowering_depth_to_space_op.mlir
+++ b/test/mlir/conversion/onnx_to_krnl/Tensor/onnx_lowering_depth_to_space_op.mlir
@@ -7,47 +7,46 @@ func.func private @test_depth_to_space_dynamic_dims(%arg0 : tensor<1x?x8x?xf32>)
   %0 = "onnx.DepthToSpace"(%arg0) {blocksize = 4 : si64} : (tensor<1x?x8x?xf32>) -> tensor<1x?x32x?xf32>
"onnx.DepthToSpace"(%arg0) {blocksize = 4 : si64} : (tensor<1x?x8x?xf32>) -> tensor<1x?x32x?xf32> "func.return"(%0) : (tensor<1x?x32x?xf32>) -> () -// CHECK-DAG: [[MAP_1_:#.+]] = affine_map<()[s0] -> (s0 floordiv 16)> -// CHECK-DAG: [[MAP_2_:#.+]] = affine_map<()[s0] -> (s0 * 4)> -// CHECK-LABEL: func private @test_depth_to_space_dynamic_dims +// CHECK-DAG: [[MAP_0_:#.+]] = affine_map<()[s0] -> (s0 floordiv 16)> +// CHECK-DAG: [[MAP_1_:#.+]] = affine_map<()[s0] -> (s0 * 4)> +// CHECK-LABEL: func.func private @test_depth_to_space_dynamic_dims // CHECK-SAME: ([[PARAM_0_:%.+]]: memref<1x?x8x?xf32>) -> memref<1x?x32x?xf32> { -// CHECK-DAG: [[VAR_c3_:%.+]] = arith.constant 3 : index -// CHECK-DAG: [[VAR_c2_:%.+]] = arith.constant 2 : index -// CHECK-DAG: [[VAR_c1_:%.+]] = arith.constant 1 : index -// CHECK-DAG: [[VAR_c0_:%.+]] = arith.constant 0 : index -// CHECK-DAG: [[VAR_c32_:%.+]] = arith.constant 32 : index -// CHECK-DAG: [[VAR_c5_:%.+]] = arith.constant 5 : index -// CHECK-DAG: [[VAR_c4_:%.+]] = arith.constant 4 : index -// CHECK-DAG: [[VAR_c8_:%.+]] = arith.constant 8 : index -// CHECK-DAG: [[VAR_5_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_0_]] : memref<1x?x8x?xf32> to tensor<1x?x8x?xf32> +// CHECK-DAG: [[CST_5_:%.+]] = arith.constant 5 : index +// CHECK-DAG: [[CST_2_:%.+]] = arith.constant 2 : index +// CHECK-DAG: [[CST_0_:%.+]] = arith.constant 0 : index +// CHECK-DAG: [[CST_32_:%.+]] = arith.constant 32 : index +// CHECK-DAG: [[CST_4_:%.+]] = arith.constant 4 : index +// CHECK-DAG: [[CST_3_:%.+]] = arith.constant 3 : index +// CHECK-DAG: [[CST_8_:%.+]] = arith.constant 8 : index +// CHECK-DAG: [[CST_1_:%.+]] = arith.constant 1 : index // CHECK-NOT: separator of consecutive DAGs -// CHECK-DAG: [[VAR_0_:%.+]] = memref.dim [[PARAM_0_]], [[VAR_c1_]] : memref<1x?x8x?xf32> -// CHECK-DAG: [[VAR_1_:%.+]] = memref.dim [[PARAM_0_]], [[VAR_c3_]] : memref<1x?x8x?xf32> +// CHECK-DAG: [[VAR_dim_:%.+]] = memref.dim [[PARAM_0_]], [[CST_1_]] : memref<1x?x8x?xf32> +// CHECK-DAG: [[VAR_dim_0_:%.+]] = memref.dim [[PARAM_0_]], [[CST_3_]] : memref<1x?x8x?xf32> // CHECK-NOT: separator of consecutive DAGs -// CHECK-DAG: [[VAR_2_:%.+]] = affine.apply [[MAP_1_]](){{.}}[[VAR_0_]]{{.}} -// CHECK-DAG: [[VAR_3_:%.+]] = affine.apply [[MAP_2_]](){{.}}[[VAR_1_]]{{.}} +// CHECK-DAG: [[VAR_0_:%.+]] = affine.apply [[MAP_0_]](){{.}}[[VAR_dim_]]{{.}} +// CHECK-DAG: [[VAR_1_:%.+]] = affine.apply [[MAP_1_]](){{.}}[[VAR_dim_0_]]{{.}} // CHECK-DAG: [[RES_:%.+]] = memref.alloc() {{.*}}: memref<6xindex> -// CHECK: krnl.store [[VAR_c1_]], [[RES_]]{{.}}[[VAR_c0_]]{{.}} : memref<6xindex> -// CHECK: krnl.store [[VAR_c4_]], [[RES_]]{{.}}[[VAR_c1_]]{{.}} : memref<6xindex> -// CHECK: krnl.store [[VAR_c4_]], [[RES_]]{{.}}[[VAR_c2_]]{{.}} : memref<6xindex> -// CHECK: krnl.store [[VAR_2_]], [[RES_]]{{.}}[[VAR_c3_]]{{.}} : memref<6xindex> -// CHECK: krnl.store [[VAR_c8_]], [[RES_]]{{.}}[[VAR_c4_]]{{.}} : memref<6xindex> -// CHECK: krnl.store [[VAR_1_]], [[RES_]]{{.}}[[VAR_c5_]]{{.}} : memref<6xindex> -// CHECK-DAG: [[VAR_6_:%.+]] = builtin.unrealized_conversion_cast [[RES_]] : memref<6xindex> to tensor<6xi64> -// CHECK: [[VAR_7_:%.+]] = "onnx.Reshape"([[VAR_5_]], [[VAR_6_]]) {allowzero = 0 : si64} : (tensor<1x?x8x?xf32>, tensor<6xi64>) -> tensor -// CHECK: [[VAR_8_:%.+]] = builtin.unrealized_conversion_cast [[VAR_7_]] : tensor to memref -// CHECK: [[VAR_9_:%.+]] = memref.cast [[VAR_8_]] : memref to memref<1x4x4x?x8x?xf32> -// CHECK: [[VAR_10_:%.+]] = builtin.unrealized_conversion_cast [[VAR_9_]] : memref<1x4x4x?x8x?xf32> to 
tensor<1x4x4x?x8x?xf32> -// CHECK-DAG: [[VAR_11_:%.+]] = "onnx.Transpose"([[VAR_10_]]) {perm = [0, 3, 4, 1, 5, 2]} : (tensor<1x4x4x?x8x?xf32>) -> tensor<1x?x8x4x?x4xf32> +// CHECK: krnl.store [[CST_1_]], [[RES_]]{{.}}[[CST_0_]]{{.}} : memref<6xindex> +// CHECK: krnl.store [[CST_4_]], [[RES_]]{{.}}[[CST_1_]]{{.}} : memref<6xindex> +// CHECK: krnl.store [[CST_4_]], [[RES_]]{{.}}[[CST_2_]]{{.}} : memref<6xindex> +// CHECK: krnl.store [[VAR_0_]], [[RES_]]{{.}}[[CST_3_]]{{.}} : memref<6xindex> +// CHECK: krnl.store [[CST_8_]], [[RES_]]{{.}}[[CST_4_]]{{.}} : memref<6xindex> +// CHECK: krnl.store [[VAR_dim_0_]], [[RES_]]{{.}}[[CST_5_]]{{.}} : memref<6xindex> +// CHECK-DAG: [[VAR_2_:%.+]] = builtin.unrealized_conversion_cast [[RES_]] : memref<6xindex> to tensor<6xi64> +// CHECK-DAG: [[VAR_3_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_0_]] : memref<1x?x8x?xf32> to tensor<1x?x8x?xf32> +// CHECK: [[VAR_4_:%.+]] = "onnx.Reshape"([[VAR_3_]], [[VAR_2_]]) {allowzero = 0 : si64} : (tensor<1x?x8x?xf32>, tensor<6xi64>) -> tensor +// CHECK: [[VAR_5_:%.+]] = builtin.unrealized_conversion_cast [[VAR_4_]] : tensor to memref +// CHECK: [[VAR_cast_:%.+]] = memref.cast [[VAR_5_]] : memref to memref<1x4x4x?x8x?xf32> +// CHECK: [[VAR_6_:%.+]] = builtin.unrealized_conversion_cast [[VAR_cast_]] : memref<1x4x4x?x8x?xf32> to tensor<1x4x4x?x8x?xf32> +// CHECK-DAG: [[VAR_7_:%.+]] = "onnx.Transpose"([[VAR_6_]]) {perm = [0, 3, 4, 1, 5, 2]} : (tensor<1x4x4x?x8x?xf32>) -> tensor<1x?x8x4x?x4xf32> // CHECK-DAG: [[RES_1_:%.+]] = memref.alloc() {{.*}}: memref<4xindex> -// CHECK: krnl.store [[VAR_c1_]], [[RES_1_]]{{.}}[[VAR_c0_]]{{.}} : memref<4xindex> -// CHECK: krnl.store [[VAR_2_]], [[RES_1_]]{{.}}[[VAR_c1_]]{{.}} : memref<4xindex> -// CHECK: krnl.store [[VAR_c32_]], [[RES_1_]]{{.}}[[VAR_c2_]]{{.}} : memref<4xindex> -// CHECK: krnl.store [[VAR_3_]], [[RES_1_]]{{.}}[[VAR_c3_]]{{.}} : memref<4xindex> -// CHECK: [[VAR_13_:%.+]] = builtin.unrealized_conversion_cast [[RES_1_]] : memref<4xindex> to tensor<4xi64> -// CHECK: [[VAR_14_:%.+]] = "onnx.Reshape"([[VAR_11_]], [[VAR_13_]]) {allowzero = 0 : si64} : (tensor<1x?x8x4x?x4xf32>, tensor<4xi64>) -> tensor -// CHECK: [[VAR_15_:%.+]] = builtin.unrealized_conversion_cast [[VAR_14_]] : tensor to memref -// CHECK: [[VAR_16_:%.+]] = memref.cast [[VAR_15_]] : memref to memref<1x?x32x?xf32> -// CHECK: return [[VAR_16_]] : memref<1x?x32x?xf32> -// CHECK: } +// CHECK: krnl.store [[CST_1_]], [[RES_1_]]{{.}}[[CST_0_]]{{.}} : memref<4xindex> +// CHECK: krnl.store [[VAR_0_]], [[RES_1_]]{{.}}[[CST_1_]]{{.}} : memref<4xindex> +// CHECK: krnl.store [[CST_32_]], [[RES_1_]]{{.}}[[CST_2_]]{{.}} : memref<4xindex> +// CHECK: krnl.store [[VAR_1_]], [[RES_1_]]{{.}}[[CST_3_]]{{.}} : memref<4xindex> +// CHECK: [[VAR_8_:%.+]] = builtin.unrealized_conversion_cast [[RES_1_]] : memref<4xindex> to tensor<4xi64> +// CHECK: [[VAR_9_:%.+]] = "onnx.Reshape"([[VAR_7_]], [[VAR_8_]]) {allowzero = 0 : si64} : (tensor<1x?x8x4x?x4xf32>, tensor<4xi64>) -> tensor +// CHECK: [[VAR_10_:%.+]] = builtin.unrealized_conversion_cast [[VAR_9_]] : tensor to memref +// CHECK: [[VAR_cast_2_:%.+]] = memref.cast [[VAR_10_]] : memref to memref<1x?x32x?xf32> +// CHECK: return [[VAR_cast_2_]] : memref<1x?x32x?xf32> } diff --git a/test/mlir/conversion/onnx_to_krnl/Tensor/onnx_lowering_space_to_depth.mlir b/test/mlir/conversion/onnx_to_krnl/Tensor/onnx_lowering_space_to_depth.mlir index 2cf2526c1d..ec9dbfecc8 100644 --- a/test/mlir/conversion/onnx_to_krnl/Tensor/onnx_lowering_space_to_depth.mlir +++ 
@@ -7,46 +7,44 @@ func.func private @test_space_to_depth_dynamic_dims(%arg0 : tensor<1x?x8x?xf32>)
   %0 = "onnx.SpaceToDepth"(%arg0) {blocksize = 4 : si64} : (tensor<1x?x8x?xf32>) -> tensor<1x?x2x?xf32>
   "func.return"(%0) : (tensor<1x?x2x?xf32>) -> ()
 
-// CHECK-DAG: [[MAP_0_:#.+]] = affine_map<()[s0] -> (s0 * 16)>
-// CHECK-DAG: [[MAP_1_:#.+]] = affine_map<()[s0] -> (s0 floordiv 4)>
-// CHECK-LABEL: func private @test_space_to_depth_dynamic_dims
+// CHECK-DAG: [[MAP_0_:#.+]] = affine_map<()[s0] -> (s0 * 16)>
+// CHECK-DAG: [[MAP_1_:#.+]] = affine_map<()[s0] -> (s0 floordiv 4)>
+// CHECK-LABEL: func.func private @test_space_to_depth_dynamic_dims
 // CHECK-SAME: ([[PARAM_0_:%.+]]: memref<1x?x8x?xf32>) -> memref<1x?x2x?xf32> {
-// CHECK-DAG: [[VAR_c3_:%.+]] = arith.constant 3 : index
-// CHECK-DAG: [[VAR_c2_:%.+]] = arith.constant 2 : index
-// CHECK-DAG: [[VAR_c1_:%.+]] = arith.constant 1 : index
-// CHECK-DAG: [[VAR_c0_:%.+]] = arith.constant 0 : index
-// CHECK-DAG: [[VAR_c5_:%.+]] = arith.constant 5 : index
-// CHECK-DAG: [[VAR_c4_:%.+]] = arith.constant 4 : index
-// CHECK-DAG: [[VAR_5_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_0_]] : memref<1x?x8x?xf32> to tensor<1x?x8x?xf32>
+// CHECK-DAG: [[CST_5_:%.+]] = arith.constant 5 : index
+// CHECK-DAG: [[CST_0_:%.+]] = arith.constant 0 : index
+// CHECK-DAG: [[CST_2_:%.+]] = arith.constant 2 : index
+// CHECK-DAG: [[CST_4_:%.+]] = arith.constant 4 : index
+// CHECK-DAG: [[CST_3_:%.+]] = arith.constant 3 : index
+// CHECK-DAG: [[CST_1_:%.+]] = arith.constant 1 : index
 // CHECK-NOT: separator of consecutive DAGs
-// CHECK-DAG: [[VAR_0_:%.+]] = memref.dim [[PARAM_0_]], [[VAR_c1_]] : memref<1x?x8x?xf32>
-// CHECK-DAG: [[VAR_1_:%.+]] = memref.dim [[PARAM_0_]], [[VAR_c3_]] : memref<1x?x8x?xf32>
+// CHECK-DAG: [[VAR_dim_:%.+]] = memref.dim [[PARAM_0_]], [[CST_1_]] : memref<1x?x8x?xf32>
+// CHECK-DAG: [[VAR_dim_0_:%.+]] = memref.dim [[PARAM_0_]], [[CST_3_]] : memref<1x?x8x?xf32>
 // CHECK-NOT: separator of consecutive DAGs
-// CHECK-DAG: [[VAR_2_:%.+]] = affine.apply [[MAP_0_]](){{.}}[[VAR_0_]]{{.}}
-// CHECK-DAG: [[VAR_3_:%.+]] = affine.apply [[MAP_1_]](){{.}}[[VAR_1_]]{{.}}
+// CHECK-DAG: [[VAR_0_:%.+]] = affine.apply [[MAP_0_]](){{.}}[[VAR_dim_]]{{.}}
+// CHECK-DAG: [[VAR_1_:%.+]] = affine.apply [[MAP_1_]](){{.}}[[VAR_dim_0_]]{{.}}
 // CHECK-DAG: [[RES_:%.+]] = memref.alloc() {{.*}}: memref<6xindex>
-// CHECK: krnl.store [[VAR_c1_]], [[RES_]]{{.}}[[VAR_c0_]]{{.}} : memref<6xindex>
-// CHECK: krnl.store [[VAR_0_]], [[RES_]]{{.}}[[VAR_c1_]]{{.}} : memref<6xindex>
-// CHECK: krnl.store [[VAR_c2_]], [[RES_]]{{.}}[[VAR_c2_]]{{.}} : memref<6xindex>
-// CHECK: krnl.store [[VAR_c4_]], [[RES_]]{{.}}[[VAR_c3_]]{{.}} : memref<6xindex>
-// CHECK: krnl.store [[VAR_3_]], [[RES_]]{{.}}[[VAR_c4_]]{{.}} : memref<6xindex>
-// CHECK: krnl.store [[VAR_c4_]], [[RES_]]{{.}}[[VAR_c5_]]{{.}} : memref<6xindex>
-// CHECK: [[VAR_6_:%.+]] = builtin.unrealized_conversion_cast [[RES_]] : memref<6xindex> to tensor<6xi64>
-// CHECK: [[VAR_7_:%.+]] = "onnx.Reshape"([[VAR_5_]], [[VAR_6_]]) {allowzero = 0 : si64} : (tensor<1x?x8x?xf32>, tensor<6xi64>) -> tensor
-// CHECK: [[VAR_8_:%.+]] = builtin.unrealized_conversion_cast [[VAR_7_]] : tensor to memref
-// CHECK: [[VAR_9_:%.+]] = memref.cast [[VAR_8_]] : memref to memref<1x?x2x4x?x4xf32>
-// CHECK: [[VAR_10_:%.+]] = builtin.unrealized_conversion_cast [[VAR_9_]] : memref<1x?x2x4x?x4xf32> to tensor<1x?x2x4x?x4xf32>
-// CHECK-DAG: [[VAR_11_:%.+]] = "onnx.Transpose"([[VAR_10_]]) {perm = [0, 1, 3, 5, 2, 4]} : (tensor<1x?x2x4x?x4xf32>) -> tensor<1x?x4x4x2x?xf32>
+// CHECK: krnl.store [[CST_1_]], [[RES_]]{{.}}[[CST_0_]]{{.}} : memref<6xindex>
+// CHECK: krnl.store [[VAR_dim_]], [[RES_]]{{.}}[[CST_1_]]{{.}} : memref<6xindex>
+// CHECK: krnl.store [[CST_2_]], [[RES_]]{{.}}[[CST_2_]]{{.}} : memref<6xindex>
+// CHECK: krnl.store [[CST_4_]], [[RES_]]{{.}}[[CST_3_]]{{.}} : memref<6xindex>
+// CHECK: krnl.store [[VAR_1_]], [[RES_]]{{.}}[[CST_4_]]{{.}} : memref<6xindex>
+// CHECK: krnl.store [[CST_4_]], [[RES_]]{{.}}[[CST_5_]]{{.}} : memref<6xindex>
+// CHECK-DAG: [[VAR_2_:%.+]] = builtin.unrealized_conversion_cast [[RES_]] : memref<6xindex> to tensor<6xi64>
+// CHECK-DAG: [[VAR_3_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_0_]] : memref<1x?x8x?xf32> to tensor<1x?x8x?xf32>
+// CHECK: [[VAR_4_:%.+]] = "onnx.Reshape"([[VAR_3_]], [[VAR_2_]]) {allowzero = 0 : si64} : (tensor<1x?x8x?xf32>, tensor<6xi64>) -> tensor
+// CHECK: [[VAR_5_:%.+]] = builtin.unrealized_conversion_cast [[VAR_4_]] : tensor to memref
+// CHECK: [[VAR_cast_:%.+]] = memref.cast [[VAR_5_]] : memref to memref<1x?x2x4x?x4xf32>
+// CHECK: [[VAR_6_:%.+]] = builtin.unrealized_conversion_cast [[VAR_cast_]] : memref<1x?x2x4x?x4xf32> to tensor<1x?x2x4x?x4xf32>
+// CHECK-DAG: [[VAR_7_:%.+]] = "onnx.Transpose"([[VAR_6_]]) {perm = [0, 1, 3, 5, 2, 4]} : (tensor<1x?x2x4x?x4xf32>) -> tensor<1x?x4x4x2x?xf32>
 // CHECK-DAG: [[RES_1_:%.+]] = memref.alloc() {{.*}}: memref<4xindex>
-// CHECK: krnl.store [[VAR_c1_]], [[RES_1_]]{{.}}[[VAR_c0_]]{{.}} : memref<4xindex>
-// CHECK: krnl.store [[VAR_2_]], [[RES_1_]]{{.}}[[VAR_c1_]]{{.}} : memref<4xindex>
-// CHECK: krnl.store [[VAR_c2_]], [[RES_1_]]{{.}}[[VAR_c2_]]{{.}} : memref<4xindex>
-// CHECK: krnl.store [[VAR_3_]], [[RES_1_]]{{.}}[[VAR_c3_]]{{.}} : memref<4xindex>
-// CHECK: [[VAR_13_:%.+]] = builtin.unrealized_conversion_cast [[RES_1_]] : memref<4xindex> to tensor<4xi64>
-// CHECK: [[VAR_14_:%.+]] = "onnx.Reshape"([[VAR_11_]], [[VAR_13_]]) {allowzero = 0 : si64} : (tensor<1x?x4x4x2x?xf32>, tensor<4xi64>) -> tensor
-// CHECK: [[VAR_15_:%.+]] = builtin.unrealized_conversion_cast [[VAR_14_]] : tensor to memref
-// CHECK: [[VAR_16_:%.+]] = memref.cast [[VAR_15_]] : memref to memref<1x?x2x?xf32>
-// CHECK: return [[VAR_16_]] : memref<1x?x2x?xf32>
-// CHECK: }
-
+// CHECK: krnl.store [[CST_1_]], [[RES_1_]]{{.}}[[CST_0_]]{{.}} : memref<4xindex>
+// CHECK: krnl.store [[VAR_0_]], [[RES_1_]]{{.}}[[CST_1_]]{{.}} : memref<4xindex>
+// CHECK: krnl.store [[CST_2_]], [[RES_1_]]{{.}}[[CST_2_]]{{.}} : memref<4xindex>
+// CHECK: krnl.store [[VAR_1_]], [[RES_1_]]{{.}}[[CST_3_]]{{.}} : memref<4xindex>
+// CHECK: [[VAR_8_:%.+]] = builtin.unrealized_conversion_cast [[RES_1_]] : memref<4xindex> to tensor<4xi64>
+// CHECK: [[VAR_9_:%.+]] = "onnx.Reshape"([[VAR_7_]], [[VAR_8_]]) {allowzero = 0 : si64} : (tensor<1x?x4x4x2x?xf32>, tensor<4xi64>) -> tensor
+// CHECK: [[VAR_10_:%.+]] = builtin.unrealized_conversion_cast [[VAR_9_]] : tensor to memref
+// CHECK: [[VAR_cast_2_:%.+]] = memref.cast [[VAR_10_]] : memref to memref<1x?x2x?xf32>
+// CHECK: return [[VAR_cast_2_]] : memref<1x?x2x?xf32>
 }
diff --git a/third_party/stablehlo b/third_party/stablehlo
index e51fd95e5b..9d9290dc23 160000
--- a/third_party/stablehlo
+++ b/third_party/stablehlo
@@ -1 +1 @@
-Subproject commit e51fd95e5b2c28861f22dc9d609fb2a7f002124e
+Subproject commit 9d9290dc2308c1850cea69ea05f8c94017e484ee
diff --git a/utils/clone-mlir.sh b/utils/clone-mlir.sh
index b01bbf0b1f..6bc5bdcb8a 100644
--- a/utils/clone-mlir.sh
+++ b/utils/clone-mlir.sh
@@ -1,3 +1,3 @@
 git clone -n https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX-MLIR.
-cd llvm-project && git checkout eaa95a1c2bd38332c1a4e634595f29d22b28ffea && cd ..
+cd llvm-project && git checkout 29b92d07746fac26cd64c914bc9c5c3833974f6d && cd ..