// RUN: mlir-opt %s --transform-interpreter --split-input-file -resolve-shaped-type-result-dims -canonicalize | FileCheck %s

// Demonstrates what happens when peeling the 4th loop (that corresponds to the
// "depth" dimension in depthwise convs) followed by vectorization in the
// presence of _scalable_ vectors (these are introduced through scalable
// tiling). The main goal is to verify that canonicalizations fold away the
// masks in the main loop.

func.func @conv(%arg0: tensor<1x1080x1962x48xi32>, %arg1: tensor<1x43x48xi32>) -> tensor<1x1080x1920x48xi32> {
// CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (-(48 mod s0) + 48)>

// CHECK-LABEL: func.func @conv(
// CHECK-DAG: %[[C_43:.*]] = arith.constant 43 : index
// CHECK-DAG: %[[C_48:.*]] = arith.constant 48 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C4:.*]] = arith.constant 4 : index
// CHECK: %[[VSCALE:.*]] = vector.vscale
// CHECK: %[[VSCALE_X_4:.*]] = arith.muli %[[VSCALE]], %[[C4]] : index

// Loop over the channel/depth dim - the main part after vectorisation (vectorized, no masking)
// CHECK: %[[UB_DEPTH_LOOP:.*]] = affine.apply #[[$MAP]](){{\[}}%[[VSCALE_X_4]]]
// CHECK-NEXT: %[[VAL_21:.*]] = scf.for {{.*}} to %[[UB_DEPTH_LOOP]] step %[[VSCALE_X_4]]
// Loop over the filter width dim
// CHECK: scf.for %{{.*}} = %[[C0]] to %[[C_43]] step %[[C1]] {{.*}} -> (tensor<1x1x4x?xi32>) {
// CHECK-NOT: vector.mask
// CHECK: vector.broadcast {{.*}} : vector<[4]xi32> to vector<1x4x[4]xi32>
// CHECK-NEXT: arith.muli {{.*}} : vector<1x4x[4]xi32>
// CHECK-NEXT: arith.addi {{.*}} : vector<1x4x[4]xi32>
// CHECK-NOT: vector.mask
// CHECK: scf.yield {{.*}} : tensor<1x1x4x?xi32>
// CHECK: }
// CHECK: tensor.insert_slice {{.*}} tensor<1x1x4x?xi32> into tensor<1x1080x1920x48xi32>
// CHECK: scf.yield {{.*}} : tensor<1x1080x1920x48xi32>

// CHECK-NEXT: }

// Loop over the channel/depth dim - the remainder part (not vectorized)
// CHECK: scf.for {{.*}} to %[[C_48]] step %[[VSCALE_X_4]]
// Loop over the filter width dim
// CHECK: scf.for %{{.*}} = %[[C0]] to %[[C_43]] step %[[C1]] {{.*}} -> (tensor<1x1x4x?xi32>) {
// CHECK: linalg.depthwise_conv_1d_nwc_wc {{.*}} -> tensor<1x4x?xi32>
// CHECK: scf.yield %{{.*}} : tensor<1x1x4x?xi32>
// CHECK: }
// CHECK: tensor.insert_slice {{.*}} tensor<1x1x4x?xi32> into tensor<1x1080x1920x48xi32>
// CHECK-NEXT: scf.yield %{{.*}} : tensor<1x1080x1920x48xi32>
// CHECK-NEXT: }

  %0 = tensor.empty() : tensor<1x1080x1920x48xi32>
  %c0_i32 = arith.constant 0 : i32
  %1 = linalg.fill ins(%c0_i32 : i32) outs(%0 : tensor<1x1080x1920x48xi32>) -> tensor<1x1080x1920x48xi32>
  %2 = linalg.depthwise_conv_2d_nhwc_hwc {
    dilations = dense<1> : tensor<2xi64>,
    strides = dense<1> : tensor<2xi64>}
    ins(%arg0, %arg1 : tensor<1x1080x1962x48xi32>, tensor<1x43x48xi32>) outs(%1 : tensor<1x1080x1920x48xi32>) -> tensor<1x1080x1920x48xi32>
  return %2 : tensor<1x1080x1920x48xi32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%root: !transform.any_op {transform.consumed}) {
    // 1. Tile parallel dims
    %1 = transform.structured.match ops{["linalg.depthwise_conv_2d_nhwc_hwc"]} in %root : (!transform.any_op) -> !transform.any_op
    %tiled_linalg_op_0, %loops_1:4 = transform.structured.tile_using_for %1 tile_sizes [1, 1, 4, [4], 0, 0] : (!transform.any_op) -> (!transform.any_op, !transform.op<"scf.for">, !transform.op<"scf.for">, !transform.op<"scf.for">, !transform.op<"scf.for">)

    // 2. Tile reduction dims
    %2 = transform.structured.match ops{["linalg.depthwise_conv_2d_nhwc_hwc"]} in %loops_1#3 : (!transform.op<"scf.for">) -> !transform.any_op
    %tiled_linalg_op_1, %loops_2:2 = transform.structured.tile_using_for %2 tile_sizes [0, 0, 0, 0, 1, 1] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)

    // 3. Decompose the 2D conv into 2 x 1D convs
    %3 = transform.structured.match ops{["linalg.depthwise_conv_2d_nhwc_hwc"]} in %loops_1#3 : (!transform.op<"scf.for">) -> !transform.any_op
    %4 = transform.structured.decompose %3 : (!transform.any_op) -> !transform.any_op

    // 4. Apply loop peeling - only the 4th loop (the channel/depth dim)
    %main_loop, %remainder_loop = transform.loop.peel %loops_1#3 : (!transform.op<"scf.for">) -> (!transform.op<"scf.for">, !transform.op<"scf.for">)
    %5 = transform.structured.match ops{["linalg.depthwise_conv_1d_nwc_wc"]} in %main_loop : (!transform.op<"scf.for">) -> !transform.any_op

    // 5. Vectorize, but only the main loop. The vector sizes (n, w, c, kw)
    // match the tile sizes, so the masks in the main loop can be folded away.
    transform.structured.vectorize %5 vector_sizes [1, 4, [4], 1] : !transform.any_op

    transform.yield
  }
}