// RUN: mlir-opt %s -scf-for-loop-canonicalization | FileCheck %s

// Regression test for -scf-for-loop-canonicalization: the affine.min /
// affine.max chain computed from the scf.forall induction variable %arg0
// (with %arg0 in [0, 2) and tile size 64 over a dim of 128) is expected to
// fold to the constant 64, which the CHECK lines capture as %[[C64]] and
// require as the dynamic size of both memref.subview ops.
func.func @reduce() {
  // CHECK: %[[C64:.*]] = arith.constant 64 : index
  %c2 = arith.constant 2 : index
  %cst_0 = arith.constant -0.000000e+00 : f32
  %0 = memref.alloc() : memref<128x384xf32>
  linalg.fill ins(%cst_0 : f32) outs(%0 : memref<128x384xf32>)
  %2 = memref.alloc() : memref<128xf32>
  linalg.fill ins(%cst_0 : f32) outs(%2 : memref<128xf32>)
  scf.forall (%arg0) in (%c2) {
    // min(128 - 64*d0, 64), then max(0, .), then min(128 - 64*d0, .):
    // for d0 in {0, 1} every step yields 64, so the chain should fold.
    %7 = affine.min affine_map<(d0) -> (d0 * -64 + 128, 64)>(%arg0)
    %8 = affine.max affine_map<(d0) -> (0, d0)>(%7)
    %9 = affine.apply affine_map<(d0) -> (d0 * 64)>(%arg0)
    %10 = affine.min affine_map<(d0, d1) -> (d1 * -64 + 128, d0)>(%8, %arg0)

    // CHECK: memref.subview %{{.*}}[%{{.*}}, 0] [%[[C64]], 384] [1, 1] : memref<128x384xf32> to memref<?x384xf32, {{.*}}>
    // CHECK: memref.subview %{{.*}}[%{{.*}}] [%[[C64]]] [1] : memref<128xf32> to memref<?xf32, {{.*}}>
    %11 = memref.subview %0[%9, 0] [%10, 384] [1, 1] :
      memref<128x384xf32> to memref<?x384xf32, affine_map<(d0, d1)[s0] -> (d0 * 384 + s0 + d1)>>
    %12 = memref.subview %2[%9] [%10] [1] :
      memref<128xf32> to memref<?xf32, affine_map<(d0)[s0] -> (d0 + s0)>>

    // The reduction itself is unchanged by the pass; only its operand types
    // (still dynamically shaped subviews) are checked.
    // CHECK: linalg.generic {{.*}} ins(%{{.*}} : memref<?x384xf32, {{.*}}>) outs(%{{.*}} : memref<?xf32, {{.*}}>)
    linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                                     affine_map<(d0, d1) -> (d0)>],
                    iterator_types = ["parallel", "reduction"]}
      ins(%11 : memref<?x384xf32, affine_map<(d0, d1)[s0] -> (d0 * 384 + s0 + d1)>>)
      outs(%12 : memref<?xf32, affine_map<(d0)[s0] -> (d0 + s0)>>) {
    ^bb0(%arg1: f32, %arg2: f32):
      %14 = arith.addf %arg1, %arg2 : f32
      linalg.yield %14 : f32
    }
  }
  return
}