// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s

// CHECK: #[[$div4:.*]] = affine_map<()[s0] -> (s0 floordiv 4)>
// CHECK: #[[$mod4:.*]] = affine_map<()[s0] -> (s0 mod 4)>
// CHECK: #[[$div4p8:.*]] = affine_map<()[s0] -> (s0 floordiv 4 + 8)>
// CHECK: #[[$map3:.*]] = affine_map<()[s0] -> (s0 * 2 - (s0 floordiv 4) * 8)>
// CHECK: #[[$map4:.*]] = affine_map<()[s0] -> (s0 * 2 - (s0 floordiv 4) * 8 + 1)>

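// Check that an f32 linalg.matmul of shape 16x8x4 on global memory is rewritten
// into a single nvgpu.mma.sync with tf32Enabled: each thread loads its A, B and
// C fragments at coordinates derived from its thread id via the affine maps
// above, and stores its slice of the result fragment back to C.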
// CHECK-LABEL: func.func @matmul_16x8x4xf32_global
func.func @matmul_16x8x4xf32_global(
    %A: memref<16x4xf32>, %B: memref<4x8xf32>, %C: memref<16x8xf32>) {
// CHECK-SAME:                                        %[[VAL_0:.*]]: memref<16x4xf32>,
// CHECK-SAME:                                        %[[VAL_1:.*]]: memref<4x8xf32>,
// CHECK-SAME:                                        %[[VAL_2:.*]]: memref<16x8xf32>) {

// CHECK:           %[[TIDX:.*]] = gpu.thread_id  x
// CHECK:           %[[VAL_4:.*]] = affine.apply #[[$div4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_5:.*]] = affine.apply #[[$mod4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_6:.*]] = memref.load %[[VAL_0]][%[[VAL_4]], %[[VAL_5]]] : memref<16x4xf32>
// CHECK:           %[[VAL_7:.*]] = affine.apply #[[$div4p8]]()[%[[TIDX]]]
// CHECK:           %[[VAL_8:.*]] = affine.apply #[[$mod4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_0]][%[[VAL_7]], %[[VAL_8]]] : memref<16x4xf32>
// CHECK:           %[[VAL_10:.*]] = vector.splat %[[VAL_6]] : vector<2x1xf32>
// CHECK:           %[[VAL_11:.*]] = vector.insert %[[VAL_6]], %[[VAL_10]] [0, 0] : f32 into vector<2x1xf32>
// CHECK:           %[[LHS:.*]] = vector.insert %[[VAL_9]], %[[VAL_11]] [1, 0] : f32 into vector<2x1xf32>
//
// CHECK:           %[[VAL_13:.*]] = affine.apply #[[$mod4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_14:.*]] = affine.apply #[[$div4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_1]][%[[VAL_13]], %[[VAL_14]]] : memref<4x8xf32>
// CHECK:           %[[VAL_16:.*]] = vector.splat %[[VAL_15]] : vector<1x1xf32>
// CHECK:           %[[RHS:.*]] = vector.insert %[[VAL_15]], %[[VAL_16]] [0, 0] : f32 into vector<1x1xf32>
//
// CHECK:           %[[VAL_18:.*]] = affine.apply #[[$div4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_19:.*]] = affine.apply #[[$map3]]()[%[[TIDX]]]
// CHECK:           %[[VAL_20:.*]] = memref.load %[[VAL_2]][%[[VAL_18]], %[[VAL_19]]] : memref<16x8xf32>
// CHECK:           %[[VAL_21:.*]] = affine.apply #[[$div4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_22:.*]] = affine.apply #[[$map4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_23:.*]] = memref.load %[[VAL_2]][%[[VAL_21]], %[[VAL_22]]] : memref<16x8xf32>
// CHECK:           %[[VAL_24:.*]] = affine.apply #[[$div4p8]]()[%[[TIDX]]]
// CHECK:           %[[VAL_25:.*]] = affine.apply #[[$map3]]()[%[[TIDX]]]
// CHECK:           %[[VAL_26:.*]] = memref.load %[[VAL_2]][%[[VAL_24]], %[[VAL_25]]] : memref<16x8xf32>
// CHECK:           %[[VAL_27:.*]] = affine.apply #[[$div4p8]]()[%[[TIDX]]]
// CHECK:           %[[VAL_28:.*]] = affine.apply #[[$map4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_29:.*]] = memref.load %[[VAL_2]][%[[VAL_27]], %[[VAL_28]]] : memref<16x8xf32>
// CHECK:           %[[VAL_30:.*]] = vector.splat %[[VAL_20]] : vector<2x2xf32>
// CHECK:           %[[VAL_31:.*]] = vector.insert %[[VAL_20]], %[[VAL_30]] [0, 0] : f32 into vector<2x2xf32>
// CHECK:           %[[VAL_32:.*]] = vector.insert %[[VAL_23]], %[[VAL_31]] [0, 1] : f32 into vector<2x2xf32>
// CHECK:           %[[VAL_33:.*]] = vector.insert %[[VAL_26]], %[[VAL_32]] [1, 0] : f32 into vector<2x2xf32>
// CHECK:           %[[RES:.*]] = vector.insert %[[VAL_29]], %[[VAL_33]] [1, 1] : f32 into vector<2x2xf32>
//
// CHECK:           %[[VAL_35:.*]] = nvgpu.mma.sync(%[[LHS]], %[[RHS]], %[[RES]]) {mmaShape = [16, 8, 4], tf32Enabled} : (vector<2x1xf32>, vector<1x1xf32>, vector<2x2xf32>) -> vector<2x2xf32>
//
// CHECK:           %[[VAL_36:.*]] = vector.extract %[[VAL_35]][0, 0] : f32 from vector<2x2xf32>
// CHECK:           %[[VAL_37:.*]] = vector.extract %[[VAL_35]][0, 1] : f32 from vector<2x2xf32>
// CHECK:           %[[VAL_38:.*]] = vector.extract %[[VAL_35]][1, 0] : f32 from vector<2x2xf32>
// CHECK:           %[[VAL_39:.*]] = vector.extract %[[VAL_35]][1, 1] : f32 from vector<2x2xf32>
// CHECK:           %[[VAL_40:.*]] = affine.apply #[[$div4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_41:.*]] = affine.apply #[[$map3]]()[%[[TIDX]]]
// CHECK:           memref.store %[[VAL_36]], %[[VAL_2]][%[[VAL_40]], %[[VAL_41]]] : memref<16x8xf32>
// CHECK:           %[[VAL_42:.*]] = affine.apply #[[$div4]]()[%[[TIDX]]]
// CHECK:           %[[VAL_43:.*]] = affine.apply #[[$map4]]()[%[[TIDX]]]
// CHECK:           memref.store %[[VAL_37]], %[[VAL_2]][%[[VAL_42]], %[[VAL_43]]] : memref<16x8xf32>
// CHECK:           %[[VAL_44:.*]] = affine.apply #[[$div4p8]]()[%[[TIDX]]]
// CHECK:           %[[VAL_45:.*]] = affine.apply #[[$map3]]()[%[[TIDX]]]
// CHECK:           memref.store %[[VAL_38]], %[[VAL_2]][%[[VAL_44]], %[[VAL_45]]] : memref<16x8xf32>
// CHECK:           %[[VAL_46:.*]] = affine.apply #[[$div4p8]]()[%[[TIDX]]]
// CHECK:           %[[VAL_47:.*]] = affine.apply #[[$map4]]()[%[[TIDX]]]
// CHECK:           memref.store %[[VAL_39]], %[[VAL_2]][%[[VAL_46]], %[[VAL_47]]] : memref<16x8xf32>
// CHECK:           return
// CHECK:         }
  linalg.matmul ins(%A, %B: memref<16x4xf32>, memref<4x8xf32>)
            outs(%C: memref<16x8xf32>)
  return
}

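// Transform script: match every linalg.matmul in the payload and rewrite it
// in place as an nvgpu.mma.sync.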
module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    transform.nvgpu.rewrite_matmul_as_mma_sync %matmul
      : (!transform.any_op) -> ()
    transform.yield
  }
}

// -----

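// Check that an f16 linalg.matmul of shape 16x8x16 on global memory is
// rewritten into a single nvgpu.mma.sync on 4x2 (A), 2x2 (B) and 2x2 (C)
// vector fragments; only the fragment shapes are checked here, not the
// per-thread indexing.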
// CHECK-LABEL: func.func @matmul_16x8x16xf16_global
func.func @matmul_16x8x16xf16_global(
    %A: memref<16x16xf16>, %B: memref<16x8xf16>, %C: memref<16x8xf16>) {

  // CHECK-COUNT-8: memref.load {{.*}} : memref<16x16xf16>
  // CHECK-COUNT-8: vector.insert {{.*}} : f16 into vector<4x2xf16>
  // CHECK-COUNT-4: memref.load {{.*}} : memref<16x8xf16>
  // CHECK-COUNT-4: vector.insert {{.*}} : f16 into vector<2x2xf16>
  // CHECK-COUNT-4: memref.load {{.*}} : memref<16x8xf16>
  // CHECK-COUNT-4: vector.insert {{.*}} : f16 into vector<2x2xf16>
  //
  //         CHECK: nvgpu.mma.sync(%{{.*}}) {mmaShape = [16, 8, 16]}
  //    CHECK-SAME:   : (vector<4x2xf16>, vector<2x2xf16>, vector<2x2xf16>) -> vector<2x2xf16>
  //
  // CHECK-COUNT-4: vector.extract %{{.*}} : f16 from vector<2x2xf16>
  // CHECK-COUNT-4: memref.store %{{.*}} : memref<16x8xf16>
  linalg.matmul ins(%A, %B: memref<16x16xf16>, memref<16x8xf16>)
            outs(%C: memref<16x8xf16>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    transform.nvgpu.rewrite_matmul_as_mma_sync %matmul
      : (!transform.any_op) -> ()
    transform.yield
  }
}