// RUN: mlir-opt %s \
// RUN:     -transform-interpreter \
// RUN:     -test-transform-dialect-erase-schedule \
// RUN: | FileCheck %s
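//
// Verifies that transform.nvgpu.rewrite_copy_as_tma rewrites the two
// linalg.copy ops below into an nvgpu TMA sequence: tma.create.descriptor,
// tma.async.load, and mbarrier-based synchronization.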

memref.global "private" @bufferLhsGlobal : memref<64x32xf32, #gpu.address_space<workgroup>>
memref.global "private" @bufferRhsGlobal : memref<8x32xf32, #gpu.address_space<workgroup>>

// CHECK-LABEL: func.func @main()
func.func @main() {
  %c1 = arith.constant 1 : index
  %c128 = arith.constant 128 : index

  %0 = gpu.wait async
  %memref, %asyncToken = gpu.alloc async [%0] () : memref<64x32xf32>
  %memref_1, %asyncToken_2 = gpu.alloc async [%0] () : memref<8x32xf32>

  //      CHECK: %[[M1:.*]] = memref.cast %{{.*}} : memref<64x32xf32> to memref<*xf32>
  //      CHECK: %[[c64:.*]] = arith.constant 64 : index
  //      CHECK: %[[c32:.*]] = arith.constant 32 : index
  //      CHECK: %[[D1:.*]] = nvgpu.tma.create.descriptor %[[M1]] box[%[[c64]], %[[c32]]]
  // CHECK-SAME:   : memref<*xf32> -> <tensor = memref<64x32xf32, #gpu.address_space<workgroup>>, swizzle = none, l2promo = none, oob = zero, interleave = none>
  //      CHECK: %[[cast_2:.*]] = memref.cast %memref_0 : memref<8x32xf32> to memref<*xf32>
  //      CHECK: %[[c8_2:.*]] = arith.constant 8 : index
  //      CHECK: %[[c32_2:.*]] = arith.constant 32 : index
  //      CHECK: %[[D2:.*]] = nvgpu.tma.create.descriptor %cast_2 box[%[[c8_2]], %[[c32_2]]]
  // CHECK-SAME:   : memref<*xf32> -> <tensor = memref<8x32xf32, #gpu.address_space<workgroup>>, swizzle = none, l2promo = none, oob = zero, interleave = none>
  // CHECK: gpu.launch
  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
             threads(%tx, %ty, %tz) in (%block_x = %c128, %block_y = %c1, %block_z = %c1) {
    //      CHECK: %[[G1:.*]] = memref.get_global @bufferLhsGlobal : memref<64x32xf32, #gpu.address_space<workgroup>>
    //      CHECK: %[[G2:.*]] = memref.get_global @bufferRhsGlobal : memref<8x32xf32, #gpu.address_space<workgroup>>
    %out = memref.get_global @bufferLhsGlobal : memref<64x32xf32, #gpu.address_space<workgroup>>
    %out_1 = memref.get_global @bufferRhsGlobal : memref<8x32xf32, #gpu.address_space<workgroup>>

    //      CHECK: %[[B:.*]] = nvgpu.mbarrier.create -> <memorySpace = #gpu.address_space<workgroup>
    //      CHECK: nvgpu.mbarrier.init %[[B]][%{{.*}}], %{{.*}} : <memorySpace = #gpu.address_space<workgroup>
    //      CHECK: gpu.barrier
    //
    //      CHECK: %[[c0:.*]] = arith.constant 0 : index
    //      CHECK: %[[TIDX:.*]] = gpu.thread_id  x
    //      CHECK: %[[CMP:.*]] = arith.cmpi eq, %[[TIDX]], %[[c0]] : index
    //
    //      CHECK: scf.if %[[CMP]] {
    //
    //      CHECK:   %[[c0_7:.*]] = arith.constant 0 : index
    //      CHECK:   nvgpu.tma.async.load %[[D1]][%[[c0_7]], %[[c0_7]]], %[[B]][%{{.*}}] to %[[G1]]
    // CHECK-SAME:     : <tensor = memref<64x32xf32, #gpu.address_space<workgroup>>,
    // CHECK-SAME:        swizzle = none, l2promo = none, oob = zero, interleave = none>, <memorySpace = #gpu.address_space<workgroup>
    // CHECK-SAME:     -> memref<64x32xf32, #gpu.address_space<workgroup>>
    //
    //      CHECK:   %[[c0_8:.*]] = arith.constant 0 : index
    //      CHECK:   nvgpu.tma.async.load %[[D2]][%[[c0_8]], %[[c0_8]]], %[[B]][%{{.*}}] to %[[G2]]
    // CHECK-SAME:     : <tensor = memref<8x32xf32, #gpu.address_space<workgroup>>,
    // CHECK-SAME:         swizzle = none, l2promo = none, oob = zero, interleave = none>, <memorySpace = #gpu.address_space<workgroup>
    // CHECK-SAME:    -> memref<8x32xf32, #gpu.address_space<workgroup>>
    //
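    /// Expect 9216 = (64*32 + 8*32) * 4 bytes, i.e. the combined size of the
    /// two f32 TMA loads above.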
    //      CHECK:   %[[c9216:.*]] = arith.constant 9216 : index
    //      CHECK:   nvgpu.mbarrier.arrive.expect_tx %[[B]][%{{.*}}], %[[c9216]] : <memorySpace = #gpu.address_space<workgroup>
    //      CHECK: } else {
    //      CHECK:   %[[c0_7:.*]] = arith.constant 0 : index
    //      CHECK:   nvgpu.mbarrier.arrive.expect_tx %[[B]][%{{.*}}], %[[c0_7]] : <memorySpace = #gpu.address_space<workgroup>
    //      CHECK: }
    //
    //      CHECK: %[[c0_6:.*]] = llvm.mlir.constant(false) : i1
    //      CHECK: %[[c10000000:.*]] = arith.constant 10000000 : index
    //      CHECK: nvgpu.mbarrier.try_wait.parity %[[B]][%{{.*}}], %[[c0_6]], %[[c10000000]] : <memorySpace = #gpu.address_space<workgroup>

    /// Both copies are matched and end up in the same async group.
    linalg.copy ins(%memref: memref<64x32xf32>) outs(%out: memref<64x32xf32, #gpu.address_space<workgroup>>)
    linalg.copy ins(%memref_1: memref<8x32xf32>) outs(%out_1: memref<8x32xf32, #gpu.address_space<workgroup>>)

    gpu.terminator
  }

  return
}

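/// The transform script below matches the two linalg.copy ops above and
/// rewrites them as TMA async copies via transform.nvgpu.rewrite_copy_as_tma.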
module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %copy = transform.structured.match ops{["linalg.copy"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    transform.nvgpu.rewrite_copy_as_tma %copy : (!transform.any_op) -> ()
    transform.yield
  }
}