# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn -verify-machineinstrs -run-pass=si-optimize-exec-masking-pre-ra %s -o - | FileCheck -check-prefix=GCN %s

--- |
  define amdgpu_kernel void @call_no_explicit_exec_dependency() {
    unreachable
  }

  declare void @func()

...

# Call should be assumed to read exec
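# SI_CALL carries no explicit $exec operand, so the pass has to conservatively
# treat the call as reading $exec; the exec restore in bb.3 therefore must not
# be collapsed into the one in bb.4 (the checks below keep both S_OR_B64s).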
---
name: call_no_explicit_exec_dependency
tracksRegLiveness: true
liveins:
  - { reg: '$vgpr0', virtual-reg: '%0' }
  - { reg: '$sgpr0_sgpr1', virtual-reg: '%1' }
machineFunctionInfo:
  isEntryFunction: true
body:             |
  ; GCN-LABEL: name: call_no_explicit_exec_dependency
  ; GCN: bb.0:
  ; GCN-NEXT:   successors: %bb.1(0x40000000), %bb.4(0x40000000)
  ; GCN-NEXT:   liveins: $vgpr0, $sgpr0_sgpr1
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr0_sgpr1
  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN-NEXT:   [[V_CMP_LT_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_U32_e64 1, [[COPY1]], implicit $exec
  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
  ; GCN-NEXT:   [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY2]], [[V_CMP_LT_U32_e64_]], implicit-def dead $scc
  ; GCN-NEXT:   $exec = S_MOV_B64_term [[S_AND_B64_]]
  ; GCN-NEXT:   S_CBRANCH_EXECZ %bb.4, implicit $exec
  ; GCN-NEXT:   S_BRANCH %bb.1
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT: bb.1:
  ; GCN-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   undef [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub0_sub1:sgpr_128 = S_LOAD_DWORDX2_IMM [[COPY]], 9, 0 :: (dereferenceable invariant load (s64), align 4, addrspace 4)
  ; GCN-NEXT:   undef [[V_LSHLREV_B32_e32_:%[0-9]+]].sub0:vreg_64 = V_LSHLREV_B32_e32 2, [[COPY1]], implicit $exec
  ; GCN-NEXT:   [[V_LSHLREV_B32_e32_:%[0-9]+]].sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[S_LOAD_DWORDX2_IMM]].sub1
  ; GCN-NEXT:   undef [[V_ADD_CO_U32_e64_:%[0-9]+]].sub0:vreg_64, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64 [[S_LOAD_DWORDX2_IMM]].sub0, [[V_LSHLREV_B32_e32_]].sub0, 0, implicit $exec
  ; GCN-NEXT:   [[V_ADD_CO_U32_e64_:%[0-9]+]].sub1:vreg_64, dead [[V_ADDC_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_ADDC_U32_e64 0, [[COPY3]], [[V_ADD_CO_U32_e64_1]], 0, implicit $exec
  ; GCN-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub3:sgpr_128 = S_MOV_B32 61440
  ; GCN-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 0
  ; GCN-NEXT:   BUFFER_STORE_DWORD_ADDR64 [[V_LSHLREV_B32_e32_]].sub1, [[V_LSHLREV_B32_e32_]], [[S_LOAD_DWORDX2_IMM]], 0, 0, 0, 0, implicit $exec :: (store (s32), addrspace 1)
  ; GCN-NEXT:   [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 2, [[COPY1]], implicit $exec
  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
  ; GCN-NEXT:   [[S_AND_B64_1:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY4]], [[V_CMP_NE_U32_e64_]], implicit-def dead $scc
  ; GCN-NEXT:   $exec = S_MOV_B64_term [[S_AND_B64_1]]
  ; GCN-NEXT:   S_CBRANCH_EXECZ %bb.3, implicit $exec
  ; GCN-NEXT:   S_BRANCH %bb.2
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT: bb.2:
  ; GCN-NEXT:   successors: %bb.3(0x80000000)
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]].sub2
  ; GCN-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub1:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]].sub2
  ; GCN-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
  ; GCN-NEXT:   BUFFER_STORE_DWORD_ADDR64 [[V_MOV_B32_e32_]], [[V_ADD_CO_U32_e64_]], [[S_LOAD_DWORDX2_IMM]], 0, 4, 0, 0, implicit $exec :: (store (s32), addrspace 1)
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT: bb.3:
  ; GCN-NEXT:   successors: %bb.4(0x80000000)
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT:   $exec = S_OR_B64 $exec, [[COPY4]], implicit-def $scc
  ; GCN-NEXT:   [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
  ; GCN-NEXT:   dead [[SI_CALL:%[0-9]+]]:sreg_64 = SI_CALL [[DEF]], @func, csr_amdgpu
  ; GCN-NEXT: {{  $}}
  ; GCN-NEXT: bb.4:
  ; GCN-NEXT:   $exec = S_OR_B64 $exec, [[COPY2]], implicit-def $scc
  ; GCN-NEXT:   [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
  ; GCN-NEXT:   [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
  ; GCN-NEXT:   $m0 = S_MOV_B32 -1
  ; GCN-NEXT:   DS_WRITE_B32 [[V_MOV_B32_e32_2]], [[V_MOV_B32_e32_1]], 0, 0, implicit $m0, implicit $exec :: (store (s32), addrspace 3)
  ; GCN-NEXT:   S_ENDPGM 0
  bb.0:
    successors: %bb.1, %bb.4
    liveins: $vgpr0, $sgpr0_sgpr1

    %1:sgpr_64 = COPY $sgpr0_sgpr1
    %0:vgpr_32 = COPY $vgpr0
    %2:sreg_64 = V_CMP_LT_U32_e64 1, %0, implicit $exec
    %3:sreg_64 = COPY $exec, implicit-def $exec
    %4:sreg_64 = S_AND_B64 %3, %2, implicit-def dead $scc
    $exec = S_MOV_B64_term %4
    S_CBRANCH_EXECZ %bb.4, implicit $exec
    S_BRANCH %bb.1

  bb.1:
    successors: %bb.2, %bb.3

    undef %5.sub0_sub1:sgpr_128 = S_LOAD_DWORDX2_IMM %1, 9, 0 :: (dereferenceable invariant load (s64), align 4, addrspace 4)
    undef %6.sub0:vreg_64 = V_LSHLREV_B32_e32 2, %0, implicit $exec
    %6.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
    %7:vgpr_32 = COPY %5.sub1
    undef %8.sub0:vreg_64, %9:sreg_64_xexec = V_ADD_CO_U32_e64 %5.sub0, %6.sub0, 0, implicit $exec
    %8.sub1:vreg_64, dead %10:sreg_64_xexec = V_ADDC_U32_e64 0, %7, %9, 0, implicit $exec
    %5.sub3:sgpr_128 = S_MOV_B32 61440
    %5.sub2:sgpr_128 = S_MOV_B32 0
    BUFFER_STORE_DWORD_ADDR64 %6.sub1, %6, %5, 0, 0, 0, 0, implicit $exec :: (store (s32), addrspace 1)
    %11:sreg_64 = V_CMP_NE_U32_e64 2, %0, implicit $exec
    %12:sreg_64 = COPY $exec, implicit-def $exec
    %13:sreg_64 = S_AND_B64 %12, %11, implicit-def dead $scc
    $exec = S_MOV_B64_term %13
    S_CBRANCH_EXECZ %bb.3, implicit $exec
    S_BRANCH %bb.2

  bb.2:
    %5.sub0:sgpr_128 = COPY %5.sub2
    %5.sub1:sgpr_128 = COPY %5.sub2
    %14:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
    BUFFER_STORE_DWORD_ADDR64 %14, %8, %5, 0, 4, 0, 0, implicit $exec :: (store (s32), addrspace 1)

  bb.3:
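    ; The S_OR_B64 below restores exec before a call with no explicit $exec
    ; use; since the call is assumed to read $exec, this restore must survive
    ; rather than being merged with the restore at the top of bb.4.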
    $exec = S_OR_B64 $exec, %12, implicit-def $scc
    %20:sreg_64 = IMPLICIT_DEF
    %21:sreg_64 = SI_CALL %20, @func, csr_amdgpu

  bb.4:
    $exec = S_OR_B64 $exec, %3, implicit-def $scc
    %17:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
    %18:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
    $m0 = S_MOV_B32 -1
    DS_WRITE_B32 %18, %17, 0, 0, implicit $m0, implicit $exec :: (store (s32), addrspace 3)
    S_ENDPGM 0

...