; xref: /llvm-project/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll (revision 6548b6354d1d990e1c98736f5e7c3de876bedc8e)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=SI %s
;
; Most SALU instructions ignore control flow, so we need to make sure
; they don't overwrite values from other blocks.

; If the branch decision is made based on a value in an SGPR then all
; threads will execute the same code paths, so we don't need to worry
; about instructions in different blocks overwriting each other.

define amdgpu_kernel void @sgpr_if_else_salu_br(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
; SI-LABEL: sgpr_if_else_salu_br:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0xb
; SI-NEXT:    s_load_dword s6, s[4:5], 0xf
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_cmp_lg_u32 s0, 0
; SI-NEXT:    s_cbranch_scc0 .LBB0_4
; SI-NEXT:  ; %bb.1: ; %else
; SI-NEXT:    s_add_i32 s3, s3, s6
; SI-NEXT:    s_cbranch_execnz .LBB0_3
; SI-NEXT:  .LBB0_2: ; %if
; SI-NEXT:    s_sub_i32 s3, s1, s2
; SI-NEXT:  .LBB0_3: ; %endif
; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x9
; SI-NEXT:    s_add_i32 s0, s3, s0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    v_mov_b32_e32 v0, s0
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
; SI-NEXT:  .LBB0_4:
; SI-NEXT:    ; implicit-def: $sgpr3
; SI-NEXT:    s_branch .LBB0_2

entry:
  %0 = icmp eq i32 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = sub i32 %b, %c
  br label %endif

else:
  %2 = add i32 %d, %e
  br label %endif

endif:
  %3 = phi i32 [%1, %if], [%2, %else]
  %4 = add i32 %3, %a
  store i32 %4, ptr addrspace(1) %out
  ret void
}

; Like @sgpr_if_else_salu_br, but the [8 x i32] padding between arguments
; places each scalar argument at a distinct, non-contiguous kernarg offset.
define amdgpu_kernel void @sgpr_if_else_salu_br_opt(ptr addrspace(1) %out, [8 x i32], i32 %a, [8 x i32], i32 %b, [8 x i32], i32 %c, [8 x i32], i32 %d, [8 x i32], i32 %e) {
; SI-LABEL: sgpr_if_else_salu_br_opt:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dword s2, s[4:5], 0x13
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_cmp_lg_u32 s2, 0
; SI-NEXT:    s_cbranch_scc0 .LBB1_4
; SI-NEXT:  ; %bb.1: ; %else
; SI-NEXT:    s_load_dword s0, s[4:5], 0x2e
; SI-NEXT:    s_load_dword s1, s[4:5], 0x37
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_add_i32 s3, s0, s1
; SI-NEXT:    s_cbranch_execnz .LBB1_3
; SI-NEXT:  .LBB1_2: ; %if
; SI-NEXT:    s_load_dword s0, s[4:5], 0x1c
; SI-NEXT:    s_load_dword s1, s[4:5], 0x25
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_add_i32 s3, s0, s1
; SI-NEXT:  .LBB1_3: ; %endif
; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x9
; SI-NEXT:    s_add_i32 s0, s3, s2
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    v_mov_b32_e32 v0, s0
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
; SI-NEXT:  .LBB1_4:
; SI-NEXT:    ; implicit-def: $sgpr3
; SI-NEXT:    s_branch .LBB1_2

entry:
  %cmp0 = icmp eq i32 %a, 0
  br i1 %cmp0, label %if, label %else

if:
  %add0 = add i32 %b, %c
  br label %endif

else:
  %add1 = add i32 %d, %e
  br label %endif

endif:
  %phi = phi i32 [%add0, %if], [%add1, %else]
  %add2 = add i32 %phi, %a
  store i32 %add2, ptr addrspace(1) %out
  ret void
}

; The two S_ADD instructions should write to different registers, since
; different threads will take different control flow paths.
define amdgpu_kernel void @sgpr_if_else_valu_br(ptr addrspace(1) %out, float %a, i32 %b, i32 %c, i32 %d, i32 %e) {
; SI-LABEL: sgpr_if_else_valu_br:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0xc
; SI-NEXT:    v_cvt_f32_u32_e32 v0, v0
; SI-NEXT:    ; implicit-def: $sgpr8
; SI-NEXT:    v_cmp_lg_f32_e32 vcc, 0, v0
; SI-NEXT:    s_and_saveexec_b64 s[6:7], vcc
; SI-NEXT:    s_xor_b64 s[6:7], exec, s[6:7]
; SI-NEXT:    s_cbranch_execz .LBB2_2
; SI-NEXT:  ; %bb.1: ; %else
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_add_i32 s8, s2, s3
; SI-NEXT:  .LBB2_2: ; %Flow
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_or_saveexec_b64 s[2:3], s[6:7]
; SI-NEXT:    v_mov_b32_e32 v0, s8
; SI-NEXT:    s_xor_b64 exec, exec, s[2:3]
; SI-NEXT:  ; %bb.3: ; %if
; SI-NEXT:    s_add_i32 s0, s0, s1
; SI-NEXT:    v_mov_b32_e32 v0, s0
; SI-NEXT:  ; %bb.4: ; %endif
; SI-NEXT:    s_or_b64 exec, exec, s[2:3]
; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT:    s_mov_b32 s3, 0xf000
; SI-NEXT:    s_mov_b32 s2, -1
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %tid_f = uitofp i32 %tid to float
  %tmp1 = fcmp ueq float %tid_f, 0.0
  br i1 %tmp1, label %if, label %else

if:
  %tmp2 = add i32 %b, %c
  br label %endif

else:
  %tmp3 = add i32 %d, %e
  br label %endif

endif:
  %tmp4 = phi i32 [%tmp2, %if], [%tmp3, %else]
  store i32 %tmp4, ptr addrspace(1) %out
  ret void
}

; Divergent branch where each arm produces an i1 compare result; the phi of
; i1 is merged as a lane mask in an SGPR pair rather than a plain scalar.
define amdgpu_kernel void @sgpr_if_else_valu_cmp_phi_br(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; SI-LABEL: sgpr_if_else_valu_cmp_phi_br:
; SI:       ; %bb.0: ; %entry
; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT:    ; implicit-def: $sgpr8_sgpr9
; SI-NEXT:    s_and_saveexec_b64 s[10:11], vcc
; SI-NEXT:    s_xor_b64 s[10:11], exec, s[10:11]
; SI-NEXT:    s_cbranch_execz .LBB3_2
; SI-NEXT:  ; %bb.1: ; %else
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    v_mov_b32_e32 v1, 0
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    buffer_load_dword v0, v[0:1], s[4:7], 0 addr64
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v0
; SI-NEXT:    s_and_b64 s[8:9], vcc, exec
; SI-NEXT:    ; implicit-def: $vgpr0
; SI-NEXT:  .LBB3_2: ; %Flow
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_andn2_saveexec_b64 s[4:5], s[10:11]
; SI-NEXT:    s_cbranch_execz .LBB3_4
; SI-NEXT:  ; %bb.3: ; %if
; SI-NEXT:    s_mov_b32 s15, 0xf000
; SI-NEXT:    s_mov_b32 s14, 0
; SI-NEXT:    s_mov_b64 s[12:13], s[2:3]
; SI-NEXT:    v_mov_b32_e32 v1, 0
; SI-NEXT:    buffer_load_dword v0, v[0:1], s[12:15], 0 addr64
; SI-NEXT:    s_andn2_b64 s[2:3], s[8:9], exec
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; SI-NEXT:    s_and_b64 s[6:7], vcc, exec
; SI-NEXT:    s_or_b64 s[8:9], s[2:3], s[6:7]
; SI-NEXT:  .LBB3_4: ; %endif
; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
; SI-NEXT:    s_mov_b32 s3, 0xf000
; SI-NEXT:    s_mov_b32 s2, -1
; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[8:9]
; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT:    s_endpgm
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #0
  %tmp1 = icmp eq i32 %tid, 0
  br i1 %tmp1, label %if, label %else

if:
  %gep.if = getelementptr i32, ptr addrspace(1) %a, i32 %tid
  %a.val = load i32, ptr addrspace(1) %gep.if
  %cmp.if = icmp eq i32 %a.val, 0
  br label %endif

else:
  %gep.else = getelementptr i32, ptr addrspace(1) %b, i32 %tid
  %b.val = load i32, ptr addrspace(1) %gep.else
  %cmp.else = icmp slt i32 %b.val, 0
  br label %endif

endif:
  %tmp4 = phi i1 [%cmp.if, %if], [%cmp.else, %else]
  %ext = sext i1 %tmp4 to i32
  store i32 %ext, ptr addrspace(1) %out
  ret void
}

; Workitem (thread) id intrinsic; readnone via #0.
declare i32 @llvm.amdgcn.workitem.id.x() #0

attributes #0 = { readnone }
