xref: /llvm-project/llvm/test/CodeGen/AMDGPU/stack-realign.ll (revision 11b040192640ef3b1f481124c440f464ed6ec86a)
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Check that we properly realign the stack. While 4-byte access is all
; that is ever needed, some transformations rely on the known bits from
; the alignment of the pointer (e.g. vectorization of loads/stores).


; 128 byte object
; 4 byte emergency stack slot
; = 144 bytes with padding between them

; GCN-LABEL: {{^}}needs_align16_default_stack_align:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 4, v0
; GCN-DAG: v_lshrrev_b32_e64 [[FRAMEDIFF:v[0-9]+]], 6, s32
; GCN: v_add_u32_e32 [[FI:v[0-9]+]], vcc, [[SCALED_IDX]], [[FRAMEDIFF]]

; GCN-NOT: s32

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen

; GCN-NOT: s32

; GCN: ; ScratchSize: 144
; With the default attributes (#0) no realignment sequence is expected: the
; NOT lines above forbid any further touch of the stack pointer (s32) around
; the dynamically indexed stores into the align-16 alloca.
define void @needs_align16_default_stack_align(i32 %idx) #0 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], ptr addrspace(5) %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr addrspace(5) %gep0, align 16
  ret void
}
33
; GCN-LABEL: {{^}}needs_align16_stack_align4:
; GCN: s_add_i32 [[SCRATCH_REG:s[0-9]+]], s32, 0x3c0{{$}}
; GCN: s_and_b32 s33, [[SCRATCH_REG]], 0xfffffc00

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: s_addk_i32 s32, 0x2800{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen

; GCN: s_mov_b32 s32, s34

; GCN: ; ScratchSize: 160
; Attribute #2 carries alignstack=4, so the align-16 alloca now requires a
; dynamic realignment: the frame pointer (s33) is rounded up with the
; add/and sequence checked above, and s32 is restored from s34 on exit.
define void @needs_align16_stack_align4(i32 %idx) #2 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], ptr addrspace(5) %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr addrspace(5) %gep0, align 16
  ret void
}
54
; GCN-LABEL: {{^}}needs_align32:
; GCN: s_add_i32 [[SCRATCH_REG:s[0-9]+]], s32, 0x7c0{{$}}
; GCN: s_and_b32 s33, [[SCRATCH_REG]], 0xfffff800

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: s_addk_i32 s32, 0x3000{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen

; GCN: s_mov_b32 s32, s34

; GCN: ; ScratchSize: 192
; An align-32 alloca forces realignment even with the default attributes;
; note the wider mask (0xfffff800) compared with the align-16 case above.
define void @needs_align32(i32 %idx) #0 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 32, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], ptr addrspace(5) %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr addrspace(5) %gep0, align 32
  ret void
}
75
; GCN-LABEL: {{^}}force_realign4:
; GCN: s_add_i32 [[SCRATCH_REG:s[0-9]+]], s32, 0xc0{{$}}
; GCN: s_and_b32 s33, [[SCRATCH_REG]], 0xffffff00
; GCN: s_addk_i32 s32, 0xd00{{$}}

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: s_mov_b32 s32, s34

; GCN: ; ScratchSize: 52
; Attribute #1 carries "stackrealign", so a realignment sequence is emitted
; even though the alloca itself only needs align 4.
define void @force_realign4(i32 %idx) #1 {
  %alloca.align16 = alloca [8 x i32], align 4, addrspace(5)
  %gep0 = getelementptr inbounds [8 x i32], ptr addrspace(5) %alloca.align16, i32 0, i32 %idx
  store volatile i32 3, ptr addrspace(5) %gep0, align 4
  ret void
}
91
; GCN-LABEL: {{^}}kernel_call_align16_from_8:
; GCN: s_movk_i32 s32, 0x400{{$}}
; GCN-NOT: s32
; GCN: s_swappc_b64
; Kernel entry point: s32 is initialized once (0x400) and, per the NOT line,
; not adjusted again before the call into the align-16 callee.
define amdgpu_kernel void @kernel_call_align16_from_8() #0 {
  %alloca = alloca i32, align 4, addrspace(5)
  store volatile i32 2, ptr addrspace(5) %alloca
  call void @needs_align16_default_stack_align(i32 1)
  ret void
}
102
; The call sequence should keep the stack on call aligned to 4
; GCN-LABEL: {{^}}kernel_call_align16_from_5:
; GCN: s_movk_i32 s32, 0x400
; GCN: s_swappc_b64
define amdgpu_kernel void @kernel_call_align16_from_5() {
  ; Odd-sized (1-byte) local before the call; the checked s32 setup must
  ; still leave the outgoing stack suitably aligned for the callee.
  %alloca0 = alloca i8, align 1, addrspace(5)
  store volatile i8 2, ptr  addrspace(5) %alloca0

  call void @needs_align16_default_stack_align(i32 1)
  ret void
}
114
; GCN-LABEL: {{^}}kernel_call_align4_from_5:
; GCN: s_movk_i32 s32, 0x400
; GCN: s_swappc_b64
; Same as above but calling the alignstack=4 callee; the kernel-side stack
; setup is identical.
define amdgpu_kernel void @kernel_call_align4_from_5() {
  %alloca0 = alloca i8, align 1, addrspace(5)
  store volatile i8 2, ptr  addrspace(5) %alloca0

  call void @needs_align16_stack_align4(i32 1)
  ret void
}
125
; GCN-LABEL: {{^}}default_realign_align128:
; GCN: s_mov_b32 [[FP_COPY:s[0-9]+]], s33
; GCN-NEXT: s_add_i32 s33, s32, 0x1fc0
; GCN-NEXT: s_and_b32 s33, s33, 0xffffe000
; GCN-NEXT: s_mov_b32 s5, s34
; GCN-NEXT: s_mov_b32 s34, s32
; GCN-NEXT: s_addk_i32 s32, 0x4000
; GCN-NOT: s33
; GCN: buffer_store_dword v0, off, s[0:3], s33{{$}}
; GCN: s_mov_b32 s32, s34
; GCN: s_mov_b32 s33, [[FP_COPY]]
; An align-128 alloca with default attributes: the old frame pointer is
; copied aside, s33 is realigned, and both s32 and s33 are restored on exit.
define void @default_realign_align128(i32 %idx) #0 {
  %alloca.align = alloca i32, align 128, addrspace(5)
  store volatile i32 9, ptr addrspace(5) %alloca.align, align 128
  ret void
}
142
; GCN-LABEL: {{^}}disable_realign_align128:
; GCN-NOT: s32
; GCN: buffer_store_dword v0, off, s[0:3], s32{{$}}
; GCN-NOT: s32
; Attribute #3 carries "no-realign-stack": despite the align-128 alloca, no
; realignment happens and the store addresses scratch directly off s32.
define void @disable_realign_align128(i32 %idx) #3 {
  %alloca.align = alloca i32, align 128, addrspace(5)
  store volatile i32 9, ptr addrspace(5) %alloca.align, align 128
  ret void
}
152
declare void @extern_func(<32 x i32>, i32) #0
define void @func_call_align1024_bp_gets_vgpr_spill(<32 x i32> %a, i32 %b) #0 {
; The test forces the stack to be realigned to a new boundary
; since there is a local object with an alignment of 1024.
; Should use BP to access the incoming stack arguments.
; The BP value is saved/restored with a VGPR spill.

; GCN-LABEL: func_call_align1024_bp_gets_vgpr_spill:
; GCN: s_mov_b32 [[FP_SCRATCH_COPY:s[0-9]+]], s33
; GCN-NEXT: s_add_i32 [[SCRATCH_REG:s[0-9]+]], s32, 0xffc0
; GCN-NEXT: s_and_b32 s33, [[SCRATCH_REG]], 0xffff0000
; GCN-NEXT: s_or_saveexec_b64 s[18:19], -1
; GCN-NEXT: buffer_store_dword [[VGPR_REG:v[0-9]+]], off, s[0:3], s33 offset:1028 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[18:19]
; GCN-NEXT: v_writelane_b32 [[VGPR_REG]], [[FP_SCRATCH_COPY]], 2
; GCN-NEXT: v_mov_b32_e32 v32, 0
; GCN-DAG: v_writelane_b32 [[VGPR_REG]], s34, 3
; GCN: s_mov_b32 s34, s32
; GCN: buffer_store_dword v32, off, s[0:3], s33 offset:1024
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s34
; GCN-DAG: s_add_i32 s32, s32, 0x30000
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s32
; GCN: s_swappc_b64 s[30:31],

; GCN: v_readlane_b32 s31, [[VGPR_REG]], 1
; GCN: v_readlane_b32 s30, [[VGPR_REG]], 0
; GCN-NEXT: s_mov_b32 s32, s34
; GCN-NEXT: v_readlane_b32 [[FP_SCRATCH_COPY:s[0-9]+]], [[VGPR_REG]], 2
; GCN-NEXT: v_readlane_b32 s34, [[VGPR_REG]], 3
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_load_dword [[VGPR_REG]], off, s[0:3], s33 offset:1028 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[6:7]
; GCN-NEXT: s_mov_b32 s33, [[FP_SCRATCH_COPY]]
; GCN: s_setpc_b64 s[30:31]
  %temp = alloca i32, align 1024, addrspace(5)
  store volatile i32 0, ptr addrspace(5) %temp, align 1024
  call void @extern_func(<32 x i32> %a, i32 %b)
  ret void
}
193
%struct.Data = type { [9 x i32] }
define i32 @needs_align1024_stack_args_used_inside_loop(ptr addrspace(5) nocapture readonly byval(%struct.Data) align 8 %arg) local_unnamed_addr #4 {
; The local object allocation needed an alignment of 1024.
; Since the function argument is accessed in a loop with an
; index variable, the base pointer first gets loaded into a VGPR
; and that value should be further referenced to load the incoming values.
; The BP value will get saved/restored in an SGPR at the prologue/epilogue.

; GCN-LABEL: needs_align1024_stack_args_used_inside_loop:
; GCN: s_mov_b32 [[FP_COPY:s[0-9]+]], s33
; GCN-NEXT: s_add_i32 s33, s32, 0xffc0
; GCN-NEXT: s_mov_b32 [[BP_COPY:s[0-9]+]], s34
; GCN-NEXT: s_mov_b32 s34, s32
; GCN-NEXT: s_and_b32 s33, s33, 0xffff0000
; GCN-NEXT: v_lshrrev_b32_e64 [[VGPR_REG:v[0-9]+]], 6, s34
; GCN-NEXT: v_mov_b32_e32 v{{[0-9]+}}, 0
; GCN: s_add_i32 s32, s32, 0x30000
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s33 offset:1024
; GCN: buffer_load_dword v{{[0-9]+}}, [[VGPR_REG]], s[0:3], 0 offen
; GCN: v_add_u32_e32 [[VGPR_REG]], vcc, 4, [[VGPR_REG]]
; GCN: s_mov_b32 s32, s34
; GCN-NEXT: s_mov_b32 s34, [[BP_COPY]]
; GCN-NEXT: s_mov_b32 s33, [[FP_COPY]]
; GCN-NEXT: s_setpc_b64 s[30:31]
begin:
  %local_var = alloca i32, align 1024, addrspace(5)
  store volatile i32 0, ptr addrspace(5) %local_var, align 1024
  br label %loop_body

loop_end:                                                ; preds = %loop_body
  %idx_next = add nuw nsw i32 %lp_idx, 1
  %lp_exit_cond = icmp eq i32 %idx_next, 9
  br i1 %lp_exit_cond, label %exit, label %loop_body

loop_body:                                                ; preds = %loop_end, %begin
  %lp_idx = phi i32 [ 0, %begin ], [ %idx_next, %loop_end ]
  %ptr = getelementptr inbounds %struct.Data, ptr addrspace(5) %arg, i32 0, i32 0, i32 %lp_idx
  %val = load i32, ptr addrspace(5) %ptr, align 8
  %lp_cond = icmp eq i32 %val, %lp_idx
  br i1 %lp_cond, label %loop_end, label %exit

exit:                                               ; preds = %loop_end, %loop_body
  %out = phi i32 [ 0, %loop_body ], [ 1, %loop_end ]
  ret i32 %out
}
239
define void @no_free_scratch_sgpr_for_bp_copy(<32 x i32> %a, i32 %b) #0 {
; With all clobberable SGPRs consumed by the inline asm below, the base
; pointer (s34) must be saved/restored through a VGPR lane instead of a
; scratch SGPR.
; GCN-LABEL: no_free_scratch_sgpr_for_bp_copy:
; GCN: ; %bb.0:
; GCN: v_writelane_b32 [[VGPR_REG:v[0-9]+]], s34, 0
; GCN-NEXT: s_mov_b32 s34, s32
; GCN-NEXT: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s34
; GCN: v_readlane_b32 s34, [[VGPR_REG:v[0-9]+]], 0
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s33 offset:128
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: ;;#ASMEND
; GCN: s_setpc_b64 s[30:31]
  %local_val = alloca i32, align 128, addrspace(5)
  store volatile i32 %b, ptr addrspace(5) %local_val, align 128
  ; Use all clobberable registers, so BP has to spill to a VGPR.
  call void asm sideeffect "",
    "~{s0},~{s1},~{s2},~{s3},~{s4},~{s5},~{s6},~{s7},~{s8},~{s9}
    ,~{s10},~{s11},~{s12},~{s13},~{s14},~{s15},~{s16},~{s17},~{s18},~{s19}
    ,~{s20},~{s21},~{s22},~{s23},~{s24},~{s25},~{s26},~{s27},~{s28},~{s29}
    ,~{vcc_hi}"() #0
  ret void
}
262
define void @no_free_regs_spill_bp_to_memory(<32 x i32> %a, i32 %b) #5 {
; If there are no free SGPRs or VGPRs available we must spill the BP to memory.
; (Attribute #5's "amdgpu-waves-per-eu"="6,6" presumably constrains the
; register budget so nothing is left over — confirm against the backend.)

; GCN-LABEL: no_free_regs_spill_bp_to_mem
; GCN: s_mov_b32 [[FP_SCRATCH_COPY:s[0-9]+]], s33
; GCN: s_xor_saveexec_b64 s[6:7], -1
; GCN: buffer_store_dword v39, off, s[0:3], s33
; GCN: v_mov_b32_e32 v0, [[FP_SCRATCH_COPY]]
; GCN: buffer_store_dword v0, off, s[0:3], s33
; GCN: v_mov_b32_e32 v0, s34
; GCN-DAG: buffer_store_dword v0, off, s[0:3], s33
  %local_val = alloca i32, align 128, addrspace(5)
  store volatile i32 %b, ptr addrspace(5) %local_val, align 128

  call void asm sideeffect "; clobber nonpreserved SGPRs and 64 CSRs",
    "~{s4},~{s5},~{s6},~{s7},~{s8},~{s9}
    ,~{s10},~{s11},~{s12},~{s13},~{s14},~{s15},~{s16},~{s17},~{s18},~{s19}
    ,~{s20},~{s21},~{s22},~{s23},~{s24},~{s25},~{s26},~{s27},~{s28},~{s29}
    ,~{s40},~{s41},~{s42},~{s43},~{s44},~{s45},~{s46},~{s47},~{s48},~{s49}
    ,~{s50},~{s51},~{s52},~{s53},~{s54},~{s55},~{s56},~{s57},~{s58},~{s59}
    ,~{s60},~{s61},~{s62},~{s63},~{s64},~{s65},~{s66},~{s67},~{s68},~{s69}
    ,~{s70},~{s71},~{s72},~{s73},~{s74},~{s75},~{s76},~{s77},~{s78},~{s79}
    ,~{s80},~{s81},~{s82},~{s83},~{s84},~{s85},~{s86},~{s87},~{s88},~{s89}
    ,~{s90},~{s91},~{s92},~{s93},~{s94},~{s95},~{s96},~{s97},~{s98},~{s99}
    ,~{s100},~{s101},~{s102},~{s39},~{vcc}"() #0

  call void asm sideeffect "; clobber all VGPRs",
    "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9}
    ,~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19}
    ,~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29}
    ,~{v30},~{v31},~{v32},~{v33},~{v34},~{v35},~{v36},~{v37},~{v38}" () #0
  ret void
}
296
define void @spill_bp_to_memory_scratch_reg_needed_mubuf_offset(<32 x i32> %a, i32 %b, ptr addrspace(5) byval([4096 x i8]) align 4 %arg) #5 {
; If the size of the offset exceeds the MUBUF offset field we need another
; scratch VGPR to hold the offset.
; (The 4096-byte byval argument pushes the spill slots past the immediate
; offset range, hence the s_add_i32 into s5 before each buffer op below.)

; GCN-LABEL: spill_bp_to_memory_scratch_reg_needed_mubuf_offset
; GCN: s_mov_b32 [[FP_SCRATCH_COPY:s[0-9]+]], s33
; GCN-NEXT: s_add_i32 s33, s32, 0x1fc0
; GCN-NEXT: s_and_b32 s33, s33, 0xffffe000
; GCN-NEXT: s_xor_saveexec_b64 s[6:7], -1
; GCN-NEXT: s_add_i32 s5, s33, 0x42100
; GCN-NEXT: buffer_store_dword v39, off, s[0:3], s5 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[6:7]
; GCN-NEXT: v_mov_b32_e32 v0, [[FP_SCRATCH_COPY]]
; GCN-NEXT: s_add_i32 s5, s33, 0x42200
; GCN-NEXT: buffer_store_dword v0, off, s[0:3], s5 ; 4-byte Folded Spill
; GCN-NEXT: v_mov_b32_e32 v0, s34
; GCN-NEXT: s_add_i32 s5, s33, 0x42300
; GCN-NEXT: s_mov_b32 s34, s32
; GCN-NEXT: buffer_store_dword v0, off, s[0:3], s5 ; 4-byte Folded Spill
  %local_val = alloca i32, align 128, addrspace(5)
  store volatile i32 %b, ptr addrspace(5) %local_val, align 128

  call void asm sideeffect "; clobber nonpreserved SGPRs and 64 CSRs",
    "~{s4},~{s5},~{s6},~{s7},~{s8},~{s9}
    ,~{s10},~{s11},~{s12},~{s13},~{s14},~{s15},~{s16},~{s17},~{s18},~{s19}
    ,~{s20},~{s21},~{s22},~{s23},~{s24},~{s25},~{s26},~{s27},~{s28},~{s29}
    ,~{s40},~{s41},~{s42},~{s43},~{s44},~{s45},~{s46},~{s47},~{s48},~{s49}
    ,~{s50},~{s51},~{s52},~{s53},~{s54},~{s55},~{s56},~{s57},~{s58},~{s59}
    ,~{s60},~{s61},~{s62},~{s63},~{s64},~{s65},~{s66},~{s67},~{s68},~{s69}
    ,~{s70},~{s71},~{s72},~{s73},~{s74},~{s75},~{s76},~{s77},~{s78},~{s79}
    ,~{s80},~{s81},~{s82},~{s83},~{s84},~{s85},~{s86},~{s87},~{s88},~{s89}
    ,~{s90},~{s91},~{s92},~{s93},~{s94},~{s95},~{s96},~{s97},~{s98},~{s99}
    ,~{s100},~{s101},~{s102},~{s39},~{vcc}"() #0

  call void asm sideeffect "; clobber all VGPRs",
    "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9}
    ,~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19}
    ,~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29}
    ,~{v30},~{v31},~{v32},~{v33},~{v34},~{v35},~{v36},~{v37},~{v38}"() #0
  ret void
}
338
; Attribute groups referenced by the functions above:
;   #0 — baseline, default stack alignment handling.
;   #1 — "stackrealign": always realign the stack.
;   #2 — alignstack=4: declared incoming stack alignment of 4.
;   #3 — "no-realign-stack": never realign, even for over-aligned allocas.
;   #4 — keep a frame pointer in all functions.
;   #5 — "amdgpu-waves-per-eu"="6,6": presumably shrinks the register budget
;        so no scratch registers remain free (see the BP-spill tests).
attributes #0 = { noinline nounwind }
attributes #1 = { noinline nounwind "stackrealign" }
attributes #2 = { noinline nounwind alignstack=4 }
attributes #3 = { noinline nounwind "no-realign-stack" }
attributes #4 = { noinline nounwind "frame-pointer"="all"}
attributes #5 = { noinline nounwind "amdgpu-waves-per-eu"="6,6" }
