xref: /llvm-project/llvm/test/CodeGen/AMDGPU/function-resource-usage.ll (revision 9a5e5e28eca97ca06adc0cc60273dcf6cd61e32f)
1; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-ipra=0 < %s | FileCheck -check-prefix=GCN %s
2
3; Functions that don't make calls should have constants as its resource usage as no resource information has to be propagated.
4
5; GCN-LABEL: {{^}}use_vcc:
6; GCN: .set use_vcc.num_vgpr, 0
7; GCN: .set use_vcc.num_agpr, 0
8; GCN: .set use_vcc.numbered_sgpr, 32
9; GCN: .set use_vcc.private_seg_size, 0
10; GCN: .set use_vcc.uses_vcc, 1
11; GCN: .set use_vcc.uses_flat_scratch, 0
12; GCN: .set use_vcc.has_dyn_sized_stack, 0
13; GCN: .set use_vcc.has_recursion, 0
14; GCN: .set use_vcc.has_indirect_call, 0
15; GCN: TotalNumSgprs: 36
16; GCN: NumVgprs: 0
17; GCN: ScratchSize: 0
; Leaf function: the inline asm clobbers only vcc, so every resource .set above
; is emitted as a literal constant (nothing to propagate from callees).
18define void @use_vcc() #1 {
19  call void asm sideeffect "", "~{vcc}" () #0
20  ret void
21}
22
23; GCN-LABEL: {{^}}indirect_use_vcc:
24; GCN: .set indirect_use_vcc.num_vgpr, max(41, use_vcc.num_vgpr)
25; GCN: .set indirect_use_vcc.num_agpr, max(0, use_vcc.num_agpr)
26; GCN: .set indirect_use_vcc.numbered_sgpr, max(34, use_vcc.numbered_sgpr)
27; GCN: .set indirect_use_vcc.private_seg_size, 16+(max(use_vcc.private_seg_size))
28; GCN: .set indirect_use_vcc.uses_vcc, or(1, use_vcc.uses_vcc)
29; GCN: .set indirect_use_vcc.uses_flat_scratch, or(0, use_vcc.uses_flat_scratch)
30; GCN: .set indirect_use_vcc.has_dyn_sized_stack, or(0, use_vcc.has_dyn_sized_stack)
31; GCN: .set indirect_use_vcc.has_recursion, or(0, use_vcc.has_recursion)
32; GCN: .set indirect_use_vcc.has_indirect_call, or(0, use_vcc.has_indirect_call)
33; GCN: TotalNumSgprs: 38
34; GCN: NumVgprs: 41
35; GCN: ScratchSize: 16
; Single direct call: resource .sets become max(...)/or(...) expressions over
; the callee's use_vcc.* symbols rather than plain constants.
36define void @indirect_use_vcc() #1 {
37  call void @use_vcc()
38  ret void
39}
40
41; GCN-LABEL: {{^}}indirect_2level_use_vcc_kernel:
42; GCN: .set indirect_2level_use_vcc_kernel.num_vgpr, max(32, indirect_use_vcc.num_vgpr)
43; GCN: .set indirect_2level_use_vcc_kernel.num_agpr, max(0, indirect_use_vcc.num_agpr)
44; GCN: .set indirect_2level_use_vcc_kernel.numbered_sgpr, max(33, indirect_use_vcc.numbered_sgpr)
45; GCN: .set indirect_2level_use_vcc_kernel.private_seg_size, 0+(max(indirect_use_vcc.private_seg_size))
46; GCN: .set indirect_2level_use_vcc_kernel.uses_vcc, or(1, indirect_use_vcc.uses_vcc)
47; GCN: .set indirect_2level_use_vcc_kernel.uses_flat_scratch, or(1, indirect_use_vcc.uses_flat_scratch)
48; GCN: .set indirect_2level_use_vcc_kernel.has_dyn_sized_stack, or(0, indirect_use_vcc.has_dyn_sized_stack)
49; GCN: .set indirect_2level_use_vcc_kernel.has_recursion, or(0, indirect_use_vcc.has_recursion)
50; GCN: .set indirect_2level_use_vcc_kernel.has_indirect_call, or(0, indirect_use_vcc.has_indirect_call)
51; GCN: TotalNumSgprs: 40
52; GCN: NumVgprs: 41
53; GCN: ScratchSize: 16
; Kernel entry two levels above use_vcc; propagation goes through
; indirect_use_vcc.* symbols (note uses_flat_scratch is or(1, ...) for kernels).
54define amdgpu_kernel void @indirect_2level_use_vcc_kernel(ptr addrspace(1) %out) #0 {
55  call void @indirect_use_vcc()
56  ret void
57}
58
59; GCN-LABEL: {{^}}use_flat_scratch:
60; GCN: .set use_flat_scratch.num_vgpr, 0
61; GCN: .set use_flat_scratch.num_agpr, 0
62; GCN: .set use_flat_scratch.numbered_sgpr, 32
63; GCN: .set use_flat_scratch.private_seg_size, 0
64; GCN: .set use_flat_scratch.uses_vcc, 0
65; GCN: .set use_flat_scratch.uses_flat_scratch, 1
66; GCN: .set use_flat_scratch.has_dyn_sized_stack, 0
67; GCN: .set use_flat_scratch.has_recursion, 0
68; GCN: .set use_flat_scratch.has_indirect_call, 0
69; GCN: TotalNumSgprs: 38
70; GCN: NumVgprs: 0
71; GCN: ScratchSize: 0
; Leaf function clobbering flat_scratch: sets uses_flat_scratch to 1 while all
; other resource .sets stay constant.
72define void @use_flat_scratch() #1 {
73  call void asm sideeffect "", "~{flat_scratch}" () #0
74  ret void
75}
76
77; GCN-LABEL: {{^}}indirect_use_flat_scratch:
78; GCN: .set indirect_use_flat_scratch.num_vgpr, max(41, use_flat_scratch.num_vgpr)
79; GCN: .set indirect_use_flat_scratch.num_agpr, max(0, use_flat_scratch.num_agpr)
80; GCN: .set indirect_use_flat_scratch.numbered_sgpr, max(34, use_flat_scratch.numbered_sgpr)
81; GCN: .set indirect_use_flat_scratch.private_seg_size, 16+(max(use_flat_scratch.private_seg_size))
82; GCN: .set indirect_use_flat_scratch.uses_vcc, or(1, use_flat_scratch.uses_vcc)
83; GCN: .set indirect_use_flat_scratch.uses_flat_scratch, or(0, use_flat_scratch.uses_flat_scratch)
84; GCN: .set indirect_use_flat_scratch.has_dyn_sized_stack, or(0, use_flat_scratch.has_dyn_sized_stack)
85; GCN: .set indirect_use_flat_scratch.has_recursion, or(0, use_flat_scratch.has_recursion)
86; GCN: .set indirect_use_flat_scratch.has_indirect_call, or(0, use_flat_scratch.has_indirect_call)
87; GCN: TotalNumSgprs: 40
88; GCN: NumVgprs: 41
89; GCN: ScratchSize: 16
; Mirrors indirect_use_vcc but over the flat_scratch leaf; flat-scratch use
; propagates through use_flat_scratch.uses_flat_scratch.
90define void @indirect_use_flat_scratch() #1 {
91  call void @use_flat_scratch()
92  ret void
93}
94
95; GCN-LABEL: {{^}}indirect_2level_use_flat_scratch_kernel:
96; GCN: .set indirect_2level_use_flat_scratch_kernel.num_vgpr, max(32, indirect_use_flat_scratch.num_vgpr)
97; GCN: .set indirect_2level_use_flat_scratch_kernel.num_agpr, max(0, indirect_use_flat_scratch.num_agpr)
98; GCN: .set indirect_2level_use_flat_scratch_kernel.numbered_sgpr, max(33, indirect_use_flat_scratch.numbered_sgpr)
99; GCN: .set indirect_2level_use_flat_scratch_kernel.private_seg_size, 0+(max(indirect_use_flat_scratch.private_seg_size))
100; GCN: .set indirect_2level_use_flat_scratch_kernel.uses_vcc, or(1, indirect_use_flat_scratch.uses_vcc)
101; GCN: .set indirect_2level_use_flat_scratch_kernel.uses_flat_scratch, or(1, indirect_use_flat_scratch.uses_flat_scratch)
102; GCN: .set indirect_2level_use_flat_scratch_kernel.has_dyn_sized_stack, or(0, indirect_use_flat_scratch.has_dyn_sized_stack)
103; GCN: .set indirect_2level_use_flat_scratch_kernel.has_recursion, or(0, indirect_use_flat_scratch.has_recursion)
104; GCN: .set indirect_2level_use_flat_scratch_kernel.has_indirect_call, or(0, indirect_use_flat_scratch.has_indirect_call)
105; GCN: TotalNumSgprs: 40
106; GCN: NumVgprs: 41
107; GCN: ScratchSize: 16
; Kernel two levels above the flat_scratch leaf; checks symbolic propagation
; through indirect_use_flat_scratch.*.
108define amdgpu_kernel void @indirect_2level_use_flat_scratch_kernel(ptr addrspace(1) %out) #0 {
109  call void @indirect_use_flat_scratch()
110  ret void
111}
112
113; GCN-LABEL: {{^}}use_10_vgpr:
114; GCN: .set use_10_vgpr.num_vgpr, 10
115; GCN: .set use_10_vgpr.num_agpr, 0
116; GCN: .set use_10_vgpr.numbered_sgpr, 32
117; GCN: .set use_10_vgpr.private_seg_size, 0
118; GCN: .set use_10_vgpr.uses_vcc, 0
119; GCN: .set use_10_vgpr.uses_flat_scratch, 0
120; GCN: .set use_10_vgpr.has_dyn_sized_stack, 0
121; GCN: .set use_10_vgpr.has_recursion, 0
122; GCN: .set use_10_vgpr.has_indirect_call, 0
123; GCN: TotalNumSgprs: 36
124; GCN: NumVgprs: 10
125; GCN: ScratchSize: 0
; Leaf clobbering v0-v9 across two asm statements: num_vgpr resolves to the
; constant 10.
126define void @use_10_vgpr() #1 {
127  call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4}"() #0
128  call void asm sideeffect "", "~{v5},~{v6},~{v7},~{v8},~{v9}"() #0
129  ret void
130}
131
132; GCN-LABEL: {{^}}indirect_use_10_vgpr:
133; GCN: .set indirect_use_10_vgpr.num_vgpr, max(41, use_10_vgpr.num_vgpr)
134; GCN: .set indirect_use_10_vgpr.num_agpr, max(0, use_10_vgpr.num_agpr)
135; GCN: .set indirect_use_10_vgpr.numbered_sgpr, max(34, use_10_vgpr.numbered_sgpr)
136; GCN: .set indirect_use_10_vgpr.private_seg_size, 16+(max(use_10_vgpr.private_seg_size))
137; GCN: .set indirect_use_10_vgpr.uses_vcc, or(1, use_10_vgpr.uses_vcc)
138; GCN: .set indirect_use_10_vgpr.uses_flat_scratch, or(0, use_10_vgpr.uses_flat_scratch)
139; GCN: .set indirect_use_10_vgpr.has_dyn_sized_stack, or(0, use_10_vgpr.has_dyn_sized_stack)
140; GCN: .set indirect_use_10_vgpr.has_recursion, or(0, use_10_vgpr.has_recursion)
141; GCN: .set indirect_use_10_vgpr.has_indirect_call, or(0, use_10_vgpr.has_indirect_call)
142; GCN: TotalNumSgprs: 38
143; GCN: NumVgprs: 41
144; GCN: ScratchSize: 16
; Caller of use_10_vgpr; own 41-VGPR call overhead dominates the callee's 10
; in the max(41, use_10_vgpr.num_vgpr) expression.
145define void @indirect_use_10_vgpr() #0 {
146  call void @use_10_vgpr()
147  ret void
148}
149
150; GCN-LABEL: {{^}}indirect_2_level_use_10_vgpr:
151; GCN:	.set indirect_2_level_use_10_vgpr.num_vgpr, max(32, indirect_use_10_vgpr.num_vgpr)
152; GCN:	.set indirect_2_level_use_10_vgpr.num_agpr, max(0, indirect_use_10_vgpr.num_agpr)
153; GCN:	.set indirect_2_level_use_10_vgpr.numbered_sgpr, max(33, indirect_use_10_vgpr.numbered_sgpr)
154; GCN:	.set indirect_2_level_use_10_vgpr.private_seg_size, 0+(max(indirect_use_10_vgpr.private_seg_size))
155; GCN:	.set indirect_2_level_use_10_vgpr.uses_vcc, or(1, indirect_use_10_vgpr.uses_vcc)
156; GCN:	.set indirect_2_level_use_10_vgpr.uses_flat_scratch, or(1, indirect_use_10_vgpr.uses_flat_scratch)
157; GCN:	.set indirect_2_level_use_10_vgpr.has_dyn_sized_stack, or(0, indirect_use_10_vgpr.has_dyn_sized_stack)
158; GCN:	.set indirect_2_level_use_10_vgpr.has_recursion, or(0, indirect_use_10_vgpr.has_recursion)
159; GCN:	.set indirect_2_level_use_10_vgpr.has_indirect_call, or(0, indirect_use_10_vgpr.has_indirect_call)
160; GCN: TotalNumSgprs: 40
161; GCN: NumVgprs: 41
162; GCN: ScratchSize: 16
; Kernel two levels above use_10_vgpr; all .sets reference
; indirect_use_10_vgpr.* symbols.
163define amdgpu_kernel void @indirect_2_level_use_10_vgpr() #0 {
164  call void @indirect_use_10_vgpr()
165  ret void
166}
167
168; GCN-LABEL: {{^}}use_50_vgpr:
169; GCN:	.set use_50_vgpr.num_vgpr, 50
170; GCN:	.set use_50_vgpr.num_agpr, 0
171; GCN:	.set use_50_vgpr.numbered_sgpr, 32
172; GCN:	.set use_50_vgpr.private_seg_size, 0
173; GCN:	.set use_50_vgpr.uses_vcc, 0
174; GCN:	.set use_50_vgpr.uses_flat_scratch, 0
175; GCN:	.set use_50_vgpr.has_dyn_sized_stack, 0
176; GCN:	.set use_50_vgpr.has_recursion, 0
177; GCN:	.set use_50_vgpr.has_indirect_call, 0
178; GCN: TotalNumSgprs: 36
179; GCN: NumVgprs: 50
180; GCN: ScratchSize: 0
; Leaf clobbering only v49: highest touched VGPR index drives num_vgpr to 50.
181define void @use_50_vgpr() #1 {
182  call void asm sideeffect "", "~{v49}"() #0
183  ret void
184}
185
186; GCN-LABEL: {{^}}indirect_use_50_vgpr:
187; GCN:	.set indirect_use_50_vgpr.num_vgpr, max(41, use_50_vgpr.num_vgpr)
188; GCN:	.set indirect_use_50_vgpr.num_agpr, max(0, use_50_vgpr.num_agpr)
189; GCN:	.set indirect_use_50_vgpr.numbered_sgpr, max(34, use_50_vgpr.numbered_sgpr)
190; GCN:	.set indirect_use_50_vgpr.private_seg_size, 16+(max(use_50_vgpr.private_seg_size))
191; GCN:	.set indirect_use_50_vgpr.uses_vcc, or(1, use_50_vgpr.uses_vcc)
192; GCN:	.set indirect_use_50_vgpr.uses_flat_scratch, or(0, use_50_vgpr.uses_flat_scratch)
193; GCN:	.set indirect_use_50_vgpr.has_dyn_sized_stack, or(0, use_50_vgpr.has_dyn_sized_stack)
194; GCN:	.set indirect_use_50_vgpr.has_recursion, or(0, use_50_vgpr.has_recursion)
195; GCN:	.set indirect_use_50_vgpr.has_indirect_call, or(0, use_50_vgpr.has_indirect_call)
196; GCN: TotalNumSgprs: 38
197; GCN: NumVgprs: 50
198; GCN: ScratchSize: 16
; Caller of use_50_vgpr; here the callee's 50 VGPRs win over the caller's 41
; in the final NumVgprs (checked as 50 above).
199define void @indirect_use_50_vgpr() #0 {
200  call void @use_50_vgpr()
201  ret void
202}
203
204; GCN-LABEL: {{^}}use_80_sgpr:
205; GCN:	.set use_80_sgpr.num_vgpr, 1
206; GCN:	.set use_80_sgpr.num_agpr, 0
207; GCN:	.set use_80_sgpr.numbered_sgpr, 80
208; GCN:	.set use_80_sgpr.private_seg_size, 8
209; GCN:	.set use_80_sgpr.uses_vcc, 0
210; GCN:	.set use_80_sgpr.uses_flat_scratch, 0
211; GCN:	.set use_80_sgpr.has_dyn_sized_stack, 0
212; GCN:	.set use_80_sgpr.has_recursion, 0
213; GCN:	.set use_80_sgpr.has_indirect_call, 0
214; GCN: TotalNumSgprs: 84
215; GCN: NumVgprs: 1
216; GCN: ScratchSize: 8
; Leaf clobbering s79: highest touched SGPR index drives numbered_sgpr to 80.
217define void @use_80_sgpr() #1 {
218  call void asm sideeffect "", "~{s79}"() #0
219  ret void
220}
221
222; GCN-LABEL: {{^}}indirect_use_80_sgpr:
223; GCN:	.set indirect_use_80_sgpr.num_vgpr, max(41, use_80_sgpr.num_vgpr)
224; GCN:	.set indirect_use_80_sgpr.num_agpr, max(0, use_80_sgpr.num_agpr)
225; GCN:	.set indirect_use_80_sgpr.numbered_sgpr, max(34, use_80_sgpr.numbered_sgpr)
226; GCN:	.set indirect_use_80_sgpr.private_seg_size, 16+(max(use_80_sgpr.private_seg_size))
227; GCN:	.set indirect_use_80_sgpr.uses_vcc, or(1, use_80_sgpr.uses_vcc)
228; GCN:	.set indirect_use_80_sgpr.uses_flat_scratch, or(0, use_80_sgpr.uses_flat_scratch)
229; GCN:	.set indirect_use_80_sgpr.has_dyn_sized_stack, or(0, use_80_sgpr.has_dyn_sized_stack)
230; GCN:	.set indirect_use_80_sgpr.has_recursion, or(0, use_80_sgpr.has_recursion)
231; GCN:	.set indirect_use_80_sgpr.has_indirect_call, or(0, use_80_sgpr.has_indirect_call)
232; GCN: TotalNumSgprs: 84
233; GCN: NumVgprs: 41
234; GCN: ScratchSize: 24
; Caller of use_80_sgpr; the callee's 80 SGPRs dominate max(34, ...), and
; scratch sizes accumulate (16 call overhead + callee's 8).
235define void @indirect_use_80_sgpr() #1 {
236  call void @use_80_sgpr()
237  ret void
238}
239
240; GCN-LABEL: {{^}}indirect_2_level_use_80_sgpr:
241; GCN:	.set indirect_2_level_use_80_sgpr.num_vgpr, max(32, indirect_use_80_sgpr.num_vgpr)
242; GCN:	.set indirect_2_level_use_80_sgpr.num_agpr, max(0, indirect_use_80_sgpr.num_agpr)
243; GCN:	.set indirect_2_level_use_80_sgpr.numbered_sgpr, max(33, indirect_use_80_sgpr.numbered_sgpr)
244; GCN:	.set indirect_2_level_use_80_sgpr.private_seg_size, 0+(max(indirect_use_80_sgpr.private_seg_size))
245; GCN:	.set indirect_2_level_use_80_sgpr.uses_vcc, or(1, indirect_use_80_sgpr.uses_vcc)
246; GCN:	.set indirect_2_level_use_80_sgpr.uses_flat_scratch, or(1, indirect_use_80_sgpr.uses_flat_scratch)
247; GCN:	.set indirect_2_level_use_80_sgpr.has_dyn_sized_stack, or(0, indirect_use_80_sgpr.has_dyn_sized_stack)
248; GCN:	.set indirect_2_level_use_80_sgpr.has_recursion, or(0, indirect_use_80_sgpr.has_recursion)
249; GCN:	.set indirect_2_level_use_80_sgpr.has_indirect_call, or(0, indirect_use_80_sgpr.has_indirect_call)
250; GCN: TotalNumSgprs: 86
251; GCN: NumVgprs: 41
252; GCN: ScratchSize: 24
; Kernel two levels above use_80_sgpr; propagates via indirect_use_80_sgpr.*.
253define amdgpu_kernel void @indirect_2_level_use_80_sgpr() #0 {
254  call void @indirect_use_80_sgpr()
255  ret void
256}
257
258; GCN-LABEL: {{^}}use_stack0:
259; GCN:	.set use_stack0.num_vgpr, 1
260; GCN:	.set use_stack0.num_agpr, 0
261; GCN:	.set use_stack0.numbered_sgpr, 33
262; GCN:	.set use_stack0.private_seg_size, 2052
263; GCN:	.set use_stack0.uses_vcc, 0
264; GCN:	.set use_stack0.uses_flat_scratch, 0
265; GCN:	.set use_stack0.has_dyn_sized_stack, 0
266; GCN:	.set use_stack0.has_recursion, 0
267; GCN:	.set use_stack0.has_indirect_call, 0
268; GCN: TotalNumSgprs: 37
269; GCN: NumVgprs: 1
270; GCN: ScratchSize: 2052
; Leaf with a 512 x i32 (2048-byte) private alloca kept alive through inline
; asm; private_seg_size checks as the constant 2052.
271define void @use_stack0() #1 {
272  %alloca = alloca [512 x i32], align 4, addrspace(5)
273  call void asm sideeffect "; use $0", "v"(ptr addrspace(5) %alloca) #0
274  ret void
275}
276
277; GCN-LABEL: {{^}}use_stack1:
278; GCN:	.set use_stack1.num_vgpr, 1
279; GCN:	.set use_stack1.num_agpr, 0
280; GCN:	.set use_stack1.numbered_sgpr, 33
281; GCN:	.set use_stack1.private_seg_size, 404
282; GCN:	.set use_stack1.uses_vcc, 0
283; GCN:	.set use_stack1.uses_flat_scratch, 0
284; GCN:	.set use_stack1.has_dyn_sized_stack, 0
285; GCN:	.set use_stack1.has_recursion, 0
286; GCN:	.set use_stack1.has_indirect_call, 0
287; GCN: TotalNumSgprs: 37
288; GCN: NumVgprs: 1
289; GCN: ScratchSize: 404
; Smaller stack sibling of use_stack0: 100 x i32 (400-byte) alloca,
; private_seg_size checks as 404.
290define void @use_stack1() #1 {
291  %alloca = alloca [100 x i32], align 4, addrspace(5)
292  call void asm sideeffect "; use $0", "v"(ptr addrspace(5) %alloca) #0
293  ret void
294}
295
296; GCN-LABEL: {{^}}indirect_use_stack:
297; GCN:	.set indirect_use_stack.num_vgpr, max(41, use_stack0.num_vgpr)
298; GCN:	.set indirect_use_stack.num_agpr, max(0, use_stack0.num_agpr)
299; GCN:	.set indirect_use_stack.numbered_sgpr, max(34, use_stack0.numbered_sgpr)
300; GCN:	.set indirect_use_stack.private_seg_size, 80+(max(use_stack0.private_seg_size))
301; GCN:	.set indirect_use_stack.uses_vcc, or(1, use_stack0.uses_vcc)
302; GCN:	.set indirect_use_stack.uses_flat_scratch, or(0, use_stack0.uses_flat_scratch)
303; GCN:	.set indirect_use_stack.has_dyn_sized_stack, or(0, use_stack0.has_dyn_sized_stack)
304; GCN:	.set indirect_use_stack.has_recursion, or(0, use_stack0.has_recursion)
305; GCN:	.set indirect_use_stack.has_indirect_call, or(0, use_stack0.has_indirect_call)
306; GCN: TotalNumSgprs: 38
307; GCN: NumVgprs: 41
308; GCN: ScratchSize: 2132
; Has its own 16 x i32 alloca AND calls use_stack0: private_seg_size is the
; caller's 80 bytes plus max over the callee's stack (2132 total checked above).
309define void @indirect_use_stack() #1 {
310  %alloca = alloca [16 x i32], align 4, addrspace(5)
311  call void asm sideeffect "; use $0", "v"(ptr addrspace(5) %alloca) #0
312  call void @use_stack0()
313  ret void
314}
315
316; GCN-LABEL: {{^}}indirect_2_level_use_stack:
317; GCN:	.set indirect_2_level_use_stack.num_vgpr, max(32, indirect_use_stack.num_vgpr)
318; GCN:	.set indirect_2_level_use_stack.num_agpr, max(0, indirect_use_stack.num_agpr)
319; GCN:	.set indirect_2_level_use_stack.numbered_sgpr, max(33, indirect_use_stack.numbered_sgpr)
320; GCN:	.set indirect_2_level_use_stack.private_seg_size, 0+(max(indirect_use_stack.private_seg_size))
321; GCN:	.set indirect_2_level_use_stack.uses_vcc, or(1, indirect_use_stack.uses_vcc)
322; GCN:	.set indirect_2_level_use_stack.uses_flat_scratch, or(1, indirect_use_stack.uses_flat_scratch)
323; GCN:	.set indirect_2_level_use_stack.has_dyn_sized_stack, or(0, indirect_use_stack.has_dyn_sized_stack)
324; GCN:	.set indirect_2_level_use_stack.has_recursion, or(0, indirect_use_stack.has_recursion)
325; GCN:	.set indirect_2_level_use_stack.has_indirect_call, or(0, indirect_use_stack.has_indirect_call)
326; GCN: TotalNumSgprs: 40
327; GCN: NumVgprs: 41
328; GCN: ScratchSize: 2132
; Kernel above indirect_use_stack; adds no stack of its own
; (private_seg_size is 0+(max(...))).
329define amdgpu_kernel void @indirect_2_level_use_stack() #0 {
330  call void @indirect_use_stack()
331  ret void
332}
333
334
335; Should be maximum of callee usage
336; GCN-LABEL: {{^}}multi_call_use_use_stack:
337; GCN:	.set multi_call_use_use_stack.num_vgpr, max(41, use_stack0.num_vgpr, use_stack1.num_vgpr)
338; GCN:	.set multi_call_use_use_stack.num_agpr, max(0, use_stack0.num_agpr, use_stack1.num_agpr)
339; GCN:	.set multi_call_use_use_stack.numbered_sgpr, max(44, use_stack0.numbered_sgpr, use_stack1.numbered_sgpr)
340; GCN:	.set multi_call_use_use_stack.private_seg_size, 0+(max(use_stack0.private_seg_size, use_stack1.private_seg_size))
341; GCN:	.set multi_call_use_use_stack.uses_vcc, or(1, use_stack0.uses_vcc, use_stack1.uses_vcc)
342; GCN:	.set multi_call_use_use_stack.uses_flat_scratch, or(1, use_stack0.uses_flat_scratch, use_stack1.uses_flat_scratch)
343; GCN:	.set multi_call_use_use_stack.has_dyn_sized_stack, or(0, use_stack0.has_dyn_sized_stack, use_stack1.has_dyn_sized_stack)
344; GCN:	.set multi_call_use_use_stack.has_recursion, or(0, use_stack0.has_recursion, use_stack1.has_recursion)
345; GCN:	.set multi_call_use_use_stack.has_indirect_call, or(0, use_stack0.has_indirect_call, use_stack1.has_indirect_call)
346; GCN: TotalNumSgprs: 50
347; GCN: NumVgprs: 41
348; GCN: ScratchSize: 2052
; Two direct callees: each .set takes the max/or over BOTH use_stack0.* and
; use_stack1.* symbols; ScratchSize resolves to the larger callee (2052).
349define amdgpu_kernel void @multi_call_use_use_stack() #0 {
350  call void @use_stack0()
351  call void @use_stack1()
352  ret void
353}
354
355declare void @external() #0
356
357; GCN-LABEL: {{^}}multi_call_with_external:
358; GCN:	.set multi_call_with_external.num_vgpr, max(41, amdgpu.max_num_vgpr)
359; GCN:	.set multi_call_with_external.num_agpr, max(0, amdgpu.max_num_agpr)
360; GCN:	.set multi_call_with_external.numbered_sgpr, max(44, amdgpu.max_num_sgpr)
361; GCN:	.set multi_call_with_external.private_seg_size, 0+(max(use_stack0.private_seg_size, use_stack1.private_seg_size))
362; GCN:	.set multi_call_with_external.uses_vcc, 1
363; GCN:	.set multi_call_with_external.uses_flat_scratch, 1
364; GCN:	.set multi_call_with_external.has_dyn_sized_stack, 1
365; GCN:	.set multi_call_with_external.has_recursion, 0
366; GCN:	.set multi_call_with_external.has_indirect_call, 1
367; GCN: TotalNumSgprs: multi_call_with_external.numbered_sgpr+6
368; GCN: NumVgprs: multi_call_with_external.num_vgpr
369; GCN: ScratchSize: 2052
; Adds a call to the external declaration: register counts fall back to the
; module-wide amdgpu.max_num_* symbols and has_dyn_sized_stack/has_indirect_call
; are pinned to 1, while private_seg_size still maxes over the known callees.
370define amdgpu_kernel void @multi_call_with_external() #0 {
371  call void @use_stack0()
372  call void @use_stack1()
373  call void @external()
374  ret void
375}
376
377; GCN-LABEL: {{^}}multi_call_with_external_and_duplicates:
378; GCN:	.set multi_call_with_external_and_duplicates.num_vgpr, max(41, amdgpu.max_num_vgpr)
379; GCN:	.set multi_call_with_external_and_duplicates.num_agpr, max(0, amdgpu.max_num_agpr)
380; GCN:	.set multi_call_with_external_and_duplicates.numbered_sgpr, max(46, amdgpu.max_num_sgpr)
381; GCN:	.set multi_call_with_external_and_duplicates.private_seg_size, 0+(max(use_stack0.private_seg_size, use_stack1.private_seg_size))
382; GCN:	.set multi_call_with_external_and_duplicates.uses_vcc, 1
383; GCN:	.set multi_call_with_external_and_duplicates.uses_flat_scratch, 1
384; GCN:	.set multi_call_with_external_and_duplicates.has_dyn_sized_stack, 1
385; GCN:	.set multi_call_with_external_and_duplicates.has_recursion, 0
386; GCN:	.set multi_call_with_external_and_duplicates.has_indirect_call, 1
387; GCN: TotalNumSgprs: multi_call_with_external_and_duplicates.numbered_sgpr+6
388; GCN: NumVgprs: multi_call_with_external_and_duplicates.num_vgpr
389; GCN: ScratchSize: 2052
; Same as multi_call_with_external but every call site duplicated: the CHECK
; lines verify callee symbols are not repeated in the max(...)/or(...) lists.
390define amdgpu_kernel void @multi_call_with_external_and_duplicates() #0 {
391  call void @use_stack0()
392  call void @use_stack0()
393  call void @use_stack1()
394  call void @use_stack1()
395  call void @external()
396  call void @external()
397  ret void
398}
399
400; GCN-LABEL: {{^}}usage_external:
401; GCN:	.set usage_external.num_vgpr, max(32, amdgpu.max_num_vgpr)
402; GCN:	.set usage_external.num_agpr, max(0, amdgpu.max_num_agpr)
403; GCN:	.set usage_external.numbered_sgpr, max(33, amdgpu.max_num_sgpr)
404; GCN:	.set usage_external.private_seg_size, 0
405; GCN:	.set usage_external.uses_vcc, 1
406; GCN:	.set usage_external.uses_flat_scratch, 1
407; GCN:	.set usage_external.has_dyn_sized_stack, 1
408; GCN:	.set usage_external.has_recursion, 0
409; GCN:	.set usage_external.has_indirect_call, 1
410; GCN: TotalNumSgprs: usage_external.numbered_sgpr+6
411; GCN: NumVgprs: usage_external.num_vgpr
412; GCN: ScratchSize: 0
; Kernel whose only callee is external: all register .sets use the
; amdgpu.max_num_* worst-case symbols; private_seg_size stays 0.
413define amdgpu_kernel void @usage_external() #0 {
414  call void @external()
415  ret void
416}
417
418declare void @external_recurse() #2
419
420; GCN-LABEL: {{^}}usage_external_recurse:
421; GCN:	.set usage_external_recurse.num_vgpr, max(32, amdgpu.max_num_vgpr)
422; GCN:	.set usage_external_recurse.num_agpr, max(0, amdgpu.max_num_agpr)
423; GCN:	.set usage_external_recurse.numbered_sgpr, max(33, amdgpu.max_num_sgpr)
424; GCN:	.set usage_external_recurse.private_seg_size, 0
425; GCN:	.set usage_external_recurse.uses_vcc, 1
426; GCN:	.set usage_external_recurse.uses_flat_scratch, 1
427; GCN:	.set usage_external_recurse.has_dyn_sized_stack, 1
428; GCN:	.set usage_external_recurse.has_recursion, 1
429; GCN:	.set usage_external_recurse.has_indirect_call, 1
430; GCN: TotalNumSgprs: usage_external_recurse.numbered_sgpr+6
431; GCN: NumVgprs: usage_external_recurse.num_vgpr
432; GCN: ScratchSize: 0
; Calls an external declared with attribute set #2: same worst-case .sets as
; usage_external but with has_recursion checked as 1.
433define amdgpu_kernel void @usage_external_recurse() #0 {
434  call void @external_recurse()
435  ret void
436}
437
438; GCN-LABEL: {{^}}direct_recursion_use_stack:
439; GCN: .set direct_recursion_use_stack.num_vgpr, 41
440; GCN: .set direct_recursion_use_stack.num_agpr, 0
441; GCN: .set direct_recursion_use_stack.numbered_sgpr, 36
442; GCN: .set direct_recursion_use_stack.private_seg_size, 2064
443; GCN: .set direct_recursion_use_stack.uses_vcc, 1
444; GCN: .set direct_recursion_use_stack.uses_flat_scratch, 0
445; GCN: .set direct_recursion_use_stack.has_dyn_sized_stack, 0
446; GCN: .set direct_recursion_use_stack.has_recursion, 1
447; GCN: .set direct_recursion_use_stack.has_indirect_call, 0
448; GCN: TotalNumSgprs: 40
449; GCN: NumVgprs: 41
450; GCN: ScratchSize: 2064
; Directly self-recursive function with a 2048-byte alloca: recursion breaks
; symbolic propagation, so its .sets are plain constants (one frame of stack,
; 2064 with call overhead) and has_recursion is 1.
451define void @direct_recursion_use_stack(i32 %val) #2 {
452  %alloca = alloca [512 x i32], align 4, addrspace(5)
453  call void asm sideeffect "; use $0", "v"(ptr addrspace(5) %alloca) #0
454  %cmp = icmp eq i32 %val, 0
455  br i1 %cmp, label %ret, label %call
456
; recursive arm: decrement and self-call until %val reaches 0
457call:
458  %val.sub1 = sub i32 %val, 1
459  call void @direct_recursion_use_stack(i32 %val.sub1)
460  br label %ret
461
462ret:
463  ret void
464}
465
466; GCN-LABEL: {{^}}usage_direct_recursion:
467; GCN:  .set usage_direct_recursion.num_vgpr, max(32, direct_recursion_use_stack.num_vgpr)
468; GCN:  .set usage_direct_recursion.num_agpr, max(0, direct_recursion_use_stack.num_agpr)
469; GCN:  .set usage_direct_recursion.numbered_sgpr, max(33, direct_recursion_use_stack.numbered_sgpr)
470; GCN:  .set usage_direct_recursion.private_seg_size, 0+(max(direct_recursion_use_stack.private_seg_size))
471; GCN:  .set usage_direct_recursion.uses_vcc, or(1, direct_recursion_use_stack.uses_vcc)
472; GCN:  .set usage_direct_recursion.uses_flat_scratch, or(1, direct_recursion_use_stack.uses_flat_scratch)
473; GCN:  .set usage_direct_recursion.has_dyn_sized_stack, or(0, direct_recursion_use_stack.has_dyn_sized_stack)
474; GCN:  .set usage_direct_recursion.has_recursion, or(1, direct_recursion_use_stack.has_recursion)
475; GCN:  .set usage_direct_recursion.has_indirect_call, or(0, direct_recursion_use_stack.has_indirect_call)
476; GCN: TotalNumSgprs: 42
477; GCN: NumVgprs: 41
478; GCN: ScratchSize: 2064
; Kernel calling the self-recursive function: propagation is symbolic again
; (max/or over direct_recursion_use_stack.*) since the callee's .sets resolved.
479define amdgpu_kernel void @usage_direct_recursion(i32 %n) #0 {
480  call void @direct_recursion_use_stack(i32 %n)
481  ret void
482}
483
484; GCN-LABEL: {{^}}multi_stage_recurse2:
485; GCN: .set multi_stage_recurse2.num_vgpr, max(43, multi_stage_recurse1.num_vgpr)
486; GCN: .set multi_stage_recurse2.num_agpr, max(0, multi_stage_recurse1.num_agpr)
487; GCN: .set multi_stage_recurse2.numbered_sgpr, max(34, multi_stage_recurse1.numbered_sgpr)
488; GCN: .set multi_stage_recurse2.private_seg_size, 16+(max(multi_stage_recurse1.private_seg_size))
489; GCN: .set multi_stage_recurse2.uses_vcc, or(1, multi_stage_recurse1.uses_vcc)
490; GCN: .set multi_stage_recurse2.uses_flat_scratch, or(0, multi_stage_recurse1.uses_flat_scratch)
491; GCN: .set multi_stage_recurse2.has_dyn_sized_stack, or(0, multi_stage_recurse1.has_dyn_sized_stack)
492; GCN: .set multi_stage_recurse2.has_recursion, or(1, multi_stage_recurse1.has_recursion)
493; GCN: .set multi_stage_recurse2.has_indirect_call, or(0, multi_stage_recurse1.has_indirect_call)
494; GCN: TotalNumSgprs: multi_stage_recurse2.numbered_sgpr+(extrasgprs(multi_stage_recurse2.uses_vcc, multi_stage_recurse2.uses_flat_scratch, 1))
495; GCN: NumVgprs: max(43, multi_stage_recurse1.num_vgpr)
496; GCN: ScratchSize: 16+(max(multi_stage_recurse1.private_seg_size))
497; GCN-LABEL: {{^}}multi_stage_recurse1:
498; GCN: .set multi_stage_recurse1.num_vgpr, max(48, amdgpu.max_num_vgpr)
499; GCN: .set multi_stage_recurse1.num_agpr, max(0, amdgpu.max_num_agpr)
500; GCN: .set multi_stage_recurse1.numbered_sgpr, max(34, amdgpu.max_num_sgpr)
501; GCN: .set multi_stage_recurse1.private_seg_size, 16
502; GCN: .set multi_stage_recurse1.uses_vcc, 1
503; GCN: .set multi_stage_recurse1.uses_flat_scratch, 0
504; GCN: .set multi_stage_recurse1.has_dyn_sized_stack, 0
505; GCN: .set multi_stage_recurse1.has_recursion, 1
506; GCN: .set multi_stage_recurse1.has_indirect_call, 0
507; GCN: TotalNumSgprs: multi_stage_recurse1.numbered_sgpr+4
508; GCN: NumVgprs: max(48, amdgpu.max_num_vgpr)
509; GCN: ScratchSize: 16
; Half of a mutual-recursion pair with multi_stage_recurse2; clobbers v47.
; Per the CHECK lines, whichever of the pair is emitted last (this one) gets
; worst-case amdgpu.max_num_* register .sets to break the cycle.
510define void @multi_stage_recurse1(i32 %val) #2 {
511  call void @multi_stage_recurse2(i32 %val)
512  call void asm sideeffect "", "~{v47}"() #0
513  ret void
514}
; Other half of the mutual-recursion pair; clobbers v42. Its .sets reference
; multi_stage_recurse1.* symbolically (see CHECK block above recurse2's label).
515define void @multi_stage_recurse2(i32 %val) #2 {
516  call void @multi_stage_recurse1(i32 %val)
517  call void asm sideeffect "", "~{v42}"() #0
518  ret void
519}
520
521; GCN-LABEL: {{^}}usage_multi_stage_recurse:
522; GCN: .set usage_multi_stage_recurse.num_vgpr, max(32, multi_stage_recurse1.num_vgpr)
523; GCN: .set usage_multi_stage_recurse.num_agpr, max(0, multi_stage_recurse1.num_agpr)
524; GCN: .set usage_multi_stage_recurse.numbered_sgpr, max(33, multi_stage_recurse1.numbered_sgpr)
525; GCN: .set usage_multi_stage_recurse.private_seg_size, 0+(max(multi_stage_recurse1.private_seg_size))
526; GCN: .set usage_multi_stage_recurse.uses_vcc, or(1, multi_stage_recurse1.uses_vcc)
527; GCN: .set usage_multi_stage_recurse.uses_flat_scratch, or(1, multi_stage_recurse1.uses_flat_scratch)
528; GCN: .set usage_multi_stage_recurse.has_dyn_sized_stack, or(0, multi_stage_recurse1.has_dyn_sized_stack)
529; GCN: .set usage_multi_stage_recurse.has_recursion, or(1, multi_stage_recurse1.has_recursion)
530; GCN: .set usage_multi_stage_recurse.has_indirect_call, or(0, multi_stage_recurse1.has_indirect_call)
531; GCN: TotalNumSgprs: usage_multi_stage_recurse.numbered_sgpr+6
532; GCN: NumVgprs: usage_multi_stage_recurse.num_vgpr
533; GCN: ScratchSize: 16
; Kernel entry into the mutually recursive pair via multi_stage_recurse1.
534define amdgpu_kernel void @usage_multi_stage_recurse(i32 %n) #0 {
535  call void @multi_stage_recurse1(i32 %n)
536  ret void
537}
538
539; GCN-LABEL: {{^}}multi_stage_recurse_noattr2:
540; GCN: .set multi_stage_recurse_noattr2.num_vgpr, max(41, multi_stage_recurse_noattr1.num_vgpr)
541; GCN: .set multi_stage_recurse_noattr2.num_agpr, max(0, multi_stage_recurse_noattr1.num_agpr)
542; GCN: .set multi_stage_recurse_noattr2.numbered_sgpr, max(54, multi_stage_recurse_noattr1.numbered_sgpr)
543; GCN: .set multi_stage_recurse_noattr2.private_seg_size, 16+(max(multi_stage_recurse_noattr1.private_seg_size))
544; GCN: .set multi_stage_recurse_noattr2.uses_vcc, or(1, multi_stage_recurse_noattr1.uses_vcc)
545; GCN: .set multi_stage_recurse_noattr2.uses_flat_scratch, or(0, multi_stage_recurse_noattr1.uses_flat_scratch)
546; GCN: .set multi_stage_recurse_noattr2.has_dyn_sized_stack, or(0, multi_stage_recurse_noattr1.has_dyn_sized_stack)
547; GCN: .set multi_stage_recurse_noattr2.has_recursion, or(0, multi_stage_recurse_noattr1.has_recursion)
548; GCN: .set multi_stage_recurse_noattr2.has_indirect_call, or(0, multi_stage_recurse_noattr1.has_indirect_call)
549; GCN: TotalNumSgprs: multi_stage_recurse_noattr2.numbered_sgpr+(extrasgprs(multi_stage_recurse_noattr2.uses_vcc, multi_stage_recurse_noattr2.uses_flat_scratch, 1))
550; GCN: NumVgprs: max(41, multi_stage_recurse_noattr1.num_vgpr)
551; GCN: ScratchSize: 16+(max(multi_stage_recurse_noattr1.private_seg_size))
552; GCN-LABEL: {{^}}multi_stage_recurse_noattr1:
553; GCN: .set multi_stage_recurse_noattr1.num_vgpr, max(41, amdgpu.max_num_vgpr)
554; GCN: .set multi_stage_recurse_noattr1.num_agpr, max(0, amdgpu.max_num_agpr)
555; GCN: .set multi_stage_recurse_noattr1.numbered_sgpr, max(57, amdgpu.max_num_sgpr)
556; GCN: .set multi_stage_recurse_noattr1.private_seg_size, 16
557; GCN: .set multi_stage_recurse_noattr1.uses_vcc, 1
558; GCN: .set multi_stage_recurse_noattr1.uses_flat_scratch, 0
559; GCN: .set multi_stage_recurse_noattr1.has_dyn_sized_stack, 0
560; GCN: .set multi_stage_recurse_noattr1.has_recursion, 0
561; GCN: .set multi_stage_recurse_noattr1.has_indirect_call, 0
562; GCN: TotalNumSgprs: multi_stage_recurse_noattr1.numbered_sgpr+4
563; GCN: NumVgprs: max(41, amdgpu.max_num_vgpr)
564; GCN: ScratchSize: 16
; Mutual recursion WITHOUT the norecurse-related attribute set (#0 instead of
; #2); clobbers s56. CHECK lines show has_recursion stays 0 in this variant.
565define void @multi_stage_recurse_noattr1(i32 %val) #0 {
566  call void @multi_stage_recurse_noattr2(i32 %val)
567  call void asm sideeffect "", "~{s56}"() #0
568  ret void
569}
; Partner of multi_stage_recurse_noattr1; clobbers s53 and propagates
; symbolically through multi_stage_recurse_noattr1.* per the CHECKs above.
570define void @multi_stage_recurse_noattr2(i32 %val) #0 {
571  call void @multi_stage_recurse_noattr1(i32 %val)
572  call void asm sideeffect "", "~{s53}"() #0
573  ret void
574}
575
576; GCN-LABEL: {{^}}usage_multi_stage_recurse_noattrs:
577; GCN: .set usage_multi_stage_recurse_noattrs.num_vgpr, max(32, multi_stage_recurse_noattr1.num_vgpr)
578; GCN: .set usage_multi_stage_recurse_noattrs.num_agpr, max(0, multi_stage_recurse_noattr1.num_agpr)
579; GCN: .set usage_multi_stage_recurse_noattrs.numbered_sgpr, max(33, multi_stage_recurse_noattr1.numbered_sgpr)
580; GCN: .set usage_multi_stage_recurse_noattrs.private_seg_size, 0+(max(multi_stage_recurse_noattr1.private_seg_size))
581; GCN: .set usage_multi_stage_recurse_noattrs.uses_vcc, or(1, multi_stage_recurse_noattr1.uses_vcc)
582; GCN: .set usage_multi_stage_recurse_noattrs.uses_flat_scratch, or(1, multi_stage_recurse_noattr1.uses_flat_scratch)
583; GCN: .set usage_multi_stage_recurse_noattrs.has_dyn_sized_stack, or(0, multi_stage_recurse_noattr1.has_dyn_sized_stack)
584; GCN: .set usage_multi_stage_recurse_noattrs.has_recursion, or(0, multi_stage_recurse_noattr1.has_recursion)
585; GCN: .set usage_multi_stage_recurse_noattrs.has_indirect_call, or(0, multi_stage_recurse_noattr1.has_indirect_call)
586; GCN: TotalNumSgprs: usage_multi_stage_recurse_noattrs.numbered_sgpr+6
587; GCN: NumVgprs: usage_multi_stage_recurse_noattrs.num_vgpr
588; GCN: ScratchSize: 16
; Kernel entry into the attribute-less mutually recursive pair.
589define amdgpu_kernel void @usage_multi_stage_recurse_noattrs(i32 %n) #0 {
590  call void @multi_stage_recurse_noattr1(i32 %n)
591  ret void
592}
593
; GCN-LABEL: {{^}}multi_call_with_multi_stage_recurse:
; GCN:  .set multi_call_with_multi_stage_recurse.num_vgpr, max(41, use_stack0.num_vgpr, use_stack1.num_vgpr, multi_stage_recurse1.num_vgpr)
; GCN:  .set multi_call_with_multi_stage_recurse.num_agpr, max(0, use_stack0.num_agpr, use_stack1.num_agpr, multi_stage_recurse1.num_agpr)
; GCN:  .set multi_call_with_multi_stage_recurse.numbered_sgpr, max(45, use_stack0.numbered_sgpr, use_stack1.numbered_sgpr, multi_stage_recurse1.numbered_sgpr)
; GCN:  .set multi_call_with_multi_stage_recurse.private_seg_size, 0+(max(use_stack0.private_seg_size, use_stack1.private_seg_size, multi_stage_recurse1.private_seg_size))
; GCN:  .set multi_call_with_multi_stage_recurse.uses_vcc, or(1, use_stack0.uses_vcc, use_stack1.uses_vcc, multi_stage_recurse1.uses_vcc)
; GCN:  .set multi_call_with_multi_stage_recurse.uses_flat_scratch, or(1, use_stack0.uses_flat_scratch, use_stack1.uses_flat_scratch, multi_stage_recurse1.uses_flat_scratch)
; GCN:  .set multi_call_with_multi_stage_recurse.has_dyn_sized_stack, or(0, use_stack0.has_dyn_sized_stack, use_stack1.has_dyn_sized_stack, multi_stage_recurse1.has_dyn_sized_stack)
; GCN:  .set multi_call_with_multi_stage_recurse.has_recursion, or(1, use_stack0.has_recursion, use_stack1.has_recursion, multi_stage_recurse1.has_recursion)
; GCN:  .set multi_call_with_multi_stage_recurse.has_indirect_call, or(0, use_stack0.has_indirect_call, use_stack1.has_indirect_call, multi_stage_recurse1.has_indirect_call)
; GCN: TotalNumSgprs: multi_call_with_multi_stage_recurse.numbered_sgpr+6
; GCN: NumVgprs:  multi_call_with_multi_stage_recurse.num_vgpr
; GCN: ScratchSize: 2052
; Kernel with three callees (two stack users plus the recursive
; multi_stage_recurse1): the checks above expect each propagated quantity to
; combine all three callees via max(...)/or(...), and has_recursion to be 1.
define amdgpu_kernel void @multi_call_with_multi_stage_recurse(i32 %n) #0 {
  call void @use_stack0()
  call void @use_stack1()
  call void @multi_stage_recurse1(i32 %n)
  ret void
}
613
; Make sure there's no assert when a sgpr96 is used.
; GCN-LABEL: {{^}}count_use_sgpr96_external_call
; GCN:	.set count_use_sgpr96_external_call.num_vgpr, max(32, amdgpu.max_num_vgpr)
; GCN:	.set count_use_sgpr96_external_call.num_agpr, max(0, amdgpu.max_num_agpr)
; GCN:	.set count_use_sgpr96_external_call.numbered_sgpr, max(33, amdgpu.max_num_sgpr)
; GCN:	.set count_use_sgpr96_external_call.private_seg_size, 0
; GCN:	.set count_use_sgpr96_external_call.uses_vcc, 1
; GCN:	.set count_use_sgpr96_external_call.uses_flat_scratch, 1
; GCN:	.set count_use_sgpr96_external_call.has_dyn_sized_stack, 1
; GCN:	.set count_use_sgpr96_external_call.has_recursion, 0
; GCN:	.set count_use_sgpr96_external_call.has_indirect_call, 1
; GCN: TotalNumSgprs: count_use_sgpr96_external_call.numbered_sgpr+6
; GCN: NumVgprs: count_use_sgpr96_external_call.num_vgpr
; GCN: ScratchSize: 0
; Because @external is defined outside this module, the checks above expect
; the register counts to fall back to the module-level amdgpu.max_num_*
; symbols and has_indirect_call to be 1.
define amdgpu_kernel void @count_use_sgpr96_external_call()  {
entry:
  tail call void asm sideeffect "; sgpr96 $0", "s"(<3 x i32> <i32 10, i32 11, i32 12>) #1
  call void @external()
  ret void
}
634
; Make sure there's no assert when a sgpr160 is used.
; GCN-LABEL: {{^}}count_use_sgpr160_external_call
; GCN:	.set count_use_sgpr160_external_call.num_vgpr, max(32, amdgpu.max_num_vgpr)
; GCN:	.set count_use_sgpr160_external_call.num_agpr, max(0, amdgpu.max_num_agpr)
; GCN:	.set count_use_sgpr160_external_call.numbered_sgpr, max(33, amdgpu.max_num_sgpr)
; GCN:	.set count_use_sgpr160_external_call.private_seg_size, 0
; GCN:	.set count_use_sgpr160_external_call.uses_vcc, 1
; GCN:	.set count_use_sgpr160_external_call.uses_flat_scratch, 1
; GCN:	.set count_use_sgpr160_external_call.has_dyn_sized_stack, 1
; GCN:	.set count_use_sgpr160_external_call.has_recursion, 0
; GCN:	.set count_use_sgpr160_external_call.has_indirect_call, 1
; GCN: TotalNumSgprs: count_use_sgpr160_external_call.numbered_sgpr+6
; GCN: NumVgprs: count_use_sgpr160_external_call.num_vgpr
; GCN: ScratchSize: 0
; Same structure as the sgpr96 case above, but with a <5 x i32> ("s"
; constraint) inline-asm operand to exercise an sgpr160 tuple.
define amdgpu_kernel void @count_use_sgpr160_external_call()  {
entry:
  tail call void asm sideeffect "; sgpr160 $0", "s"(<5 x i32> <i32 10, i32 11, i32 12, i32 13, i32 14>) #1
  call void @external()
  ret void
}
655
; Make sure there's no assert when a vgpr160 is used.
; GCN-LABEL: {{^}}count_use_vgpr160_external_call
; GCN:	.set count_use_vgpr160_external_call.num_vgpr, max(32, amdgpu.max_num_vgpr)
; GCN:	.set count_use_vgpr160_external_call.num_agpr, max(0, amdgpu.max_num_agpr)
; GCN:	.set count_use_vgpr160_external_call.numbered_sgpr, max(33, amdgpu.max_num_sgpr)
; GCN:	.set count_use_vgpr160_external_call.private_seg_size, 0
; GCN:	.set count_use_vgpr160_external_call.uses_vcc, 1
; GCN:	.set count_use_vgpr160_external_call.uses_flat_scratch, 1
; GCN:	.set count_use_vgpr160_external_call.has_dyn_sized_stack, 1
; GCN:	.set count_use_vgpr160_external_call.has_recursion, 0
; GCN:	.set count_use_vgpr160_external_call.has_indirect_call, 1
; GCN: TotalNumSgprs: count_use_vgpr160_external_call.numbered_sgpr+6
; GCN: NumVgprs: count_use_vgpr160_external_call.num_vgpr
; GCN: ScratchSize: 0
; Same structure as the sgpr cases above, but the <5 x i32> operand uses the
; "v" constraint to exercise a vgpr160 tuple.
define amdgpu_kernel void @count_use_vgpr160_external_call()  {
entry:
  tail call void asm sideeffect "; vgpr160 $0", "v"(<5 x i32> <i32 10, i32 11, i32 12, i32 13, i32 14>) #1
  call void @external()
  ret void
}
676
; Added at the end of the .s are the module-level maximums
678; GCN:	.set amdgpu.max_num_vgpr, 50
679; GCN:	.set amdgpu.max_num_agpr, 0
680; GCN:	.set amdgpu.max_num_sgpr, 80
681
; NOTE: #0 and #1 are intentionally identical here; #2 differs only in
; lacking norecurse.
attributes #0 = { nounwind noinline norecurse }
attributes #1 = { nounwind noinline norecurse }
attributes #2 = { nounwind noinline }
685