; RUN: llc < %s -stack-symbol-ordering=0 -mcpu=generic -mattr=+avx -mtriple=x86_64-apple-darwin10 | FileCheck %s
; rdar://11496434

; no VLAs or dynamic alignment
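; A single 4-byte alloca fits within the ABI-guaranteed 16-byte stack
; alignment, so the expected output has no frame pointer and no realigning
; andq; the local is addressed at a fixed offset from %rsp.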
define i32 @t1() nounwind uwtable ssp {
entry:
  %a = alloca i32, align 4
  call void @t1_helper(ptr %a) nounwind
  %0 = load i32, ptr %a, align 4
  %add = add nsw i32 %0, 13
  ret i32 %add

; CHECK: _t1
; CHECK-NOT: andq $-{{[0-9]+}}, %rsp
; CHECK: leaq [[OFFSET:[0-9]*]](%rsp), %rdi
; CHECK: callq _t1_helper
; CHECK: movl [[OFFSET]](%rsp), %eax
; CHECK: addl $13, %eax
}

declare void @t1_helper(ptr)

; dynamic realignment
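; The 32-byte-aligned alloca exceeds the ABI stack alignment, so the
; prologue must realign dynamically: save %rbp, round %rsp down with
; andq $-32, then restore %rsp from %rbp in the epilogue.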
define i32 @t2() nounwind uwtable ssp {
entry:
  %a = alloca i32, align 4
  %v = alloca <8 x float>, align 32
  call void @t2_helper(ptr %a, ptr %v) nounwind
  %0 = load i32, ptr %a, align 4
  %add = add nsw i32 %0, 13
  ret i32 %add

; CHECK: _t2
; CHECK: pushq %rbp
; CHECK: movq %rsp, %rbp
; CHECK: andq $-32, %rsp
; CHECK: subq ${{[0-9]+}}, %rsp
;
; CHECK: leaq {{[0-9]*}}(%rsp), %rdi
; CHECK: movq %rsp, %rsi
; CHECK: callq _t2_helper
;
; CHECK: movq %rbp, %rsp
; CHECK: popq %rbp
}

declare void @t2_helper(ptr, ptr)

; VLAs
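; A VLA makes the frame size unknown at compile time, so a frame pointer
; is needed to restore %rsp on return; but nothing here requires more than
; 16-byte alignment, so no realigning andq should be emitted.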
define i32 @t3(i64 %sz) nounwind uwtable ssp {
entry:
  %a = alloca i32, align 4
  %vla = alloca i32, i64 %sz, align 16
  call void @t3_helper(ptr %a, ptr %vla) nounwind
  %0 = load i32, ptr %a, align 4
  %add = add nsw i32 %0, 13
  ret i32 %add

; CHECK: _t3
; CHECK: pushq %rbp
; CHECK: movq %rsp, %rbp
; CHECK-NOT: andq $-{{[0-9]+}}, %rsp
; CHECK: subq ${{[0-9]+}}, %rsp
;
; CHECK: movq %rbp, %rsp
; CHECK: popq %rbp
}

declare void @t3_helper(ptr, ptr)

; VLAs + Dynamic realignment
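; With both a VLA and a 32-byte-aligned alloca, the prologue realigns via
; %rbp/andq as in t2 and additionally sets up %rbx as a base pointer,
; since the VLA moves %rsp at runtime and the fixed-size locals must stay
; addressable.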
define i32 @t4(i64 %sz) nounwind uwtable ssp {
entry:
  %a = alloca i32, align 4
  %v = alloca <8 x float>, align 32
  %vla = alloca i32, i64 %sz, align 16
  call void @t4_helper(ptr %a, ptr %vla, ptr %v) nounwind
  %0 = load i32, ptr %a, align 4
  %add = add nsw i32 %0, 13
  ret i32 %add

; CHECK: _t4
; CHECK: pushq %rbp
; CHECK: movq %rsp, %rbp
; CHECK: pushq %rbx
; CHECK: andq $-32, %rsp
; CHECK: subq ${{[0-9]+}}, %rsp
; CHECK: movq %rsp, %rbx
;
; CHECK: leaq {{[0-9]*}}(%rbx), %rdi
; CHECK: movq %rbx, %rdx
; CHECK: callq _t4_helper
;
; CHECK: leaq -{{[0-9]+}}(%rbp), %rsp
; CHECK: popq %rbx
; CHECK: popq %rbp
}

declare void @t4_helper(ptr, ptr, ptr)

; Spilling an AVX register shouldn't cause dynamic realignment
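; The 32-byte %ymm value is spilled with vmovups, an unaligned store, so
; an under-aligned spill slot is acceptable and no %rbp/andq realignment
; is needed; the frame remains a plain subq-allocated one.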
define i32 @t5(ptr nocapture %f) nounwind uwtable ssp {
entry:
  %a = alloca i32, align 4
  %0 = load <8 x float>, ptr %f, align 32
  call void @t5_helper1(ptr %a) nounwind
  call void @t5_helper2(<8 x float> %0) nounwind
  %1 = load i32, ptr %a, align 4
  %add = add nsw i32 %1, 13
  ret i32 %add

; CHECK: _t5
; CHECK: subq ${{[0-9]+}}, %rsp
;
; CHECK: vmovaps (%rdi), [[AVXREG:%ymm[0-9]+]]
; CHECK: vmovups [[AVXREG]], (%rsp)
; CHECK: leaq {{[0-9]+}}(%rsp), %rdi
; CHECK: callq _t5_helper1
; CHECK: vmovups (%rsp), %ymm0
; CHECK: callq _t5_helper2
; CHECK: movl {{[0-9]+}}(%rsp), %eax
}

declare void @t5_helper1(ptr)

declare void @t5_helper2(<8 x float>)

; VLAs + Dynamic realignment + Spill
; FIXME: RA has already reserved RBX, so we can't do dynamic realignment.
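; Until that is fixed, only the symbol is checked; the body combines a VLA
; with a live <8 x float> value that must survive two calls.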
define i32 @t6(i64 %sz, ptr nocapture %f) nounwind uwtable ssp {
entry:
; CHECK: _t6
  %a = alloca i32, align 4
  %0 = load <8 x float>, ptr %f, align 32
  %vla = alloca i32, i64 %sz, align 16
  call void @t6_helper1(ptr %a, ptr %vla) nounwind
  call void @t6_helper2(<8 x float> %0) nounwind
  %1 = load i32, ptr %a, align 4
  %add = add nsw i32 %1, 13
  ret i32 %add
}

declare void @t6_helper1(ptr, ptr)

declare void @t6_helper2(<8 x float>)

; VLAs + Dynamic realignment + byval
; The byval argument adjusts %rsp after the prologue, but if we're
; restoring %rsp from the base pointer we use the original adjustment.
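; The CHECK-NOT below makes this concrete: no addq undoes the byval
; call-site adjustment; the epilogue's leaq -8(%rbp), %rsp restores the
; original %rsp directly.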
%struct.struct_t = type { [5 x i32] }

define void @t7(i32 %size, ptr byval(%struct.struct_t) align 8 %arg1) nounwind uwtable {
entry:
  %x = alloca i32, align 32
  store i32 0, ptr %x, align 32
  %0 = zext i32 %size to i64
  %vla = alloca i32, i64 %0, align 16
  %1 = load i32, ptr %x, align 32
  call void @bar(i32 %1, ptr %vla, ptr byval(%struct.struct_t) align 8 %arg1)
  ret void

; CHECK: _t7
; CHECK:     pushq %rbp
; CHECK:     movq %rsp, %rbp
; CHECK:     pushq %rbx
; CHECK:     andq $-32, %rsp
; CHECK:     subq ${{[0-9]+}}, %rsp
; CHECK:     movq %rsp, %rbx

; Stack adjustment for byval
; CHECK:     subq {{.*}}, %rsp
; CHECK:     callq _bar
; CHECK-NOT: addq {{.*}}, %rsp
; CHECK:     leaq -8(%rbp), %rsp
; CHECK:     popq %rbx
; CHECK:     popq %rbp
}

declare ptr @llvm.stacksave() nounwind

declare void @bar(i32, ptr, ptr byval(%struct.struct_t) align 8)

declare void @llvm.stackrestore(ptr) nounwind