; RUN: llc < %s -mtriple=arm64-apple-darwin -enable-misched=false -enable-post-misched=false | FileCheck %s

; rdar://12713765
; Make sure we are not creating stack objects that are assumed to be 64-byte
; aligned.
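; Note: the <16 x float> value is 64 bytes, but the global, the alloca, and the
; sret store are all only 16-byte aligned. The CHECK lines below therefore
; expect the copies to be emitted as plain stp pairs at offsets #0 and #32 from
; sp (and from a base register for the sret destination), rather than a frame
; that has been realigned to 64 bytes.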
@T3_retval = common global <16 x float> zeroinitializer, align 16

define void @test(ptr noalias sret(<16 x float>) %agg.result) nounwind ssp {
entry:
; CHECK: test
; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [sp, #32]
; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [sp]
; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [[[BASE:x[0-9]+]], #32]
; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], [[[BASE]]]
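; The body round-trips the 64-byte value through the 16-byte-aligned local
; before storing it to the sret result.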
 %retval = alloca <16 x float>, align 16
 %0 = load <16 x float>, ptr @T3_retval, align 16
 store <16 x float> %0, ptr %retval
 %1 = load <16 x float>, ptr %retval
 store <16 x float> %1, ptr %agg.result, align 16
 ret void
}