xref: /llvm-project/llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll (revision 4442667fc5adefe005746f44ee7e7c154b5f73b9)
1; RUN: llc < %s -mtriple=armv7-apple-ios -O0 | FileCheck %s
2
3; rdar://12713765
4; When realign-stack is set to false, make sure we are not creating stack
5; objects that are assumed to be 64-byte aligned.
6@T3_retval = common global <16 x float> zeroinitializer, align 16
7
; test1: with the "no-realign-stack" attribute the <16 x float> stack
; temporary must NOT be assumed 64-byte aligned: offsets from sp are formed
; with plain `add`s (compare test2, where realignment permits `orr`).
; All vld1/vst1 still carry the :128 alignment hint from the align-16 data.
define void @test1(<16 x float>* noalias sret %agg.result) nounwind ssp "no-realign-stack" {
entry:
; CHECK-LABEL: test1
; CHECK:	ldr	r[[R1:[0-9]+]], [pc, r1]
; CHECK:	add	r[[R2:[0-9]+]], r[[R1]], #48
; CHECK:	vld1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; CHECK:	mov	r[[R2:[0-9]+]], r[[R1]]
; CHECK:	vld1.32	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
; CHECK:	vld1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; CHECK:	add	r[[R1:[0-9]+]], r[[R1]], #32
; CHECK:	vld1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; CHECK:	mov	r[[R1:[0-9]+]], sp
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; `add` (not `orr`) here is the point of the test: sp is only 16-byte
; aligned, so the backend may not fold the offset with an orr.
; CHECK:	add	r[[R2:[0-9]+]], r[[R1]], #32
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; CHECK:	vld1.32	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]!
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; CHECK:	vld1.32	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; CHECK:	vld1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; CHECK:	vld1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; CHECK:	add	r[[R1:[0-9]+]], r0, #48
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; CHECK:	add	r[[R1:[0-9]+]], r0, #32
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; CHECK:	vst1.32	{{{d[0-9]+, d[0-9]+}}}, [r0:128]!
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r0:128]
  ; Round-trip the global through a stack slot so a spill object is created.
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
}
42
; test2: same body as test1 but WITHOUT "no-realign-stack", so the stack
; may be realigned; the 64-byte-aligned stack object lets the backend form
; the +32 offset with `orr` instead of `add` (see the check below).
define void @test2(<16 x float>* noalias sret %agg.result) nounwind ssp {
entry:
; CHECK-LABEL: test2
; CHECK:	ldr	r[[R1:[0-9]+]], [pc, r1]
; CHECK:	add	r[[R2:[0-9]+]], r[[R1]], #48
; CHECK:	vld1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; CHECK:	mov	r[[R2:[0-9]+]], r[[R1]]
; CHECK:	vld1.32	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
; CHECK:	vld1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; CHECK:	add	r[[R1:[0-9]+]], r[[R1]], #32
; CHECK:	vld1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; CHECK:	mov	r[[R1:[0-9]+]], sp
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; `orr` is only legal because the realigned object is 64-byte aligned,
; making the low bits of the base known-zero.
; CHECK:	orr	r[[R2:[0-9]+]], r[[R1]], #32
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; CHECK:	vld1.32	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]!
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; CHECK:	vld1.32	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]!
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; CHECK:	vld1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; CHECK:	vld1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R2]]:128]
; CHECK:	add	r[[R1:[0-9]+]], r0, #48
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; CHECK:	add	r[[R1:[0-9]+]], r0, #32
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r[[R1]]:128]
; CHECK:	vst1.32	{{{d[0-9]+, d[0-9]+}}}, [r0:128]!
; CHECK:	vst1.64	{{{d[0-9]+, d[0-9]+}}}, [r0:128]
  ; Round-trip the global through a stack slot so a spill object is created.
  %retval = alloca <16 x float>, align 16
  %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
  store <16 x float> %0, <16 x float>* %retval
  %1 = load <16 x float>, <16 x float>* %retval
  store <16 x float> %1, <16 x float>* %agg.result, align 16
  ret void
}
78