xref: /llvm-project/llvm/test/CodeGen/AArch64/arm64-stur.ll (revision 2f083b364f43fb12b2fdf23935e1f0b6958d0882)
; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone -mattr=+slow-misaligned-128store | FileCheck %s

; Packed struct: i64 members sit at byte offsets 4 and 12, i.e. not naturally
; aligned — used by @foo below to force unscaled-offset stores.
%struct.X = type <{ i32, i64, i64 }>

; i32 store at a negative offset selects the unscaled-immediate form (stur).
define void @foo1(ptr %p, i64 %val) nounwind {
; CHECK-LABEL: foo1:
; CHECK: 	stur	w1, [x0, #-4]
; CHECK-NEXT: 	ret
  %tmp1 = trunc i64 %val to i32
  %ptr = getelementptr inbounds i32, ptr %p, i64 -1
  store i32 %tmp1, ptr %ptr, align 4
  ret void
}
; i16 store at a negative offset selects sturh.
define void @foo2(ptr %p, i64 %val) nounwind {
; CHECK-LABEL: foo2:
; CHECK: 	sturh	w1, [x0, #-2]
; CHECK-NEXT: 	ret
  %tmp1 = trunc i64 %val to i16
  %ptr = getelementptr inbounds i16, ptr %p, i64 -1
  store i16 %tmp1, ptr %ptr, align 2
  ret void
}
; i8 store at a negative offset selects sturb.
define void @foo3(ptr %p, i64 %val) nounwind {
; CHECK-LABEL: foo3:
; CHECK: 	sturb	w1, [x0, #-1]
; CHECK-NEXT: 	ret
  %tmp1 = trunc i64 %val to i8
  %ptr = getelementptr inbounds i8, ptr %p, i64 -1
  store i8 %tmp1, ptr %ptr, align 1
  ret void
}
; Same as @foo2 but truncating from i32 — still selects sturh.
define void @foo4(ptr %p, i32 %val) nounwind {
; CHECK-LABEL: foo4:
; CHECK: 	sturh	w1, [x0, #-2]
; CHECK-NEXT: 	ret
  %tmp1 = trunc i32 %val to i16
  %ptr = getelementptr inbounds i16, ptr %p, i32 -1
  store i16 %tmp1, ptr %ptr, align 2
  ret void
}
; Same as @foo3 but truncating from i32 — still selects sturb.
define void @foo5(ptr %p, i32 %val) nounwind {
; CHECK-LABEL: foo5:
; CHECK: 	sturb	w1, [x0, #-1]
; CHECK-NEXT: 	ret
  %tmp1 = trunc i32 %val to i8
  %ptr = getelementptr inbounds i8, ptr %p, i32 -1
  store i8 %tmp1, ptr %ptr, align 1
  ret void
}

; 16-byte memset at the misaligned offset 4 of %struct.X lowers to two
; unscaled xzr stores (offsets #4 and #12), not a 128-bit str.
define void @foo(ptr nocapture %p) nounwind optsize ssp {
; CHECK-LABEL: foo:
; CHECK-NOT: str
; CHECK: stur    xzr, [x0, #12]
; CHECK-NEXT: stur    xzr, [x0, #4]
; CHECK-NEXT: ret
  %B = getelementptr inbounds %struct.X, ptr %p, i64 0, i32 1
  call void @llvm.memset.p0.i64(ptr %B, i8 0, i64 16, i1 false)
  ret void
}

declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind

; Unaligned 16b stores are split into 8b stores for performance
; (+slow-misaligned-128store): expect ext.16b + stp of d-regs, no q store.
; radar://15424193

; CHECK-LABEL: unaligned:
; CHECK-NOT: str q0
; CHECK: ext.16b v[[REG2:[0-9]+]], v[[REG:[0-9]+]], v[[REG]], #8
; CHECK: stp     d[[REG]], d[[REG2]], [x0]
define void @unaligned(ptr %p, <4 x i32> %v) nounwind {
  store <4 x i32> %v, ptr %p, align 4
  ret void
}

; Naturally aligned (16-byte) vector store keeps the single q-register str.
; CHECK-LABEL: aligned:
; CHECK: str q0
define void @aligned(ptr %p, <4 x i32> %v) nounwind {
  store <4 x i32> %v, ptr %p
  ret void
}

; Don't split one and two byte aligned stores.
; radar://16349308

; CHECK-LABEL: twobytealign:
; CHECK: str q0
define void @twobytealign(ptr %p, <4 x i32> %v) nounwind {
  store <4 x i32> %v, ptr %p, align 2
  ret void
}
; Byte-aligned vector store is likewise not split — single q store.
; CHECK-LABEL: onebytealign:
; CHECK: str q0
define void @onebytealign(ptr %p, <4 x i32> %v) nounwind {
  store <4 x i32> %v, ptr %p, align 1
  ret void
}