; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64 -mattr=+mte | FileCheck %s

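; The tests below check how the llvm.aarch64.settag and llvm.aarch64.settag.zero
; intrinsics are lowered with +mte: stg tags a single 16-byte granule, st2g tags
; two granules, and the stz variants additionally zero the tagged memory. Small
; sizes are expected to lower to straight-line sequences of these instructions;
; larger sizes to a tag-store loop (see stg16 and later tests).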
define void @stg1(ptr %p) {
; CHECK-LABEL: stg1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    stg x0, [x0]
; CHECK-NEXT:    ret
entry:
  call void @llvm.aarch64.settag(ptr %p, i64 16)
  ret void
}

define void @stg2(ptr %p) {
; CHECK-LABEL: stg2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    st2g x0, [x0]
; CHECK-NEXT:    ret
entry:
  call void @llvm.aarch64.settag(ptr %p, i64 32)
  ret void
}

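; 48 bytes is not a multiple of 32, so the lowering is expected to combine a
; 32-byte st2g with a trailing 16-byte stg.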
define void @stg3(ptr %p) {
; CHECK-LABEL: stg3:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    stg x0, [x0, #32]
; CHECK-NEXT:    st2g x0, [x0]
; CHECK-NEXT:    ret
entry:
  call void @llvm.aarch64.settag(ptr %p, i64 48)
  ret void
}

define void @stg4(ptr %p) {
; CHECK-LABEL: stg4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    st2g x0, [x0, #32]
; CHECK-NEXT:    st2g x0, [x0]
; CHECK-NEXT:    ret
entry:
  call void @llvm.aarch64.settag(ptr %p, i64 64)
  ret void
}

define void @stg5(ptr %p) {
; CHECK-LABEL: stg5:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    stg x0, [x0, #64]
; CHECK-NEXT:    st2g x0, [x0, #32]
; CHECK-NEXT:    st2g x0, [x0]
; CHECK-NEXT:    ret
entry:
  call void @llvm.aarch64.settag(ptr %p, i64 80)
  ret void
}

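; For larger regions (256 bytes here) the expansion is a loop: a byte count is
; materialized in a scratch register and decremented by 32 per post-indexed
; st2g, with a subs + b.ne back-edge.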
define void @stg16(ptr %p) {
; CHECK-LABEL: stg16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov x8, #256 // =0x100
; CHECK-NEXT:  .LBB5_1: // %entry
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    st2g x0, [x0], #32
; CHECK-NEXT:    subs x8, x8, #32
; CHECK-NEXT:    b.ne .LBB5_1
; CHECK-NEXT:  // %bb.2: // %entry
; CHECK-NEXT:    ret
entry:
  call void @llvm.aarch64.settag(ptr %p, i64 256)
  ret void
}

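; 272 = 16 + 256: a single post-indexed stg is expected to handle the odd
; 16-byte granule before the 32-byte loop covers the rest.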
define void @stg17(ptr %p) {
; CHECK-LABEL: stg17:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    stg x0, [x0], #16
; CHECK-NEXT:    mov x8, #256 // =0x100
; CHECK-NEXT:  .LBB6_1: // %entry
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    st2g x0, [x0], #32
; CHECK-NEXT:    subs x8, x8, #32
; CHECK-NEXT:    b.ne .LBB6_1
; CHECK-NEXT:  // %bb.2: // %entry
; CHECK-NEXT:    ret
entry:
  call void @llvm.aarch64.settag(ptr %p, i64 272)
  ret void
}

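; The settag.zero intrinsic follows the same patterns, but with the zeroing
; forms stzg/stz2g.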
define void @stzg3(ptr %p) {
; CHECK-LABEL: stzg3:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    stzg x0, [x0, #32]
; CHECK-NEXT:    stz2g x0, [x0]
; CHECK-NEXT:    ret
entry:
  call void @llvm.aarch64.settag.zero(ptr %p, i64 48)
  ret void
}

define void @stzg17(ptr %p) {
; CHECK-LABEL: stzg17:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    stzg x0, [x0], #16
; CHECK-NEXT:    mov x8, #256 // =0x100
; CHECK-NEXT:  .LBB8_1: // %entry
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    stz2g x0, [x0], #32
; CHECK-NEXT:    subs x8, x8, #32
; CHECK-NEXT:    b.ne .LBB8_1
; CHECK-NEXT:  // %bb.2: // %entry
; CHECK-NEXT:    ret
entry:
  call void @llvm.aarch64.settag.zero(ptr %p, i64 272)
  ret void
}

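; Tagging a stack allocation: the tag-store instructions are expected to use sp
; directly, and (under uwtable) the post-indexed forms that move sp come with
; matching .cfi_def_cfa_offset updates.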
define void @stg_alloca1() uwtable {
; CHECK-LABEL: stg_alloca1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sub sp, sp, #16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    stg sp, [sp], #16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
entry:
  %a = alloca i8, i32 16, align 16
  call void @llvm.aarch64.settag(ptr %a, i64 16)
  ret void
}

define void @stg_alloca5() uwtable {
; CHECK-LABEL: stg_alloca5:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sub sp, sp, #80
; CHECK-NEXT:    .cfi_def_cfa_offset 80
; CHECK-NEXT:    st2g sp, [sp, #32]
; CHECK-NEXT:    stg sp, [sp, #64]
; CHECK-NEXT:    st2g sp, [sp], #80
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
entry:
  %a = alloca i8, i32 80, align 16
  call void @llvm.aarch64.settag(ptr %a, i64 80)
  ret void
}

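; In a nounwind function the tag-store loop post-increments sp itself,
; deallocating the frame as it tags it, and no .cfi directives are emitted.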
define void @stg_alloca17() nounwind {
; CHECK-LABEL: stg_alloca17:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sub sp, sp, #288
; CHECK-NEXT:    mov x8, #256 // =0x100
; CHECK-NEXT:    str x29, [sp, #272] // 8-byte Folded Spill
; CHECK-NEXT:  .LBB11_1: // %entry
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    st2g sp, [sp], #32
; CHECK-NEXT:    subs x8, x8, #32
; CHECK-NEXT:    b.ne .LBB11_1
; CHECK-NEXT:  // %bb.2: // %entry
; CHECK-NEXT:    stg sp, [sp], #16
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
entry:
  %a = alloca i8, i32 272, align 16
  call void @llvm.aarch64.settag(ptr %a, i64 272)
  ret void
}

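; With uwtable the loop instead runs over a copy of sp in a scratch register
; (x9 in the checks below), so sp and the CFA offset stay fixed until the frame
; is popped at the end.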
define void @stg_alloca18() uwtable {
; CHECK-LABEL: stg_alloca18:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sub sp, sp, #288
; CHECK-NEXT:    .cfi_def_cfa_offset 288
; CHECK-NEXT:    str x29, [sp, #272] // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    mov x8, #256 // =0x100
; CHECK-NEXT:    stg x9, [x9], #16
; CHECK-NEXT:  .LBB12_1: // %entry
; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    st2g x9, [x9], #32
; CHECK-NEXT:    subs x8, x8, #32
; CHECK-NEXT:    b.ne .LBB12_1
; CHECK-NEXT:  // %bb.2: // %entry
; CHECK-NEXT:    add sp, sp, #272
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    .cfi_restore w29
; CHECK-NEXT:    ret
entry:
  %a = alloca i8, i32 272, align 16
  call void @llvm.aarch64.settag(ptr %a, i64 272)
  ret void
}

; Verify that SLH works together with MTE stack tagging,
; see issue https://github.com/llvm/llvm-project/issues/61830
define void @test_slh() speculative_load_hardening {
; CHECK-LABEL: test_slh:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp sp, #0
; CHECK-NEXT:    csetm x16, ne
; CHECK-NEXT:    sub sp, sp, #208
; CHECK-NEXT:    str x30, [sp, #192] // 8-byte Folded Spill
; CHECK-NEXT:    .cfi_def_cfa_offset 208
; CHECK-NEXT:    .cfi_offset w30, -16
; CHECK-NEXT:    mov x1, sp
; CHECK-NEXT:    mov x0, sp
; CHECK-NEXT:    and x1, x1, x16
; CHECK-NEXT:    mov sp, x1
; CHECK-NEXT:    bl b
; CHECK-NEXT:    cmp sp, #0
; CHECK-NEXT:    ldr x30, [sp, #192] // 8-byte Folded Reload
; CHECK-NEXT:    csetm x16, ne
; CHECK-NEXT:    and x30, x30, x16
; CHECK-NEXT:    add sp, sp, #208
; CHECK-NEXT:    mov x0, sp
; CHECK-NEXT:    and x0, x0, x16
; CHECK-NEXT:    mov sp, x0
; CHECK-NEXT:    csdb
; CHECK-NEXT:    ret
; Verify that the memtag loop uses a b.cc conditional branch
; rather than a cb[n]z branch.
  %d = alloca [48 x i32], align 4
  call void @b(ptr %d)
  ret void
}
declare void @b(ptr)


declare void @llvm.aarch64.settag(ptr %p, i64 %a)
declare void @llvm.aarch64.settag.zero(ptr %p, i64 %a)