; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp --arm-memtransfer-tploop=allow -enable-arm-maskedgatscat=false -verify-machineinstrs %s -o - | FileCheck %s

; verify-machineinstrs previously caught the incorrect use of QPR in the stack reloads.
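; (The reloads in question are likely the 16-byte q-register spill/reload pair forced around the
; inline asm in @k below: "vstrw.32 q0, [sp] @ 16-byte Spill" / "vldrw.u32 q0, [sp] @ 16-byte Reload".)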

define arm_aapcs_vfpcc void @k() {
; CHECK-LABEL: k:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, lr}
; CHECK-NEXT:    push {r4, r5, r6, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14}
; CHECK-NEXT:    .pad #32
; CHECK-NEXT:    sub sp, #32
; CHECK-NEXT:    adr r5, .LCPI0_0
; CHECK-NEXT:    adr r4, .LCPI0_1
; CHECK-NEXT:    vldrw.u32 q6, [r5]
; CHECK-NEXT:    vldrw.u32 q5, [r4]
; CHECK-NEXT:    add r0, sp, #16
; CHECK-NEXT:    vmov.i32 q0, #0x1
; CHECK-NEXT:    vmov.i8 q1, #0x0
; CHECK-NEXT:    vmov.i8 q2, #0xff
; CHECK-NEXT:    vmov.i16 q3, #0x6
; CHECK-NEXT:    vmov.i16 q4, #0x3
; CHECK-NEXT:    movs r1, #0
; CHECK-NEXT:  .LBB0_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vand q5, q5, q0
; CHECK-NEXT:    vand q6, q6, q0
; CHECK-NEXT:    vcmp.i32 eq, q5, zr
; CHECK-NEXT:    vpsel q5, q2, q1
; CHECK-NEXT:    vcmp.i32 eq, q6, zr
; CHECK-NEXT:    vpsel q6, q2, q1
; CHECK-NEXT:    vstrh.32 q5, [r0]
; CHECK-NEXT:    vstrh.32 q6, [r0, #8]
; CHECK-NEXT:    vldrw.u32 q5, [r0]
; CHECK-NEXT:    vcmp.i16 ne, q5, zr
; CHECK-NEXT:    vmov.i32 q5, #0x0
; CHECK-NEXT:    vpsel q6, q4, q3
; CHECK-NEXT:    vstrh.16 q6, [r0]
; CHECK-NEXT:    vmov q6, q5
; CHECK-NEXT:    cbz r1, .LBB0_2
; CHECK-NEXT:    le .LBB0_1
; CHECK-NEXT:  .LBB0_2: @ %for.cond4.preheader
; CHECK-NEXT:    movs r6, #0
; CHECK-NEXT:    cbnz r6, .LBB0_5
; CHECK-NEXT:  .LBB0_3: @ %for.body10
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    cbnz r6, .LBB0_4
; CHECK-NEXT:    le .LBB0_3
; CHECK-NEXT:  .LBB0_4: @ %for.cond4.loopexit
; CHECK-NEXT:    bl l
; CHECK-NEXT:  .LBB0_5: @ %vector.body105.preheader
; CHECK-NEXT:    vldrw.u32 q0, [r5]
; CHECK-NEXT:    vldrw.u32 q1, [r4]
; CHECK-NEXT:    movs r0, #8
; CHECK-NEXT:  .LBB0_6: @ %vector.body105
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vadd.i32 q1, q1, r0
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    cbz r6, .LBB0_7
; CHECK-NEXT:    le .LBB0_6
; CHECK-NEXT:  .LBB0_7: @ %vector.body115.ph
; CHECK-NEXT:    vldrw.u32 q0, [r4]
; CHECK-NEXT:    movs r0, #4
; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
; CHECK-NEXT:    @APP
; CHECK-NEXT:    nop
; CHECK-NEXT:    @NO_APP
; CHECK-NEXT:    vldrw.u32 q0, [sp] @ 16-byte Reload
; CHECK-NEXT:  .LBB0_8: @ %vector.body115
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:    b .LBB0_8
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.9:
; CHECK-NEXT:  .LCPI0_0:
; CHECK-NEXT:    .long 4 @ 0x4
; CHECK-NEXT:    .long 5 @ 0x5
; CHECK-NEXT:    .long 6 @ 0x6
; CHECK-NEXT:    .long 7 @ 0x7
; CHECK-NEXT:  .LCPI0_1:
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 2 @ 0x2
; CHECK-NEXT:    .long 3 @ 0x3
entry:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %vec.ind = phi <8 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, %entry ], [ zeroinitializer, %vector.body ]
  %0 = and <8 x i32> %vec.ind, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %1 = icmp eq <8 x i32> %0, zeroinitializer
  %2 = select <8 x i1> %1, <8 x i16> <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>, <8 x i16> <i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6, i16 6>
  %3 = bitcast ptr undef to ptr
  store <8 x i16> %2, ptr %3, align 2
  %4 = icmp eq i32 undef, 128
  br i1 %4, label %for.cond4.preheader, label %vector.body

for.cond4.preheader:                              ; preds = %vector.body
  br i1 undef, label %vector.body105, label %for.body10

for.cond4.loopexit:                               ; preds = %for.body10
  %call5 = call arm_aapcs_vfpcc i32 @l()
  br label %vector.body105

for.body10:                                       ; preds = %for.body10, %for.cond4.preheader
  %exitcond88 = icmp eq i32 undef, 7
  br i1 %exitcond88, label %for.cond4.loopexit, label %for.body10

vector.body105:                                   ; preds = %vector.body105, %for.cond4.loopexit, %for.cond4.preheader
  %vec.ind113 = phi <8 x i32> [ %vec.ind.next114, %vector.body105 ], [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, %for.cond4.loopexit ], [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, %for.cond4.preheader ]
  %5 = and <8 x i32> %vec.ind113, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %vec.ind.next114 = add <8 x i32> %vec.ind113, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
  %6 = icmp eq i32 undef, 256
  br i1 %6, label %vector.body115.ph, label %vector.body105

vector.body115.ph:                                ; preds = %vector.body105
  tail call void asm sideeffect "nop", "~{s0},~{s4},~{s8},~{s12},~{s16},~{s20},~{s24},~{s28},~{memory}"()
  br label %vector.body115

vector.body115:                                   ; preds = %vector.body115, %vector.body115.ph
  %vec.ind123 = phi <4 x i32> [ %vec.ind.next124, %vector.body115 ], [ <i32 0, i32 1, i32 2, i32 3>, %vector.body115.ph ]
  %7 = icmp eq <4 x i32> %vec.ind123, zeroinitializer
  %vec.ind.next124 = add <4 x i32> %vec.ind123, <i32 4, i32 4, i32 4, i32 4>
  br label %vector.body115
}


@a = external dso_local global i32, align 4
@b = dso_local local_unnamed_addr global i32 ptrtoint (ptr @a to i32), align 4
@c = dso_local global i32 2, align 4
@d = dso_local global i32 2, align 4

define dso_local i32 @e() #0 {
; CHECK-LABEL: e:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, r10, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, r10, lr}
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    .pad #408
; CHECK-NEXT:    sub sp, #408
; CHECK-NEXT:    movw r7, :lower16:.L_MergedGlobals
; CHECK-NEXT:    vldr s15, .LCPI1_1
; CHECK-NEXT:    movt r7, :upper16:.L_MergedGlobals
; CHECK-NEXT:    movw r2, :lower16:e
; CHECK-NEXT:    mov r4, r7
; CHECK-NEXT:    mov r3, r7
; CHECK-NEXT:    ldr r6, [r4, #8]!
; CHECK-NEXT:    vmov.i32 q0, #0x0
; CHECK-NEXT:    ldr r0, [r3, #4]!
; CHECK-NEXT:    vstrw.32 q0, [sp] @ 16-byte Spill
; CHECK-NEXT:    movt r2, :upper16:e
; CHECK-NEXT:    vmov r5, s15
; CHECK-NEXT:    vmov q0[2], q0[0], r4, r4
; CHECK-NEXT:    vmov s13, r3
; CHECK-NEXT:    vldr s12, .LCPI1_0
; CHECK-NEXT:    vmov q0[3], q0[1], r5, r2
; CHECK-NEXT:    vdup.32 q7, r3
; CHECK-NEXT:    vmov q6[2], q6[0], r3, r5
; CHECK-NEXT:    vstrw.32 q0, [sp, #92]
; CHECK-NEXT:    vmov q0, q7
; CHECK-NEXT:    vmov q6[3], q6[1], r3, r2
; CHECK-NEXT:    vmov q4, q7
; CHECK-NEXT:    vmov.32 q0[0], r2
; CHECK-NEXT:    vmov.32 q7[1], r2
; CHECK-NEXT:    vmov s21, r2
; CHECK-NEXT:    movs r1, #64
; CHECK-NEXT:    vmov.f32 s20, s12
; CHECK-NEXT:    str r0, [sp, #40]
; CHECK-NEXT:    vmov.f32 s22, s13
; CHECK-NEXT:    str r6, [r0]
; CHECK-NEXT:    vmov.f32 s23, s15
; CHECK-NEXT:    str r0, [r0]
; CHECK-NEXT:    vstrw.32 q5, [r0]
; CHECK-NEXT:    vstrw.32 q7, [r0]
; CHECK-NEXT:    vstrw.32 q0, [r0]
; CHECK-NEXT:    vstrw.32 q6, [r0]
; CHECK-NEXT:    mov.w r8, #0
; CHECK-NEXT:    vmov q1[2], q1[0], r4, r3
; CHECK-NEXT:    vmov q2[2], q2[0], r3, r3
; CHECK-NEXT:    mov.w r12, #4
; CHECK-NEXT:    vmov q1[3], q1[1], r2, r4
; CHECK-NEXT:    vmov.f32 s14, s13
; CHECK-NEXT:    vmov q2[3], q2[1], r4, r5
; CHECK-NEXT:    vmov.32 q4[0], r8
; CHECK-NEXT:    @ implicit-def: $r2
; CHECK-NEXT:    str.w r8, [sp, #44]
; CHECK-NEXT:    vstrw.32 q3, [sp, #60]
; CHECK-NEXT:    strh.w r12, [sp, #406]
; CHECK-NEXT:    wlstp.8 lr, r1, .LBB1_2
; CHECK-NEXT:  .LBB1_1: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q0, [sp] @ 16-byte Reload
; CHECK-NEXT:    vstrb.8 q0, [r2], #16
; CHECK-NEXT:    letp lr, .LBB1_1
; CHECK-NEXT:  .LBB1_2: @ %entry
; CHECK-NEXT:    vstrw.32 q1, [r0]
; CHECK-NEXT:    str.w r8, [r7]
; CHECK-NEXT:    vstrw.32 q4, [r0]
; CHECK-NEXT:    vstrw.32 q2, [r0]
; CHECK-NEXT:    str.w r12, [sp, #324]
; CHECK-NEXT:  .LBB1_3: @ %for.cond
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    b .LBB1_3
; CHECK-NEXT:    .p2align 2
; CHECK-NEXT:  @ %bb.4:
; CHECK-NEXT:  .LCPI1_0:
; CHECK-NEXT:    .long 0x00000004 @ float 5.60519386E-45
; CHECK-NEXT:  .LCPI1_1:
; CHECK-NEXT:    .long 0x00000000 @ float 0
entry:
  %f = alloca i16, align 2
  %g = alloca [3 x [8 x [4 x ptr]]], align 4
  store i16 4, ptr %f, align 2
  %0 = load i32, ptr @c, align 4
  %1 = load i32, ptr @d, align 4
  %arrayinit.element7 = getelementptr inbounds [3 x [8 x [4 x ptr]]], ptr %g, i32 0, i32 0, i32 1, i32 1
  %2 = bitcast ptr %arrayinit.element7 to ptr
  store i32 %0, ptr %2, align 4
  %arrayinit.element8 = getelementptr inbounds [3 x [8 x [4 x ptr]]], ptr %g, i32 0, i32 0, i32 1, i32 2
  store ptr null, ptr %arrayinit.element8, align 4
  %3 = bitcast ptr undef to ptr
  store i32 %1, ptr %3, align 4
  %4 = bitcast ptr undef to ptr
  store i32 %0, ptr %4, align 4
  %arrayinit.element13 = getelementptr inbounds [3 x [8 x [4 x ptr]]], ptr %g, i32 0, i32 0, i32 2, i32 2
  %5 = bitcast ptr %arrayinit.element13 to ptr
  store <4 x ptr> <ptr inttoptr (i32 4 to ptr), ptr @c, ptr @c, ptr null>, ptr %5, align 4
  %arrayinit.element24 = getelementptr inbounds [3 x [8 x [4 x ptr]]], ptr %g, i32 0, i32 0, i32 4, i32 2
  %6 = bitcast ptr %arrayinit.element24 to ptr
  store <4 x ptr> <ptr @d, ptr null, ptr @d, ptr @e>, ptr %6, align 4
  %7 = bitcast ptr undef to ptr
  store <4 x ptr> <ptr inttoptr (i32 4 to ptr), ptr @e, ptr @c, ptr null>, ptr %7, align 4
  %8 = bitcast ptr undef to ptr
  store <4 x ptr> <ptr @c, ptr @e, ptr @c, ptr @c>, ptr %8, align 4
  %9 = bitcast ptr undef to ptr
  store <4 x ptr> <ptr @e, ptr @c, ptr @c, ptr @c>, ptr %9, align 4
  %10 = bitcast ptr undef to ptr
  store <4 x ptr> <ptr @c, ptr @c, ptr null, ptr @e>, ptr %10, align 4
  call void @llvm.memset.p0.i32(ptr nonnull align 4 dereferenceable(64) undef, i8 0, i32 64, i1 false)
  %11 = bitcast ptr undef to ptr
  store <4 x ptr> <ptr @d, ptr @e, ptr @c, ptr @d>, ptr %11, align 4
  %12 = bitcast ptr undef to ptr
  store <4 x ptr> <ptr null, ptr @c, ptr @c, ptr @c>, ptr %12, align 4
  %13 = bitcast ptr undef to ptr
  store <4 x ptr> <ptr @c, ptr @d, ptr @c, ptr null>, ptr %13, align 4
  %arrayinit.begin78 = getelementptr inbounds [3 x [8 x [4 x ptr]]], ptr %g, i32 0, i32 2, i32 3, i32 0
  store ptr inttoptr (i32 4 to ptr), ptr %arrayinit.begin78, align 4
  store i32 0, ptr @b, align 4
  br label %for.cond

for.cond:                                         ; preds = %for.cond, %entry
  br label %for.cond
}

; Function Attrs: argmemonly nounwind willreturn
declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg) #1

; Function Attrs: argmemonly nounwind willreturn
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #1


declare arm_aapcs_vfpcc i32 @l(...)