xref: /llvm-project/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll (revision b5b663aac17415625340eb29c8010832bfc4c21c)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -tail-predication=enabled %s -o - | FileCheck %s
3
4define dso_local arm_aapcs_vfpcc void @sext_i8(ptr noalias nocapture %a, ptr nocapture readonly %b, i32 %N) {
; Widening loop a[i] += b[i] with i8 -> i16 sign extension.  The autogenerated
; assertions below verify that the masked load + sext pair is selected as a
; single extending load (vldrb.s16) and that the whole loop is converted to a
; tail-predicated low-overhead loop (dlstp.16 / letp) with no separate vctp or
; vpst instructions left in the loop body.
5; CHECK-LABEL: sext_i8:
6; CHECK:       @ %bb.0: @ %entry
7; CHECK-NEXT:    push {r7, lr}
8; CHECK-NEXT:    cmp r2, #0
9; CHECK-NEXT:    it eq
10; CHECK-NEXT:    popeq {r7, pc}
11; CHECK-NEXT:  .LBB0_1: @ %vector.ph
12; CHECK-NEXT:    dlstp.16 lr, r2
13; CHECK-NEXT:  .LBB0_2: @ %vector.body
14; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
15; CHECK-NEXT:    vldrb.s16 q0, [r1], #8
16; CHECK-NEXT:    vldrh.u16 q1, [r0]
17; CHECK-NEXT:    vadd.i16 q0, q1, q0
18; CHECK-NEXT:    vstrh.16 q0, [r0], #16
19; CHECK-NEXT:    letp lr, .LBB0_2
20; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
21; CHECK-NEXT:    pop {r7, pc}
22entry:
; Early exit when the trip count N is zero.
23  %cmp8 = icmp eq i32 %N, 0
24  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
25
26vector.ph:                                        ; preds = %entry
; Round the scalar trip count up to a multiple of the 8-lane vector width.
27  %n.rnd.up = add i32 %N, 7
28  %n.vec = and i32 %n.rnd.up, -8
29  br label %vector.body
30
31vector.body:                                      ; preds = %vector.body, %vector.ph
32  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
33  %0 = getelementptr inbounds i8, ptr %b, i32 %index
; The active-lane mask disables the excess lanes of the final iteration; this
; is the pattern that tail-predication turns into dlstp/letp.
34  %1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
35  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %0, i32 1, <8 x i1> %1, <8 x i8> undef)
; Sign-extending load of b: expected to fold into one vldrb.s16.
36  %2 = sext <8 x i8> %wide.masked.load to <8 x i16>
37  %3 = getelementptr inbounds i16, ptr %a, i32 %index
38  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %3, i32 2, <8 x i1> %1, <8 x i16> undef)
39  %4 = add <8 x i16> %wide.masked.load12, %2
40  call void @llvm.masked.store.v8i16.p0(<8 x i16> %4, ptr %3, i32 2, <8 x i1> %1)
41  %index.next = add i32 %index, 8
42  %5 = icmp eq i32 %index.next, %n.vec
43  br i1 %5, label %for.cond.cleanup, label %vector.body
44
45for.cond.cleanup:                                 ; preds = %vector.body, %entry
46  ret void
47}
48
49; Function Attrs: nofree norecurse nounwind
; Same loop as @sext_i8 but with zero extension: the masked load + zext pair
; must become a single vldrb.u16 and the loop must still be tail-predicated
; (dlstp.16 / letp).
50define dso_local arm_aapcs_vfpcc void @zext_i8(ptr noalias nocapture %a, ptr nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
51; CHECK-LABEL: zext_i8:
52; CHECK:       @ %bb.0: @ %entry
53; CHECK-NEXT:    push {r7, lr}
54; CHECK-NEXT:    cmp r2, #0
55; CHECK-NEXT:    it eq
56; CHECK-NEXT:    popeq {r7, pc}
57; CHECK-NEXT:  .LBB1_1: @ %vector.ph
58; CHECK-NEXT:    dlstp.16 lr, r2
59; CHECK-NEXT:  .LBB1_2: @ %vector.body
60; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
61; CHECK-NEXT:    vldrb.u16 q0, [r1], #8
62; CHECK-NEXT:    vldrh.u16 q1, [r0]
63; CHECK-NEXT:    vadd.i16 q0, q1, q0
64; CHECK-NEXT:    vstrh.16 q0, [r0], #16
65; CHECK-NEXT:    letp lr, .LBB1_2
66; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
67; CHECK-NEXT:    pop {r7, pc}
68entry:
; Early exit when the trip count N is zero.
69  %cmp8 = icmp eq i32 %N, 0
70  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
71
72vector.ph:                                        ; preds = %entry
; Round the scalar trip count up to a multiple of the 8-lane vector width.
73  %n.rnd.up = add i32 %N, 7
74  %n.vec = and i32 %n.rnd.up, -8
75  br label %vector.body
76
77vector.body:                                      ; preds = %vector.body, %vector.ph
78  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
79  %0 = getelementptr inbounds i8, ptr %b, i32 %index
80  %1 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %N)
81  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %0, i32 1, <8 x i1> %1, <8 x i8> undef)
; Zero-extending load of b: expected to fold into one vldrb.u16.
82  %2 = zext <8 x i8> %wide.masked.load to <8 x i16>
83  %3 = getelementptr inbounds i16, ptr %a, i32 %index
84  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr %3, i32 2, <8 x i1> %1, <8 x i16> undef)
85  %4 = add <8 x i16> %wide.masked.load12, %2
86  call void @llvm.masked.store.v8i16.p0(<8 x i16> %4, ptr %3, i32 2, <8 x i1> %1)
87  %index.next = add i32 %index, 8
88  %5 = icmp eq i32 %index.next, %n.vec
89  br i1 %5, label %for.cond.cleanup, label %vector.body
90
91for.cond.cleanup:                                 ; preds = %vector.body, %entry
92  ret void
93}
94
95; Function Attrs: nofree norecurse nounwind
; i16 -> i32 widening variant: four lanes per iteration, so the masked load +
; sext pair must become vldrh.s32 and the hardware loop uses the 32-bit lane
; size (dlstp.32 / letp).
96define dso_local arm_aapcs_vfpcc void @sext_i16(ptr noalias nocapture %a, ptr nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
97; CHECK-LABEL: sext_i16:
98; CHECK:       @ %bb.0: @ %entry
99; CHECK-NEXT:    push {r7, lr}
100; CHECK-NEXT:    cmp r2, #0
101; CHECK-NEXT:    it eq
102; CHECK-NEXT:    popeq {r7, pc}
103; CHECK-NEXT:  .LBB2_1: @ %vector.ph
104; CHECK-NEXT:    dlstp.32 lr, r2
105; CHECK-NEXT:  .LBB2_2: @ %vector.body
106; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
107; CHECK-NEXT:    vldrh.s32 q0, [r1], #8
108; CHECK-NEXT:    vldrw.u32 q1, [r0]
109; CHECK-NEXT:    vadd.i32 q0, q1, q0
110; CHECK-NEXT:    vstrw.32 q0, [r0], #16
111; CHECK-NEXT:    letp lr, .LBB2_2
112; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
113; CHECK-NEXT:    pop {r7, pc}
114entry:
; Early exit when the trip count N is zero.
115  %cmp6 = icmp eq i32 %N, 0
116  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph
117
118vector.ph:                                        ; preds = %entry
; Round the scalar trip count up to a multiple of the 4-lane vector width.
119  %n.rnd.up = add i32 %N, 3
120  %n.vec = and i32 %n.rnd.up, -4
121  br label %vector.body
122
123vector.body:                                      ; preds = %vector.body, %vector.ph
124  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
125  %0 = getelementptr inbounds i16, ptr %b, i32 %index
126  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
127  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %0, i32 2, <4 x i1> %1, <4 x i16> undef)
; Sign-extending load of b: expected to fold into one vldrh.s32.
128  %2 = sext <4 x i16> %wide.masked.load to <4 x i32>
129  %3 = getelementptr inbounds i32, ptr %a, i32 %index
130  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %3, i32 4, <4 x i1> %1, <4 x i32> undef)
131  %4 = add nsw <4 x i32> %wide.masked.load10, %2
132  call void @llvm.masked.store.v4i32.p0(<4 x i32> %4, ptr %3, i32 4, <4 x i1> %1)
133  %index.next = add i32 %index, 4
134  %5 = icmp eq i32 %index.next, %n.vec
135  br i1 %5, label %for.cond.cleanup, label %vector.body
136
137for.cond.cleanup:                                 ; preds = %vector.body, %entry
138  ret void
139}
140
141; Function Attrs: nofree norecurse nounwind
; i16 -> i32 zero-extending variant of @sext_i16: the masked load + zext pair
; must become vldrh.u32 inside a tail-predicated dlstp.32 / letp loop.
142define dso_local arm_aapcs_vfpcc void @zext_i16(ptr noalias nocapture %a, ptr nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
143; CHECK-LABEL: zext_i16:
144; CHECK:       @ %bb.0: @ %entry
145; CHECK-NEXT:    push {r7, lr}
146; CHECK-NEXT:    cmp r2, #0
147; CHECK-NEXT:    it eq
148; CHECK-NEXT:    popeq {r7, pc}
149; CHECK-NEXT:  .LBB3_1: @ %vector.ph
150; CHECK-NEXT:    dlstp.32 lr, r2
151; CHECK-NEXT:  .LBB3_2: @ %vector.body
152; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
153; CHECK-NEXT:    vldrh.u32 q0, [r1], #8
154; CHECK-NEXT:    vldrw.u32 q1, [r0]
155; CHECK-NEXT:    vadd.i32 q0, q1, q0
156; CHECK-NEXT:    vstrw.32 q0, [r0], #16
157; CHECK-NEXT:    letp lr, .LBB3_2
158; CHECK-NEXT:  @ %bb.3: @ %for.cond.cleanup
159; CHECK-NEXT:    pop {r7, pc}
160entry:
; Early exit when the trip count N is zero.
161  %cmp6 = icmp eq i32 %N, 0
162  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph
163
164vector.ph:                                        ; preds = %entry
; Round the scalar trip count up to a multiple of the 4-lane vector width.
165  %n.rnd.up = add i32 %N, 3
166  %n.vec = and i32 %n.rnd.up, -4
167  br label %vector.body
168
169vector.body:                                      ; preds = %vector.body, %vector.ph
170  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
171  %0 = getelementptr inbounds i16, ptr %b, i32 %index
172  %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)
173  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0(ptr %0, i32 2, <4 x i1> %1, <4 x i16> undef)
; Zero-extending load of b: expected to fold into one vldrh.u32.
174  %2 = zext <4 x i16> %wide.masked.load to <4 x i32>
175  %3 = getelementptr inbounds i32, ptr %a, i32 %index
176  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %3, i32 4, <4 x i1> %1, <4 x i32> undef)
177  %4 = add <4 x i32> %wide.masked.load10, %2
178  call void @llvm.masked.store.v4i32.p0(<4 x i32> %4, ptr %3, i32 4, <4 x i1> %1)
179  %index.next = add i32 %index, 4
180  %5 = icmp eq i32 %index.next, %n.vec
181  br i1 %5, label %for.cond.cleanup, label %vector.body
182
183for.cond.cleanup:                                 ; preds = %vector.body, %entry
184  ret void
185}
186
; Declarations of the masked-memory and active-lane-mask intrinsics used by
; the test functions above.
187declare <8 x i8> @llvm.masked.load.v8i8.p0(ptr, i32 immarg, <8 x i1>, <8 x i8>)
188declare <8 x i16> @llvm.masked.load.v8i16.p0(ptr, i32 immarg, <8 x i1>, <8 x i16>)
189declare void @llvm.masked.store.v8i16.p0(<8 x i16>, ptr, i32 immarg, <8 x i1>)
190declare <4 x i16> @llvm.masked.load.v4i16.p0(ptr, i32 immarg, <4 x i1>, <4 x i16>)
191declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
192declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>)
193
194declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
195declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
196