; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=armv8.1m.main -mattr=+mve -S -mve-tail-predication -tail-predication=enabled %s -o - | FileCheck %s
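; The two functions below each contain a tail-folded vector loop nested inside
; an outer loop over the rows. The tail-predication pass is expected to turn
; the llvm.get.active.lane.mask call in each inner loop into
; llvm.arm.mve.vctp32 plus an explicit element-count decrement, which is what
; the CHECK lines verify.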

define void @mat_vec_sext_i16(i16** nocapture readonly %A, i16* nocapture readonly %B, i32* noalias nocapture %C, i32 %N) {
; CHECK-LABEL: @mat_vec_sext_i16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP24:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP24]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]]
; CHECK:       for.cond1.preheader.us.preheader:
; CHECK-NEXT:    [[N_RND_UP:%.*]] = add i32 [[N]], 3
; CHECK-NEXT:    [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -4
; CHECK-NEXT:    [[TRIP_COUNT_MINUS_1:%.*]] = add i32 [[N]], -1
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT28:%.*]] = insertelement <4 x i32> undef, i32 [[TRIP_COUNT_MINUS_1]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT29:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT28]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP:%.*]] = add i32 [[N_VEC]], -4
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[TMP]], 2
; CHECK-NEXT:    [[TMP2:%.*]] = add nuw nsw i32 [[TMP1]], 1
; CHECK-NEXT:    br label [[FOR_COND1_PREHEADER_US:%.*]]
; CHECK:       for.cond1.preheader.us:
; CHECK-NEXT:    [[I_025_US:%.*]] = phi i32 [ [[INC10_US:%.*]], [[MIDDLE_BLOCK:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds i16*, i16** [[A:%.*]], i32 [[I_025_US]]
; CHECK-NEXT:    [[TMP3:%.*]] = load i16*, i16** [[ARRAYIDX_US]], align 4
; CHECK-NEXT:    [[ARRAYIDX8_US:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i32 [[I_025_US]]
; CHECK-NEXT:    [[ARRAYIDX8_PROMOTED_US:%.*]] = load i32, i32* [[ARRAYIDX8_US]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 [[ARRAYIDX8_PROMOTED_US]], i32 0
; CHECK-NEXT:    call void @llvm.set.loop.iterations.i32(i32 [[TMP2]])
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP4]], [[FOR_COND1_PREHEADER_US]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP5:%.*]] = phi i32 [ [[TMP2]], [[FOR_COND1_PREHEADER_US]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[N]], [[FOR_COND1_PREHEADER_US]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]])
; CHECK-NEXT:    [[TMP2]] = sub i32 [[TMP0]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* [[TMP8]], i32 2, <4 x i1> [[TMP1]], <4 x i16> undef)
; CHECK-NEXT:    [[TMP9:%.*]] = sext <4 x i16> [[WIDE_MASKED_LOAD]] to <4 x i32>
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i16* [[TMP10]] to <4 x i16>*
; CHECK-NEXT:    [[WIDE_MASKED_LOAD30:%.*]] = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* [[TMP11]], i32 2, <4 x i1> [[TMP1]], <4 x i16> undef)
; CHECK-NEXT:    [[TMP12:%.*]] = sext <4 x i16> [[WIDE_MASKED_LOAD30]] to <4 x i32>
; CHECK-NEXT:    [[TMP13:%.*]] = mul nsw <4 x i32> [[TMP12]], [[TMP9]]
; CHECK-NEXT:    [[TMP14]] = add nsw <4 x i32> [[TMP13]], [[VEC_PHI]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP15]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP5]], i32 1)
; CHECK-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
; CHECK-NEXT:    br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[TMP17:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP14]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT:    [[TMP18:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP17]])
; CHECK-NEXT:    store i32 [[TMP18]], i32* [[ARRAYIDX8_US]], align 4
; CHECK-NEXT:    [[INC10_US]] = add nuw i32 [[I_025_US]], 1
; CHECK-NEXT:    [[EXITCOND27:%.*]] = icmp eq i32 [[INC10_US]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND27]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER_US]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
;
entry:
  %cmp24 = icmp eq i32 %N, 0
  br i1 %cmp24, label %for.cond.cleanup, label %for.cond1.preheader.us.preheader

for.cond1.preheader.us.preheader:                 ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %trip.count.minus.1 = add i32 %N, -1
  %broadcast.splatinsert28 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
  %broadcast.splat29 = shufflevector <4 x i32> %broadcast.splatinsert28, <4 x i32> undef, <4 x i32> zeroinitializer
  %tmp = add i32 %n.vec, -4
  %tmp1 = lshr i32 %tmp, 2
  %tmp2 = add nuw nsw i32 %tmp1, 1
  br label %for.cond1.preheader.us

for.cond1.preheader.us:                           ; preds = %middle.block, %for.cond1.preheader.us.preheader
  %i.025.us = phi i32 [ %inc10.us, %middle.block ], [ 0, %for.cond1.preheader.us.preheader ]
  %arrayidx.us = getelementptr inbounds i16*, i16** %A, i32 %i.025.us
  %tmp3 = load i16*, i16** %arrayidx.us, align 4
  %arrayidx8.us = getelementptr inbounds i32, i32* %C, i32 %i.025.us
  %arrayidx8.promoted.us = load i32, i32* %arrayidx8.us, align 4
  %tmp4 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %arrayidx8.promoted.us, i32 0
  call void @llvm.set.loop.iterations.i32(i32 %tmp2)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %for.cond1.preheader.us
  %index = phi i32 [ 0, %for.cond1.preheader.us ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ %tmp4, %for.cond1.preheader.us ], [ %tmp14, %vector.body ]
  %tmp5 = phi i32 [ %tmp2, %for.cond1.preheader.us ], [ %tmp15, %vector.body ]
  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
  %tmp6 = getelementptr inbounds i16, i16* %tmp3, i32 %index

  ; %tmp7 = icmp ule <4 x i32> %induction, %broadcast.splat29
  %tmp7 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)

  %tmp8 = bitcast i16* %tmp6 to <4 x i16>*
  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %tmp8, i32 2, <4 x i1> %tmp7, <4 x i16> undef)
  %tmp9 = sext <4 x i16> %wide.masked.load to <4 x i32>
  %tmp10 = getelementptr inbounds i16, i16* %B, i32 %index
  %tmp11 = bitcast i16* %tmp10 to <4 x i16>*
  %wide.masked.load30 = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %tmp11, i32 2, <4 x i1> %tmp7, <4 x i16> undef)
  %tmp12 = sext <4 x i16> %wide.masked.load30 to <4 x i32>
  %tmp13 = mul nsw <4 x i32> %tmp12, %tmp9
  %tmp14 = add nsw <4 x i32> %tmp13, %vec.phi
  %index.next = add i32 %index, 4
  %tmp15 = call i32 @llvm.loop.decrement.reg.i32(i32 %tmp5, i32 1)
  %tmp16 = icmp ne i32 %tmp15, 0
  br i1 %tmp16, label %vector.body, label %middle.block

middle.block:                                     ; preds = %vector.body
  %tmp17 = select <4 x i1> %tmp7, <4 x i32> %tmp14, <4 x i32> %vec.phi
  %tmp18 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp17)
  store i32 %tmp18, i32* %arrayidx8.us, align 4
  %inc10.us = add nuw i32 %i.025.us, 1
  %exitcond27 = icmp eq i32 %inc10.us, %N
  br i1 %exitcond27, label %for.cond.cleanup, label %for.cond1.preheader.us

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  ret void
}

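; Same nesting as above, but with i32 elements, so the inner loop needs no
; sign extension of the masked loads.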
define void @mat_vec_i32(i32** nocapture readonly %A, i32* nocapture readonly %B, i32* noalias nocapture %C, i32 %N) {
; CHECK-LABEL: @mat_vec_i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP23:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP23]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]]
; CHECK:       for.cond1.preheader.us.preheader:
; CHECK-NEXT:    [[N_RND_UP:%.*]] = add i32 [[N]], 3
; CHECK-NEXT:    [[N_VEC:%.*]] = and i32 [[N_RND_UP]], -4
; CHECK-NEXT:    [[TRIP_COUNT_MINUS_1:%.*]] = add i32 [[N]], -1
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT27:%.*]] = insertelement <4 x i32> undef, i32 [[TRIP_COUNT_MINUS_1]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT28:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT27]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP:%.*]] = add i32 [[N_VEC]], -4
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[TMP]], 2
; CHECK-NEXT:    [[TMP2:%.*]] = add nuw nsw i32 [[TMP1]], 1
; CHECK-NEXT:    br label [[FOR_COND1_PREHEADER_US:%.*]]
; CHECK:       for.cond1.preheader.us:
; CHECK-NEXT:    [[I_024_US:%.*]] = phi i32 [ [[INC9_US:%.*]], [[MIDDLE_BLOCK:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32*, i32** [[A:%.*]], i32 [[I_024_US]]
; CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[ARRAYIDX_US]], align 4
; CHECK-NEXT:    [[ARRAYIDX7_US:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i32 [[I_024_US]]
; CHECK-NEXT:    [[ARRAYIDX7_PROMOTED_US:%.*]] = load i32, i32* [[ARRAYIDX7_US]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 [[ARRAYIDX7_PROMOTED_US]], i32 0
; CHECK-NEXT:    call void @llvm.set.loop.iterations.i32(i32 [[TMP2]])
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP4]], [[FOR_COND1_PREHEADER_US]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP5:%.*]] = phi i32 [ [[TMP2]], [[FOR_COND1_PREHEADER_US]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = phi i32 [ [[N]], [[FOR_COND1_PREHEADER_US]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i32 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP0]])
; CHECK-NEXT:    [[TMP2]] = sub i32 [[TMP0]], 4
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[TMP6]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP8]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 [[INDEX]]
; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_MASKED_LOAD29:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef)
; CHECK-NEXT:    [[TMP11:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD29]], [[WIDE_MASKED_LOAD]]
; CHECK-NEXT:    [[TMP12]] = add nsw <4 x i32> [[VEC_PHI]], [[TMP11]]
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP13]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP5]], i32 1)
; CHECK-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
; CHECK-NEXT:    br i1 [[TMP14]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[TMP15:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP12]], <4 x i32> [[VEC_PHI]]
; CHECK-NEXT:    [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP15]])
; CHECK-NEXT:    store i32 [[TMP16]], i32* [[ARRAYIDX7_US]], align 4
; CHECK-NEXT:    [[INC9_US]] = add nuw i32 [[I_024_US]], 1
; CHECK-NEXT:    [[EXITCOND26:%.*]] = icmp eq i32 [[INC9_US]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND26]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER_US]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
;
entry:
  %cmp23 = icmp eq i32 %N, 0
  br i1 %cmp23, label %for.cond.cleanup, label %for.cond1.preheader.us.preheader

for.cond1.preheader.us.preheader:                 ; preds = %entry
  %n.rnd.up = add i32 %N, 3
  %n.vec = and i32 %n.rnd.up, -4
  %trip.count.minus.1 = add i32 %N, -1
  %broadcast.splatinsert27 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
  %broadcast.splat28 = shufflevector <4 x i32> %broadcast.splatinsert27, <4 x i32> undef, <4 x i32> zeroinitializer
  %tmp = add i32 %n.vec, -4
  %tmp1 = lshr i32 %tmp, 2
  %tmp2 = add nuw nsw i32 %tmp1, 1
  br label %for.cond1.preheader.us

for.cond1.preheader.us:                           ; preds = %middle.block, %for.cond1.preheader.us.preheader
  %i.024.us = phi i32 [ %inc9.us, %middle.block ], [ 0, %for.cond1.preheader.us.preheader ]
  %arrayidx.us = getelementptr inbounds i32*, i32** %A, i32 %i.024.us
  %tmp3 = load i32*, i32** %arrayidx.us, align 4
  %arrayidx7.us = getelementptr inbounds i32, i32* %C, i32 %i.024.us
  %arrayidx7.promoted.us = load i32, i32* %arrayidx7.us, align 4
  %tmp4 = insertelement <4 x i32> <i32 undef, i32 0, i32 0, i32 0>, i32 %arrayidx7.promoted.us, i32 0
  call void @llvm.set.loop.iterations.i32(i32 %tmp2)
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %for.cond1.preheader.us
  %index = phi i32 [ 0, %for.cond1.preheader.us ], [ %index.next, %vector.body ]
  %vec.phi = phi <4 x i32> [ %tmp4, %for.cond1.preheader.us ], [ %tmp12, %vector.body ]
  %tmp5 = phi i32 [ %tmp2, %for.cond1.preheader.us ], [ %tmp13, %vector.body ]
  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
  %tmp6 = getelementptr inbounds i32, i32* %tmp3, i32 %index

  ; %tmp7 = icmp ule <4 x i32> %induction, %broadcast.splat28
  %tmp7 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %N)

  %tmp8 = bitcast i32* %tmp6 to <4 x i32>*
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp8, i32 4, <4 x i1> %tmp7, <4 x i32> undef)
  %tmp9 = getelementptr inbounds i32, i32* %B, i32 %index
  %tmp10 = bitcast i32* %tmp9 to <4 x i32>*
  %wide.masked.load29 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %tmp10, i32 4, <4 x i1> %tmp7, <4 x i32> undef)
  %tmp11 = mul nsw <4 x i32> %wide.masked.load29, %wide.masked.load
  %tmp12 = add nsw <4 x i32> %vec.phi, %tmp11
  %index.next = add i32 %index, 4
  %tmp13 = call i32 @llvm.loop.decrement.reg.i32(i32 %tmp5, i32 1)
  %tmp14 = icmp ne i32 %tmp13, 0
  br i1 %tmp14, label %vector.body, label %middle.block

middle.block:                                     ; preds = %vector.body
  %tmp15 = select <4 x i1> %tmp7, <4 x i32> %tmp12, <4 x i32> %vec.phi
  %tmp16 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp15)
  store i32 %tmp16, i32* %arrayidx7.us, align 4
  %inc9.us = add nuw i32 %i.024.us, 1
  %exitcond26 = icmp eq i32 %inc9.us, %N
  br i1 %exitcond26, label %for.cond.cleanup, label %for.cond1.preheader.us

for.cond.cleanup:                                 ; preds = %middle.block, %entry
  ret void
}


; Function Attrs: argmemonly nounwind readonly willreturn
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #0

; Function Attrs: argmemonly nounwind readonly willreturn
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #0

; Function Attrs: nounwind readnone willreturn
declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #1

; Function Attrs: noduplicate nounwind
declare void @llvm.set.loop.iterations.i32(i32) #2

; Function Attrs: noduplicate nounwind
declare i32 @llvm.loop.decrement.reg.i32(i32, i32) #2

declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)

attributes #0 = { argmemonly nounwind readonly willreturn }
attributes #1 = { nounwind readnone willreturn }
attributes #2 = { noduplicate nounwind }