; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-versioning -S < %s | FileCheck %s -check-prefix=LV

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; For this loop:
;   unsigned index = 0;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index++;
;   }
;
; SCEV is unable to prove that the index expression 2 * index does not overflow.
;
; Analyzing the IR does not help us because the GEPs are not
; affine AddRecExprs. However, we can turn them into AddRecExprs
; using SCEV Predicates.
;
; Once we have an affine expression we need to add an additional NUSW
; check to ensure that the pointers don't wrap, since the GEPs are not
; inbounds.

; The expression for %mul_ext as analyzed by SCEV is
;    (zext i32 {0,+,2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the SCEV expression:
;    i64 {0,+,2}<%for.body>

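; As a rough illustration (hypothetical values, not taken from this test) of
; the wrap the predicate rules out: the multiplication happens in 32 bits, so
; once index reaches 2^31 the product 2 * index wraps to 0 and the zext'd
; offset jumps back to the start of A instead of continuing to grow:
;
;   unsigned index = 0x80000000u;      /* hypothetical value near the wrap */
;   unsigned mul = 2u * index;         /* wraps to 0 in 32-bit arithmetic  */
;   unsigned long long ext = mul;      /* zext gives 0, not 2^32          */
;
; The umul.with.overflow check in the versioned code below rejects trip
; counts for which this can happen at runtime.
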
define void @f1(i16* noalias %a,
; LV-LABEL: @f1(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], 0
; LV-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 0
; LV-NEXT:    [[TMP6:%.*]] = select i1 false, i1 [[TMP4]], i1 [[TMP5]]
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[A5]], i64 [[TMP11]]
; LV-NEXT:    [[TMP14:%.*]] = icmp ugt i8* [[TMP13]], [[A5]]
; LV-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
; LV-NEXT:    [[TMP16:%.*]] = select i1 false, i1 [[TMP14]], i1 [[TMP15]]
; LV-NEXT:    [[TMP17:%.*]] = or i1 [[TMP16]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP9]], [[TMP17]]
; LV-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; For this loop:
;   unsigned index = n;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index--;
;   }
;
; the SCEV expression for 2 * index is not an AddRecExpr
; (and implicitly not affine). However, we are able to make assumptions
; that will turn the expression into an affine one and continue the
; analysis.
;
; Once we have an affine expression we need to add an additional NUSW
; check to ensure that the pointers don't wrap, since the GEPs are not
; inbounds.
;
; This loop has a negative stride for A, and the nusw flag is required in
; order to properly extend the increment from i32 -4 to i64 -4.

; The expression for %mul_ext as analyzed by SCEV is
;     (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the following SCEV:
;     i64 {zext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>

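; As a hedged sketch (hypothetical values, not from this test) of why the
; assumption matters here: index counts down, and the zext of 2 * index is
; only an affine i64 sequence if the 32-bit value never wraps below zero:
;
;   unsigned mul = 0;                  /* 2 * index once index reaches 0   */
;   mul -= 2u;                         /* wraps to 0xFFFFFFFE              */
;   unsigned long long ext = mul;      /* zext gives 4294967294, not -2    */
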
define void @f2(i16* noalias %a,
; LV-LABEL: @f2(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP3:%.*]] = add i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP6:%.*]] = icmp ult i32 [[TMP3]], [[TMP1]]
; LV-NEXT:    [[TMP7:%.*]] = select i1 true, i1 [[TMP5]], i1 [[TMP6]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP12:%.*]] = trunc i64 [[N]] to i31
; LV-NEXT:    [[TMP13:%.*]] = zext i31 [[TMP12]] to i64
; LV-NEXT:    [[TMP14:%.*]] = shl nuw nsw i64 [[TMP13]], 1
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP14]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP15:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP16:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP17:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP15]]
; LV-NEXT:    [[TMP18:%.*]] = icmp ugt i8* [[TMP17]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP19:%.*]] = icmp ult i8* [[TMP16]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP20:%.*]] = select i1 true, i1 [[TMP18]], i1 [[TMP19]]
; LV-NEXT:    [[TMP21:%.*]] = or i1 [[TMP20]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP22:%.*]] = or i1 [[TMP10]], [[TMP21]]
; LV-NEXT:    br i1 [[TMP22]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; We replicate the tests above, but this time sign-extend 2 * index instead
; of zero-extending it.

; The expression for %mul_ext as analyzed by SCEV is
;     i64 (sext i32 {0,+,2}<%for.body> to i64)
; We have added the nssw flag to turn this expression into the following SCEV:
;     i64 {0,+,2}<%for.body>

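; A hedged illustration (hypothetical values, not from this test) of the
; signed wrap the nssw predicate rules out: if 2 * index exceeds the signed
; i32 range, the sext'd offset flips from large positive to negative:
;
;   unsigned mul = 0x7FFFFFFEu + 2u;   /* wraps to 0x80000000              */
;   long long ext = (int)mul;          /* on two's-complement targets,     */
;                                      /* sext gives -2147483648, not 2^31 */
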
define void @f3(i16* noalias %a,
; LV-LABEL: @f3(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], 0
; LV-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP2]], 0
; LV-NEXT:    [[TMP6:%.*]] = select i1 false, i1 [[TMP4]], i1 [[TMP5]]
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[TMP11:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP12:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[A5]], i64 [[TMP11]]
; LV-NEXT:    [[TMP14:%.*]] = icmp ugt i8* [[TMP13]], [[A5]]
; LV-NEXT:    [[TMP15:%.*]] = icmp ult i8* [[TMP12]], [[A5]]
; LV-NEXT:    [[TMP16:%.*]] = select i1 false, i1 [[TMP14]], i1 [[TMP15]]
; LV-NEXT:    [[TMP17:%.*]] = or i1 [[TMP16]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP9]], [[TMP17]]
; LV-NEXT:    br i1 [[TMP18]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

define void @f4(i16* noalias %a,
; LV-LABEL: @f4(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP3:%.*]] = add i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP6:%.*]] = icmp slt i32 [[TMP3]], [[TMP1]]
; LV-NEXT:    [[TMP7:%.*]] = select i1 true, i1 [[TMP5]], i1 [[TMP6]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP14:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP13]]
; LV-NEXT:    [[TMP16:%.*]] = icmp ugt i8* [[TMP15]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP17:%.*]] = icmp ult i8* [[TMP14]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP18:%.*]] = select i1 true, i1 [[TMP16]], i1 [[TMP17]]
; LV-NEXT:    [[TMP19:%.*]] = or i1 [[TMP18]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
; LV-NEXT:    br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; The following function is similar to the one above, but marks the GEP
; to pointer %a as inbounds. The index %mul doesn't have the nsw flag,
; so the SCEV expression for %mul can wrap and we need a SCEV predicate
; to continue the analysis.
;
; We can still analyze this by adding the required no-wrap SCEV predicates.

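; A minimal sketch (hypothetical values, not from this test) of what the
; predicate must exclude: without nsw on %mul, 2 * index may still wrap in
; 32 bits, e.g.
;
;   int index = 0x40000000;                  /* hypothetical value */
;   int mul = (int)(2u * (unsigned)index);   /* wraps to INT_MIN   */
;
; which is why the versioned check below still uses
; @llvm.umul.with.overflow.i32 to reject trip counts that could reach
; such values.
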
define void @f5(i16* noalias %a,
; LV-LABEL: @f5(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP3:%.*]] = add i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP6:%.*]] = icmp slt i32 [[TMP3]], [[TMP1]]
; LV-NEXT:    [[TMP7:%.*]] = select i1 true, i1 [[TMP5]], i1 [[TMP6]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[TMP13:%.*]] = sub i64 0, [[MUL_RESULT3]]
; LV-NEXT:    [[TMP14:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP13]]
; LV-NEXT:    [[TMP16:%.*]] = icmp ugt i8* [[TMP15]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP17:%.*]] = icmp ult i8* [[TMP14]], [[SCEVGEP5]]
; LV-NEXT:    [[TMP18:%.*]] = select i1 true, i1 [[TMP16]], i1 [[TMP17]]
; LV-NEXT:    [[TMP19:%.*]] = or i1 [[TMP18]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP20:%.*]] = or i1 [[TMP10]], [[TMP19]]
; LV-NEXT:    br i1 [[TMP20]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT6:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit6:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2

  %arrayidxA = getelementptr inbounds i16, i16* %a, i32 %mul
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}