; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-versioning -S < %s | FileCheck %s -check-prefix=LV

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; For this loop:
;   unsigned index = 0;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index++;
;   }
;
; SCEV is unable to prove that A[2 * i] does not overflow.
;
; Analyzing the IR does not help us because the GEPs are not
; affine AddRecExprs. However, we can turn them into AddRecExprs
; using SCEV Predicates.
;
; Once we have an affine expression we need to add an additional NUSW
; check to verify that the pointers don't wrap, since the GEPs are not
; inbounds.

; The expression for %mul_ext as analyzed by SCEV is
;    (zext i32 {0,+,2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the following SCEV:
;    i64 {0,+,2}<%for.body>
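;
; Loop versioning emits these predicate checks up front and keeps two
; copies of the loop. As a rough C sketch (hypothetical helper names,
; not the generated IR):
;
;   if (predicates_may_fail)    /* the overflow and pointer checks */
;     run_original_loop();      /* for.body.lver.orig below */
;   else
;     run_assuming_no_wrap();   /* for.body, where the index is affine */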

define void @f1(i16* noalias %a,
; LV-LABEL: @f1(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], 0
; LV-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 0
; LV-NEXT:    [[TMP6:%.*]] = select i1 false, i1 [[TMP4]], i1 [[TMP5]]
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 false, [[TMP9]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP11:%.*]] = bitcast i8* [[UGLYGEP]] to i16*
; LV-NEXT:    [[TMP12:%.*]] = sub i64 [[MUL_RESULT3]], -4
; LV-NEXT:    [[TMP13:%.*]] = sub i64 4, [[TMP12]]
; LV-NEXT:    [[UGLYGEP6:%.*]] = getelementptr i8, i8* [[A5]], i64 [[TMP13]]
; LV-NEXT:    [[TMP14:%.*]] = bitcast i8* [[UGLYGEP6]] to i16*
; LV-NEXT:    [[TMP15:%.*]] = icmp ugt i16* [[TMP14]], [[A]]
; LV-NEXT:    [[TMP16:%.*]] = icmp ult i16* [[TMP11]], [[A]]
; LV-NEXT:    [[TMP17:%.*]] = select i1 false, i1 [[TMP15]], i1 [[TMP16]]
; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP17]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP19:%.*]] = or i1 [[TMP10]], [[TMP18]]
; LV-NEXT:    br i1 [[TMP19]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT7:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit7:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; For this loop:
;   unsigned index = n;
;   for (int i = 0; i < n; i++) {
;    A[2 * index] = A[2 * index] + B[i];
;    index--;
;   }
;
; the SCEV expression for 2 * index is not an AddRecExpr
; (and implicitly not affine). However, we are able to make assumptions
; that will turn the expression into an affine one and continue the
; analysis.
;
; Once we have an affine expression we need to add an additional NUSW
; check to verify that the pointers don't wrap, since the GEPs are not
; inbounds.
;
; This loop has a negative stride for A, and the nusw flag is required in
; order to properly extend the increment from i32 -4 to i64 -4.

; The expression for %mul_ext as analyzed by SCEV is
;     (zext i32 {(2 * (trunc i64 %N to i32)),+,-2}<%for.body> to i64)
; We have added the nusw flag to turn this expression into the following SCEV:
;     i64 {zext i32 (2 * (trunc i64 %N to i32)) to i64,+,-2}<%for.body>
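;
; For intuition, a hedged sketch (not part of the checked output): with
; the nusw assumption the i32 index expression follows the affine
; sequence
;   2 * index(i) = 2 * (uint32_t)N - 2 * i
; which strides downward without wrapping, so the first iteration touches
; the highest address of A and the pointer checks can bound the accessed
; range from that starting access.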

define void @f2(i16* noalias %a,
; LV-LABEL: @f2(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP3:%.*]] = add i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP6:%.*]] = icmp ult i32 [[TMP3]], [[TMP1]]
; LV-NEXT:    [[TMP7:%.*]] = select i1 true, i1 [[TMP5]], i1 [[TMP6]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP11:%.*]] = or i1 false, [[TMP10]]
; LV-NEXT:    [[TMP12:%.*]] = trunc i64 [[N]] to i31
; LV-NEXT:    [[TMP13:%.*]] = zext i31 [[TMP12]] to i64
; LV-NEXT:    [[TMP14:%.*]] = shl nuw nsw i64 [[TMP13]], 1
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP14]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP15:%.*]] = bitcast i8* [[UGLYGEP]] to i16*
; LV-NEXT:    [[TMP16:%.*]] = sub i64 [[MUL_RESULT3]], -4
; LV-NEXT:    [[TMP17:%.*]] = sub i64 4, [[TMP16]]
; LV-NEXT:    [[UGLYGEP6:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP17]]
; LV-NEXT:    [[TMP18:%.*]] = bitcast i8* [[UGLYGEP6]] to i16*
; LV-NEXT:    [[TMP19:%.*]] = icmp ugt i16* [[TMP18]], [[SCEVGEP]]
; LV-NEXT:    [[TMP20:%.*]] = icmp ult i16* [[TMP15]], [[SCEVGEP]]
; LV-NEXT:    [[TMP21:%.*]] = select i1 true, i1 [[TMP19]], i1 [[TMP20]]
; LV-NEXT:    [[TMP22:%.*]] = or i1 [[TMP21]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP23:%.*]] = or i1 [[TMP11]], [[TMP22]]
; LV-NEXT:    br i1 [[TMP23]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT7:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit7:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; We replicate the tests above, but this time sign extend 2 * index instead
; of zero extending it.

; The expression for %mul_ext as analyzed by SCEV is
;     i64 (sext i32 {0,+,2}<%for.body> to i64)
; We have added the nssw flag to turn this expression into the following SCEV:
;     i64 {0,+,2}<%for.body>
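;
; In C terms (an informal sketch), the only difference from @f1 is that
; the i32 value 2 * index is treated as signed when widened, roughly
;   A[(int64_t)(int32_t)(2 * index)]
; so the predicate must rule out signed wrap (nssw) rather than unsigned
; wrap (nusw).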

define void @f3(i16* noalias %a,
; LV-LABEL: @f3(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[A5:%.*]] = bitcast i16* [[A:%.*]] to i8*
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; LV-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = icmp sgt i32 [[TMP3]], 0
; LV-NEXT:    [[TMP5:%.*]] = icmp slt i32 [[TMP2]], 0
; LV-NEXT:    [[TMP6:%.*]] = select i1 false, i1 [[TMP4]], i1 [[TMP5]]
; LV-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 false, [[TMP9]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[A5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP11:%.*]] = bitcast i8* [[UGLYGEP]] to i16*
; LV-NEXT:    [[TMP12:%.*]] = sub i64 [[MUL_RESULT3]], -4
; LV-NEXT:    [[TMP13:%.*]] = sub i64 4, [[TMP12]]
; LV-NEXT:    [[UGLYGEP6:%.*]] = getelementptr i8, i8* [[A5]], i64 [[TMP13]]
; LV-NEXT:    [[TMP14:%.*]] = bitcast i8* [[UGLYGEP6]] to i16*
; LV-NEXT:    [[TMP15:%.*]] = icmp ugt i16* [[TMP14]], [[A]]
; LV-NEXT:    [[TMP16:%.*]] = icmp ult i16* [[TMP11]], [[A]]
; LV-NEXT:    [[TMP17:%.*]] = select i1 false, i1 [[TMP15]], i1 [[TMP16]]
; LV-NEXT:    [[TMP18:%.*]] = or i1 [[TMP17]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP19:%.*]] = or i1 [[TMP10]], [[TMP18]]
; LV-NEXT:    br i1 [[TMP19]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT7:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit7:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

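; @f4 is the sext counterpart of @f2: the index starts at trunc(%N) and
; counts down, and 2 * index is sign extended before it is used to
; address A.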
define void @f4(i16* noalias %a,
; LV-LABEL: @f4(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP3:%.*]] = add i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP6:%.*]] = icmp slt i32 [[TMP3]], [[TMP1]]
; LV-NEXT:    [[TMP7:%.*]] = select i1 true, i1 [[TMP5]], i1 [[TMP6]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP11:%.*]] = or i1 false, [[TMP10]]
; LV-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP13:%.*]] = bitcast i8* [[UGLYGEP]] to i16*
; LV-NEXT:    [[TMP14:%.*]] = sub i64 [[MUL_RESULT3]], -4
; LV-NEXT:    [[TMP15:%.*]] = sub i64 4, [[TMP14]]
; LV-NEXT:    [[UGLYGEP6:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP15]]
; LV-NEXT:    [[TMP16:%.*]] = bitcast i8* [[UGLYGEP6]] to i16*
; LV-NEXT:    [[TMP17:%.*]] = icmp ugt i16* [[TMP16]], [[SCEVGEP]]
; LV-NEXT:    [[TMP18:%.*]] = icmp ult i16* [[TMP13]], [[SCEVGEP]]
; LV-NEXT:    [[TMP19:%.*]] = select i1 true, i1 [[TMP17]], i1 [[TMP18]]
; LV-NEXT:    [[TMP20:%.*]] = or i1 [[TMP19]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP21:%.*]] = or i1 [[TMP11]], [[TMP20]]
; LV-NEXT:    br i1 [[TMP21]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = sext i32 [[MUL_LVER_ORIG]] to i64
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[MUL_EXT:%.*]] = sext i32 [[MUL]] to i64
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr i16, i16* [[A]], i64 [[MUL_EXT]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT7:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit7:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = sext i32 %mul to i64

  %arrayidxA = getelementptr i16, i16* %a, i64 %mul_ext
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; The following function is similar to the one above, but marks the GEP
; into pointer %a as inbounds. The index %mul does not carry the nsw flag,
; so the SCEV expression for %mul can still wrap and we need a SCEV
; predicate to continue the analysis.
;
; We can still analyze this by adding the required no-wrap SCEV predicates.
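;
; For intuition, a hedged example (not part of the checked output):
; without nsw, 2 * index can wrap in i32; index == 0x40000000 gives
; 2 * index == INT32_MIN, so the access would jump to a far-away address
; instead of striding. The emitted predicate rules this out at run time.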

define void @f5(i16* noalias %a,
; LV-LABEL: @f5(
; LV-NEXT:  for.body.lver.check:
; LV-NEXT:    [[TRUNCN:%.*]] = trunc i64 [[N:%.*]] to i32
; LV-NEXT:    [[TMP0:%.*]] = add i64 [[N]], -1
; LV-NEXT:    [[TMP1:%.*]] = shl i32 [[TRUNCN]], 1
; LV-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
; LV-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
; LV-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; LV-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; LV-NEXT:    [[TMP3:%.*]] = add i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP4:%.*]] = sub i32 [[TMP1]], [[MUL_RESULT]]
; LV-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], [[TMP1]]
; LV-NEXT:    [[TMP6:%.*]] = icmp slt i32 [[TMP3]], [[TMP1]]
; LV-NEXT:    [[TMP7:%.*]] = select i1 true, i1 [[TMP5]], i1 [[TMP6]]
; LV-NEXT:    [[TMP8:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; LV-NEXT:    [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
; LV-NEXT:    [[TMP10:%.*]] = or i1 [[TMP9]], [[MUL_OVERFLOW]]
; LV-NEXT:    [[TMP11:%.*]] = or i1 false, [[TMP10]]
; LV-NEXT:    [[TMP12:%.*]] = sext i32 [[TMP1]] to i64
; LV-NEXT:    [[SCEVGEP:%.*]] = getelementptr i16, i16* [[A:%.*]], i64 [[TMP12]]
; LV-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
; LV-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
; LV-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
; LV-NEXT:    [[SCEVGEP5:%.*]] = bitcast i16* [[SCEVGEP]] to i8*
; LV-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[MUL_RESULT3]]
; LV-NEXT:    [[TMP13:%.*]] = bitcast i8* [[UGLYGEP]] to i16*
; LV-NEXT:    [[TMP14:%.*]] = sub i64 [[MUL_RESULT3]], -4
; LV-NEXT:    [[TMP15:%.*]] = sub i64 4, [[TMP14]]
; LV-NEXT:    [[UGLYGEP6:%.*]] = getelementptr i8, i8* [[SCEVGEP5]], i64 [[TMP15]]
; LV-NEXT:    [[TMP16:%.*]] = bitcast i8* [[UGLYGEP6]] to i16*
; LV-NEXT:    [[TMP17:%.*]] = icmp ugt i16* [[TMP16]], [[SCEVGEP]]
; LV-NEXT:    [[TMP18:%.*]] = icmp ult i16* [[TMP13]], [[SCEVGEP]]
; LV-NEXT:    [[TMP19:%.*]] = select i1 true, i1 [[TMP17]], i1 [[TMP18]]
; LV-NEXT:    [[TMP20:%.*]] = or i1 [[TMP19]], [[MUL_OVERFLOW4]]
; LV-NEXT:    [[TMP21:%.*]] = or i1 [[TMP11]], [[TMP20]]
; LV-NEXT:    br i1 [[TMP21]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
; LV:       for.body.ph.lver.orig:
; LV-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; LV:       for.body.lver.orig:
; LV-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH_LVER_ORIG]] ], [ [[DEC_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; LV-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; LV-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL_LVER_ORIG]]
; LV-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i16, i16* [[B:%.*]], i64 [[IND_LVER_ORIG]]
; LV-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i16, i16* [[ARRAYIDXB_LVER_ORIG]], align 2
; LV-NEXT:    [[ADD_LVER_ORIG:%.*]] = mul i16 [[LOADA_LVER_ORIG]], [[LOADB_LVER_ORIG]]
; LV-NEXT:    store i16 [[ADD_LVER_ORIG]], i16* [[ARRAYIDXA_LVER_ORIG]], align 2
; LV-NEXT:    [[INC_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; LV-NEXT:    [[DEC_LVER_ORIG]] = sub i32 [[IND1_LVER_ORIG]], 1
; LV-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INC_LVER_ORIG]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
; LV:       for.body.ph:
; LV-NEXT:    br label [[FOR_BODY:%.*]]
; LV:       for.body:
; LV-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[IND1:%.*]] = phi i32 [ [[TRUNCN]], [[FOR_BODY_PH]] ], [ [[DEC:%.*]], [[FOR_BODY]] ]
; LV-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; LV-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i16, i16* [[A]], i32 [[MUL]]
; LV-NEXT:    [[LOADA:%.*]] = load i16, i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i16, i16* [[B]], i64 [[IND]]
; LV-NEXT:    [[LOADB:%.*]] = load i16, i16* [[ARRAYIDXB]], align 2
; LV-NEXT:    [[ADD:%.*]] = mul i16 [[LOADA]], [[LOADB]]
; LV-NEXT:    store i16 [[ADD]], i16* [[ARRAYIDXA]], align 2
; LV-NEXT:    [[INC]] = add nuw nsw i64 [[IND]], 1
; LV-NEXT:    [[DEC]] = sub i32 [[IND1]], 1
; LV-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[N]]
; LV-NEXT:    br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT7:%.*]], label [[FOR_BODY]]
; LV:       for.end.loopexit:
; LV-NEXT:    br label [[FOR_END:%.*]]
; LV:       for.end.loopexit7:
; LV-NEXT:    br label [[FOR_END]]
; LV:       for.end:
; LV-NEXT:    ret void
;
  i16* noalias %b, i64 %N) {
entry:
  %TruncN = trunc i64 %N to i32
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %inc, %for.body ]
  %ind1 = phi i32 [ %TruncN, %entry ], [ %dec, %for.body ]

  %mul = mul i32 %ind1, 2

  %arrayidxA = getelementptr inbounds i16, i16* %a, i32 %mul
  %loadA = load i16, i16* %arrayidxA, align 2

  %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
  %loadB = load i16, i16* %arrayidxB, align 2

  %add = mul i16 %loadA, %loadB

  store i16 %add, i16* %arrayidxA, align 2

  %inc = add nuw nsw i64 %ind, 1
  %dec = sub i32 %ind1, 1

  %exitcond = icmp eq i64 %inc, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}