xref: /llvm-project/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll (revision 713482fccf82d33c5c4ddb24538958617e1eb957)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=1 -force-vector-interleave=2 -passes=loop-vectorize,simplifycfg -verify-loop-info -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck %s --check-prefix=UNROLL
3; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=1 -force-vector-interleave=2 -passes=loop-vectorize -verify-loop-info < %s | FileCheck %s --check-prefix=UNROLL-NOSIMPLIFY
4; RUN: opt -S -vectorize-num-stores-pred=1 -force-vector-width=2 -force-vector-interleave=1 -passes=loop-vectorize,simplifycfg -verify-loop-info -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck %s --check-prefix=VEC
5
; Pin a fixed 64-bit datalayout so pointer widths and vectorizer cost
; decisions do not vary with the host the test runs on.
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
7
8; Test predication of stores.
define i32 @test(ptr nocapture %f) #0 {
; UNROLL-LABEL: @test(
; UNROLL-NEXT:  entry:
; UNROLL-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL:       vector.body:
; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; UNROLL-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[F:%.*]], i64 [[TMP0]]
; UNROLL-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP1]]
; UNROLL-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4
; UNROLL-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4
; UNROLL-NEXT:    [[TMP6:%.*]] = icmp sgt i32 [[TMP4]], 100
; UNROLL-NEXT:    [[TMP7:%.*]] = icmp sgt i32 [[TMP5]], 100
; UNROLL-NEXT:    br i1 [[TMP6]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL:       pred.store.if:
; UNROLL-NEXT:    [[TMP8:%.*]] = add nsw i32 [[TMP4]], 20
; UNROLL-NEXT:    store i32 [[TMP8]], ptr [[TMP2]], align 4
; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE]]
; UNROLL:       pred.store.continue:
; UNROLL-NEXT:    br i1 [[TMP7]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; UNROLL:       pred.store.if1:
; UNROLL-NEXT:    [[TMP9:%.*]] = add nsw i32 [[TMP5]], 20
; UNROLL-NEXT:    store i32 [[TMP9]], ptr [[TMP3]], align 4
; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; UNROLL:       pred.store.continue2:
; UNROLL-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; UNROLL-NEXT:    br i1 [[TMP10]], label [[FOR_END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; UNROLL:       for.end:
; UNROLL-NEXT:    ret i32 0
;
; UNROLL-NOSIMPLIFY-LABEL: @test(
; UNROLL-NOSIMPLIFY-NEXT:  entry:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; UNROLL-NOSIMPLIFY:       vector.ph:
; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       vector.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[F:%.*]], i64 [[TMP0]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP1]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = icmp sgt i32 [[TMP4]], 100
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = icmp sgt i32 [[TMP5]], 100
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP6]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL-NOSIMPLIFY:       pred.store.if:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = add nsw i32 [[TMP4]], 20
; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[TMP8]], ptr [[TMP2]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
; UNROLL-NOSIMPLIFY:       pred.store.continue:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP7]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; UNROLL-NOSIMPLIFY:       pred.store.if1:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP9:%.*]] = add nsw i32 [[TMP5]], 20
; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[TMP9]], ptr [[TMP3]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; UNROLL-NOSIMPLIFY:       pred.store.continue2:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; UNROLL-NOSIMPLIFY:       middle.block:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; UNROLL-NOSIMPLIFY:       scalar.ph:
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 128, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       for.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[INDVARS_IV]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    [[CMP1:%.*]] = icmp sgt i32 [[TMP11]], 100
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; UNROLL-NOSIMPLIFY:       if.then:
; UNROLL-NOSIMPLIFY-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP11]], 20
; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[ADD]], ptr [[ARRAYIDX]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC]]
; UNROLL-NOSIMPLIFY:       for.inc:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 128
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; UNROLL-NOSIMPLIFY:       for.end:
; UNROLL-NOSIMPLIFY-NEXT:    ret i32 0
;
; VEC-LABEL: @test(
; VEC-NEXT:  entry:
; VEC-NEXT:    br label [[VECTOR_BODY:%.*]]
; VEC:       vector.body:
; VEC-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; VEC-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; VEC-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[F:%.*]], i64 [[TMP0]]
; VEC-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0
; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP2]], align 4
; VEC-NEXT:    [[TMP3:%.*]] = icmp sgt <2 x i32> [[WIDE_LOAD]], splat (i32 100)
; VEC-NEXT:    [[TMP4:%.*]] = extractelement <2 x i1> [[TMP3]], i32 0
; VEC-NEXT:    br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; VEC:       pred.store.if:
; VEC-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP0]]
; VEC-NEXT:    [[TMP6:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 0
; VEC-NEXT:    [[TMP7:%.*]] = add nsw i32 [[TMP6]], 20
; VEC-NEXT:    store i32 [[TMP7]], ptr [[TMP5]], align 4
; VEC-NEXT:    br label [[PRED_STORE_CONTINUE]]
; VEC:       pred.store.continue:
; VEC-NEXT:    [[TMP8:%.*]] = extractelement <2 x i1> [[TMP3]], i32 1
; VEC-NEXT:    br i1 [[TMP8]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; VEC:       pred.store.if1:
; VEC-NEXT:    [[TMP9:%.*]] = add i64 [[INDEX]], 1
; VEC-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[F]], i64 [[TMP9]]
; VEC-NEXT:    [[TMP11:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 1
; VEC-NEXT:    [[TMP12:%.*]] = add nsw i32 [[TMP11]], 20
; VEC-NEXT:    store i32 [[TMP12]], ptr [[TMP10]], align 4
; VEC-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; VEC:       pred.store.continue2:
; VEC-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC-NEXT:    [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128
; VEC-NEXT:    br i1 [[TMP13]], label [[FOR_END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VEC:       for.end:
; VEC-NEXT:    ret i32 0
;
; Hand-written source loop: walk 128 i32 elements of %f; when an element
; exceeds 100, add 20 and store it back.  The store is control-dependent,
; so with -vectorize-num-stores-pred=1 the vectorizer must predicate it
; (pred.store.if / pred.store.continue blocks in the CHECK output above).
entry:
  br label %for.body



for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
  %arrayidx = getelementptr inbounds i32, ptr %f, i64 %indvars.iv
  %0 = load i32, ptr %arrayidx, align 4           ; unconditional load
  %cmp1 = icmp sgt i32 %0, 100                    ; predicate guarding the store
  br i1 %cmp1, label %if.then, label %for.inc

if.then:
  %add = add nsw i32 %0, 20
  store i32 %add, ptr %arrayidx, align 4          ; the conditional store under test
  br label %for.inc

for.inc:
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 128   ; fixed trip count of 128
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret i32 0
}
153
154; Track basic blocks when unrolling conditional blocks. This code used to assert
155; because we did not update the phi nodes with the proper predecessor in the
156; vectorized loop body.
157; PR18724
158
define void @bug18724(i1 %cond, ptr %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
; UNROLL-LABEL: @bug18724(
; UNROLL-NEXT:  entry:
; UNROLL-NEXT:    [[TMP0:%.*]] = xor i1 [[COND:%.*]], true
; UNROLL-NEXT:    call void @llvm.assume(i1 [[TMP0]])
; UNROLL-NEXT:    [[TMP1:%.*]] = trunc i64 [[V_1:%.*]] to i32
; UNROLL-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0)
; UNROLL-NEXT:    [[TMP2:%.*]] = sub i32 [[SMAX]], [[TMP1]]
; UNROLL-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
; UNROLL-NEXT:    [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
; UNROLL-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 2
; UNROLL-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; UNROLL:       vector.ph:
; UNROLL-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 2
; UNROLL-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; UNROLL-NEXT:    [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
; UNROLL-NEXT:    [[TMP13:%.*]] = xor i1 [[COND_2:%.*]], true
; UNROLL-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL:       vector.body:
; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
; UNROLL-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ [[V_2:%.*]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE3]] ]
; UNROLL-NEXT:    [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[PREDPHI4:%.*]], [[PRED_STORE_CONTINUE3]] ]
; UNROLL-NEXT:    [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]]
; UNROLL-NEXT:    [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 0
; UNROLL-NEXT:    [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 1
; UNROLL-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR:%.*]], i64 0, i64 [[TMP5]]
; UNROLL-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[TMP6]]
; UNROLL-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP7]], align 4
; UNROLL-NEXT:    [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
; UNROLL-NEXT:    br i1 [[COND_2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE3]]
; UNROLL:       pred.store.if:
; UNROLL-NEXT:    store i32 [[TMP9]], ptr [[TMP7]], align 4
; UNROLL-NEXT:    store i32 [[TMP10]], ptr [[TMP8]], align 4
; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE3]]
; UNROLL:       pred.store.continue3:
; UNROLL-NEXT:    [[TMP11:%.*]] = add i32 [[VEC_PHI]], 1
; UNROLL-NEXT:    [[TMP12:%.*]] = add i32 [[VEC_PHI1]], 1
; UNROLL-NEXT:    [[PREDPHI]] = select i1 [[TMP13]], i32 [[VEC_PHI]], i32 [[TMP11]]
; UNROLL-NEXT:    [[PREDPHI4]] = select i1 [[TMP13]], i32 [[VEC_PHI1]], i32 [[TMP12]]
; UNROLL-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NEXT:    [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NEXT:    br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; UNROLL:       middle.block:
; UNROLL-NEXT:    [[BIN_RDX:%.*]] = add i32 [[PREDPHI4]], [[PREDPHI]]
; UNROLL-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; UNROLL-NEXT:    [[TMP16:%.*]] = xor i1 [[CMP_N]], true
; UNROLL-NEXT:    call void @llvm.assume(i1 [[TMP16]])
; UNROLL-NEXT:    br label [[SCALAR_PH]]
; UNROLL:       scalar.ph:
; UNROLL-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[ENTRY:%.*]] ]
; UNROLL-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ], [ [[V_2]], [[ENTRY]] ]
; UNROLL-NEXT:    br label [[FOR_BODY14:%.*]]
; UNROLL:       for.body14:
; UNROLL-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; UNROLL-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[INDVARS_IV3]]
; UNROLL-NEXT:    [[TMP:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4
; UNROLL-NEXT:    br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]]
; UNROLL:       if.then18:
; UNROLL-NEXT:    store i32 [[TMP]], ptr [[ARRAYIDX16]], align 4
; UNROLL-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
; UNROLL-NEXT:    br label [[FOR_INC23]]
; UNROLL:       for.inc23:
; UNROLL-NEXT:    [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ]
; UNROLL-NEXT:    [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1
; UNROLL-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32
; UNROLL-NEXT:    [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0
; UNROLL-NEXT:    call void @llvm.assume(i1 [[CMP13]])
; UNROLL-NEXT:    br label [[FOR_BODY14]]
;
; UNROLL-NOSIMPLIFY-LABEL: @bug18724(
; UNROLL-NOSIMPLIFY-NEXT:  entry:
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY9:%.*]]
; UNROLL-NOSIMPLIFY:       for.body9:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND:%.*]], label [[FOR_INC26:%.*]], label [[FOR_BODY14_PREHEADER:%.*]]
; UNROLL-NOSIMPLIFY:       for.body14.preheader:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = trunc i64 [[V_1:%.*]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP0]], i32 0)
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = sub i32 [[SMAX]], [[TMP0]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 2
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; UNROLL-NOSIMPLIFY:       vector.ph:
; UNROLL-NOSIMPLIFY-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2
; UNROLL-NOSIMPLIFY-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; UNROLL-NOSIMPLIFY-NEXT:    [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP12:%.*]] = xor i1 [[COND_2:%.*]], true
; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       vector.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[VEC_PHI:%.*]] = phi i32 [ [[V_2:%.*]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE3]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[PREDPHI4:%.*]], [[PRED_STORE_CONTINUE3]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 0
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR:%.*]], i64 0, i64 [[TMP4]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[TMP5]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = load i32, ptr [[TMP6]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP7]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND_2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL-NOSIMPLIFY:       pred.store.if:
; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[TMP8]], ptr [[TMP6]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
; UNROLL-NOSIMPLIFY:       pred.store.continue:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND_2]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]]
; UNROLL-NOSIMPLIFY:       pred.store.if2:
; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[TMP9]], ptr [[TMP7]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE3]]
; UNROLL-NOSIMPLIFY:       pred.store.continue3:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP10:%.*]] = add i32 [[VEC_PHI]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP11:%.*]] = add i32 [[VEC_PHI1]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[PREDPHI]] = select i1 [[TMP12]], i32 [[VEC_PHI]], i32 [[TMP10]]
; UNROLL-NOSIMPLIFY-NEXT:    [[PREDPHI4]] = select i1 [[TMP12]], i32 [[VEC_PHI1]], i32 [[TMP11]]
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; UNROLL-NOSIMPLIFY:       middle.block:
; UNROLL-NOSIMPLIFY-NEXT:    [[BIN_RDX:%.*]] = add i32 [[PREDPHI4]], [[PREDPHI]]
; UNROLL-NOSIMPLIFY-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP_N]], label [[FOR_INC26_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; UNROLL-NOSIMPLIFY:       scalar.ph:
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[FOR_BODY14_PREHEADER]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ], [ [[V_2]], [[FOR_BODY14_PREHEADER]] ]
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY14:%.*]]
; UNROLL-NOSIMPLIFY:       for.body14:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[INDVARS_IV3]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]]
; UNROLL-NOSIMPLIFY:       if.then18:
; UNROLL-NOSIMPLIFY-NEXT:    store i32 [[TMP]], ptr [[ARRAYIDX16]], align 4
; UNROLL-NOSIMPLIFY-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC23]]
; UNROLL-NOSIMPLIFY:       for.inc23:
; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[CMP13]], label [[FOR_BODY14]], label [[FOR_INC26_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]]
; UNROLL-NOSIMPLIFY:       for.inc26.loopexit:
; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_2_LCSSA:%.*]] = phi i32 [ [[INEWCHUNKS_2]], [[FOR_INC23]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC26]]
; UNROLL-NOSIMPLIFY:       for.inc26:
; UNROLL-NOSIMPLIFY-NEXT:    [[INEWCHUNKS_1_LCSSA:%.*]] = phi i32 [ undef, [[FOR_BODY9]] ], [ [[INEWCHUNKS_2_LCSSA]], [[FOR_INC26_LOOPEXIT]] ]
; UNROLL-NOSIMPLIFY-NEXT:    unreachable
;
; VEC-LABEL: @bug18724(
; VEC-NEXT:  entry:
; VEC-NEXT:    [[TMP0:%.*]] = xor i1 [[COND:%.*]], true
; VEC-NEXT:    call void @llvm.assume(i1 [[TMP0]])
; VEC-NEXT:    [[TMP1:%.*]] = trunc i64 [[V_1:%.*]] to i32
; VEC-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[TMP1]], i32 0)
; VEC-NEXT:    [[TMP2:%.*]] = sub i32 [[SMAX]], [[TMP1]]
; VEC-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
; VEC-NEXT:    [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
; VEC-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 2
; VEC-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; VEC:       vector.ph:
; VEC-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 2
; VEC-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; VEC-NEXT:    [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
; VEC-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[COND_2:%.*]], i64 0
; VEC-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
; VEC-NEXT:    [[TMP17:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; VEC-NEXT:    [[TMP5:%.*]] = insertelement <2 x i32> zeroinitializer, i32 [[V_2:%.*]], i32 0
; VEC-NEXT:    br label [[VECTOR_BODY:%.*]]
; VEC:       vector.body:
; VEC-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; VEC-NEXT:    [[VEC_PHI:%.*]] = phi <2 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[PRED_STORE_CONTINUE2]] ]
; VEC-NEXT:    [[OFFSET_IDX:%.*]] = add i64 [[V_1]], [[INDEX]]
; VEC-NEXT:    [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 0
; VEC-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR:%.*]], i64 0, i64 [[TMP6]]
; VEC-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0
; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP8]], align 4
; VEC-NEXT:    br i1 [[COND_2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE2]]
; VEC:       pred.store.if:
; VEC-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[TMP6]]
; VEC-NEXT:    [[TMP11:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 0
; VEC-NEXT:    store i32 [[TMP11]], ptr [[TMP10]], align 4
; VEC-NEXT:    [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 1
; VEC-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[TMP13]]
; VEC-NEXT:    [[TMP15:%.*]] = extractelement <2 x i32> [[WIDE_LOAD]], i32 1
; VEC-NEXT:    store i32 [[TMP15]], ptr [[TMP14]], align 4
; VEC-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; VEC:       pred.store.continue2:
; VEC-NEXT:    [[TMP16:%.*]] = add <2 x i32> [[VEC_PHI]], splat (i32 1)
; VEC-NEXT:    [[PREDPHI]] = select <2 x i1> [[TMP17]], <2 x i32> [[VEC_PHI]], <2 x i32> [[TMP16]]
; VEC-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC-NEXT:    [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; VEC-NEXT:    br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC:       middle.block:
; VEC-NEXT:    [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[PREDPHI]])
; VEC-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; VEC-NEXT:    [[TMP20:%.*]] = xor i1 [[CMP_N]], true
; VEC-NEXT:    call void @llvm.assume(i1 [[TMP20]])
; VEC-NEXT:    br label [[SCALAR_PH]]
; VEC:       scalar.ph:
; VEC-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[V_1]], [[ENTRY:%.*]] ]
; VEC-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ [[V_2]], [[ENTRY]] ]
; VEC-NEXT:    br label [[FOR_BODY14:%.*]]
; VEC:       for.body14:
; VEC-NEXT:    [[INDVARS_IV3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT4:%.*]], [[FOR_INC23:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; VEC-NEXT:    [[INEWCHUNKS_120:%.*]] = phi i32 [ [[INEWCHUNKS_2:%.*]], [[FOR_INC23]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; VEC-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[INDVARS_IV3]]
; VEC-NEXT:    [[TMP:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4
; VEC-NEXT:    br i1 [[COND_2]], label [[IF_THEN18:%.*]], label [[FOR_INC23]]
; VEC:       if.then18:
; VEC-NEXT:    store i32 [[TMP]], ptr [[ARRAYIDX16]], align 4
; VEC-NEXT:    [[INC21:%.*]] = add nsw i32 [[INEWCHUNKS_120]], 1
; VEC-NEXT:    br label [[FOR_INC23]]
; VEC:       for.inc23:
; VEC-NEXT:    [[INEWCHUNKS_2]] = phi i32 [ [[INC21]], [[IF_THEN18]] ], [ [[INEWCHUNKS_120]], [[FOR_BODY14]] ]
; VEC-NEXT:    [[INDVARS_IV_NEXT4]] = add nsw i64 [[INDVARS_IV3]], 1
; VEC-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV3]] to i32
; VEC-NEXT:    [[CMP13:%.*]] = icmp slt i32 [[TMP1]], 0
; VEC-NEXT:    call void @llvm.assume(i1 [[CMP13]])
; VEC-NEXT:    br label [[FOR_BODY14]]
;
; Hand-written source (PR18724 reproducer, see comment above the define):
; an inner loop with a predicated store (if.then18) and a reduction carried
; through a phi in for.inc23.  The vectorizer used to assert here because
; phi predecessors were not updated when unrolling conditional blocks.
entry:
  br label %for.body9

for.body9:
  br i1 %cond, label %for.inc26, label %for.body14

for.body14:
  %indvars.iv3 = phi i64 [ %indvars.iv.next4, %for.inc23 ], [ %v.1, %for.body9 ]
  %iNewChunks.120 = phi i32 [ %iNewChunks.2, %for.inc23 ], [ %v.2, %for.body9 ]
  %arrayidx16 = getelementptr inbounds [768 x i32], ptr %ptr, i64 0, i64 %indvars.iv3
  %tmp = load i32, ptr %arrayidx16, align 4
  br i1 %cond.2, label %if.then18, label %for.inc23   ; loop-invariant store predicate

if.then18:
  store i32 %tmp, ptr %arrayidx16, align 4            ; predicated store
  %inc21 = add nsw i32 %iNewChunks.2, 1
  br label %for.inc23

for.inc23:
  %iNewChunks.2 = phi i32 [ %inc21, %if.then18 ], [ %iNewChunks.120, %for.body14 ]  ; reduction merge
  %indvars.iv.next4 = add nsw i64 %indvars.iv3, 1
  %tmp1 = trunc i64 %indvars.iv3 to i32
  %cmp13 = icmp slt i32 %tmp1, 0
  br i1 %cmp13, label %for.body14, label %for.inc26

for.inc26:
  %iNewChunks.1.lcssa = phi i32 [ undef, %for.body9 ], [ %iNewChunks.2, %for.inc23 ]
  unreachable
}
408
409; In the test below, it's more profitable for the expression feeding the
410; conditional store to remain scalar. Since we can only type-shrink vector
411; types, we shouldn't try to represent the expression in a smaller type.
412;
413define void @minimal_bit_widths(i1 %c) {
414; UNROLL-LABEL: @minimal_bit_widths(
415; UNROLL-NEXT:  entry:
416; UNROLL-NEXT:    br label [[VECTOR_BODY:%.*]]
417; UNROLL:       vector.body:
418; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
419; UNROLL-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
420; UNROLL-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
421; UNROLL-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr undef, i64 [[TMP0]]
422; UNROLL-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr undef, i64 [[TMP1]]
423; UNROLL-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP2]], align 1
424; UNROLL-NEXT:    [[TMP5:%.*]] = load i8, ptr [[TMP3]], align 1
425; UNROLL-NEXT:    br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE2]]
426; UNROLL:       pred.store.if:
427; UNROLL-NEXT:    store i8 [[TMP4]], ptr [[TMP2]], align 1
428; UNROLL-NEXT:    store i8 [[TMP5]], ptr [[TMP3]], align 1
429; UNROLL-NEXT:    br label [[PRED_STORE_CONTINUE2]]
430; UNROLL:       pred.store.continue2:
431; UNROLL-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
432; UNROLL-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
433; UNROLL-NEXT:    br i1 [[TMP6]], label [[FOR_END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
434; UNROLL:       for.end:
435; UNROLL-NEXT:    ret void
436;
437; UNROLL-NOSIMPLIFY-LABEL: @minimal_bit_widths(
438; UNROLL-NOSIMPLIFY-NEXT:  entry:
439; UNROLL-NOSIMPLIFY-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
440; UNROLL-NOSIMPLIFY:       vector.ph:
441; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
442; UNROLL-NOSIMPLIFY:       vector.body:
443; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
444; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
445; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
446; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr undef, i64 [[TMP0]]
447; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr undef, i64 [[TMP1]]
448; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP2]], align 1
449; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = load i8, ptr [[TMP3]], align 1
450; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
451; UNROLL-NOSIMPLIFY:       pred.store.if:
452; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP4]], ptr [[TMP2]], align 1
453; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
454; UNROLL-NOSIMPLIFY:       pred.store.continue:
455; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
456; UNROLL-NOSIMPLIFY:       pred.store.if1:
457; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP5]], ptr [[TMP3]], align 1
458; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE2]]
459; UNROLL-NOSIMPLIFY:       pred.store.continue2:
460; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
461; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
462; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
463; UNROLL-NOSIMPLIFY:       middle.block:
464; UNROLL-NOSIMPLIFY-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
465; UNROLL-NOSIMPLIFY:       scalar.ph:
466; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
467; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL3:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 1000, [[ENTRY]] ]
468; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY:%.*]]
469; UNROLL-NOSIMPLIFY:       for.body:
470; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
471; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ]
472; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr undef, i64 [[TMP0]]
473; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1
474; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
475; UNROLL-NOSIMPLIFY:       if.then:
476; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
477; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
478; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP5]], ptr [[TMP2]], align 1
479; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC]]
480; UNROLL-NOSIMPLIFY:       for.inc:
481; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
482; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
483; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
484; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
485; UNROLL-NOSIMPLIFY:       for.end:
486; UNROLL-NOSIMPLIFY-NEXT:    ret void
487;
488; VEC-LABEL: @minimal_bit_widths(
489; VEC-NEXT:  entry:
490; VEC-NEXT:    br label [[VECTOR_BODY:%.*]]
491; VEC:       vector.body:
492; VEC-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
493; VEC-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
494; VEC-NEXT:    [[TMP1:%.*]] = getelementptr i8, ptr undef, i64 [[TMP0]]
495; VEC-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 0
496; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i8>, ptr [[TMP2]], align 1
497; VEC-NEXT:    br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE2]]
498; VEC:       pred.store.if:
499; VEC-NEXT:    [[TMP4:%.*]] = getelementptr i8, ptr undef, i64 [[TMP0]]
500; VEC-NEXT:    [[TMP5:%.*]] = extractelement <2 x i8> [[WIDE_LOAD]], i32 0
501; VEC-NEXT:    store i8 [[TMP5]], ptr [[TMP4]], align 1
502; VEC-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 1
503; VEC-NEXT:    [[TMP8:%.*]] = getelementptr i8, ptr undef, i64 [[TMP7]]
504; VEC-NEXT:    [[TMP9:%.*]] = extractelement <2 x i8> [[WIDE_LOAD]], i32 1
505; VEC-NEXT:    store i8 [[TMP9]], ptr [[TMP8]], align 1
506; VEC-NEXT:    br label [[PRED_STORE_CONTINUE2]]
507; VEC:       pred.store.continue2:
508; VEC-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
509; VEC-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
510; VEC-NEXT:    br i1 [[TMP10]], label [[FOR_END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
511; VEC:       for.end:
512; VEC-NEXT:    ret void
513;
514entry:
515  br label %for.body
516
517for.body:
518  %tmp0 = phi i64 [ %tmp6, %for.inc ], [ 0, %entry ]
519  %tmp1 = phi i64 [ %tmp7, %for.inc ], [ 1000, %entry ]
520  %tmp2 = getelementptr i8, ptr undef, i64 %tmp0
521  %tmp3 = load i8, ptr %tmp2, align 1
522  br i1 %c, label %if.then, label %for.inc
523
524if.then:
525  %tmp4 = zext i8 %tmp3 to i32
526  %tmp5 = trunc i32 %tmp4 to i8
527  store i8 %tmp5, ptr %tmp2, align 1
528  br label %for.inc
529
530for.inc:
531  %tmp6 = add nuw nsw i64 %tmp0, 1
532  %tmp7 = add i64 %tmp1, -1
533  %tmp8 = icmp eq i64 %tmp7, 0
534  br i1 %tmp8, label %for.end, label %for.body
535
536for.end:
537  ret void
538}
539
define void @minimal_bit_widths_with_aliasing_store(i1 %c, ptr %ptr) {
; UNROLL-LABEL: @minimal_bit_widths_with_aliasing_store(
; UNROLL-NEXT:  entry:
; UNROLL-NEXT:    br label [[FOR_BODY:%.*]]
; UNROLL:       vector.body:
; UNROLL-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[FOR_INC:%.*]] ]
; UNROLL-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 [[TMP0]]
; UNROLL-NEXT:    [[TMP4:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP1]]
; UNROLL-NEXT:    [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1
; UNROLL-NEXT:    [[TMP5:%.*]] = load i8, ptr [[TMP4]], align 1
; UNROLL-NEXT:    store i8 0, ptr [[TMP2]], align 1
; UNROLL-NEXT:    store i8 0, ptr [[TMP4]], align 1
; UNROLL-NEXT:    br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; UNROLL:       pred.store.if:
; UNROLL-NEXT:    store i8 [[TMP3]], ptr [[TMP2]], align 1
; UNROLL-NEXT:    store i8 [[TMP5]], ptr [[TMP4]], align 1
; UNROLL-NEXT:    br label [[FOR_INC]]
; UNROLL:       pred.store.continue2:
; UNROLL-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; UNROLL-NEXT:    br i1 [[TMP6]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; UNROLL:       for.end:
; UNROLL-NEXT:    ret void
;
; UNROLL-NOSIMPLIFY-LABEL: @minimal_bit_widths_with_aliasing_store(
; UNROLL-NOSIMPLIFY-NEXT:  entry:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; UNROLL-NOSIMPLIFY:       vector.ph:
; UNROLL-NOSIMPLIFY-NEXT:    br label [[VECTOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       vector.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 [[TMP0]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP1]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = load i8, ptr [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = load i8, ptr [[TMP3]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    store i8 0, ptr [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    store i8 0, ptr [[TMP3]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL-NOSIMPLIFY:       pred.store.if:
; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP4]], ptr [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE]]
; UNROLL-NOSIMPLIFY:       pred.store.continue:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
; UNROLL-NOSIMPLIFY:       pred.store.if1:
; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP5]], ptr [[TMP3]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; UNROLL-NOSIMPLIFY:       pred.store.continue2:
; UNROLL-NOSIMPLIFY-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; UNROLL-NOSIMPLIFY:       middle.block:
; UNROLL-NOSIMPLIFY-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; UNROLL-NOSIMPLIFY:       scalar.ph:
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[BC_RESUME_VAL3:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 1000, [[ENTRY]] ]
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY:       for.body:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP0:%.*]] = phi i64 [ [[TMP6:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP1:%.*]] = phi i64 [ [[TMP7:%.*]], [[FOR_INC]] ], [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP0]]
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    store i8 0, ptr [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[C]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; UNROLL-NOSIMPLIFY:       if.then:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8
; UNROLL-NOSIMPLIFY-NEXT:    store i8 [[TMP5]], ptr [[TMP2]], align 1
; UNROLL-NOSIMPLIFY-NEXT:    br label [[FOR_INC]]
; UNROLL-NOSIMPLIFY:       for.inc:
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP7]] = add i64 [[TMP1]], -1
; UNROLL-NOSIMPLIFY-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
; UNROLL-NOSIMPLIFY-NEXT:    br i1 [[TMP8]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; UNROLL-NOSIMPLIFY:       for.end:
; UNROLL-NOSIMPLIFY-NEXT:    ret void
;
; VEC-LABEL: @minimal_bit_widths_with_aliasing_store(
; VEC-NEXT:  entry:
; VEC-NEXT:    br label [[FOR_BODY:%.*]]
; VEC:       vector.body:
; VEC-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
; VEC-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; VEC-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 [[TMP0]]
; VEC-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[TMP2]], i32 0
; VEC-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i8>, ptr [[TMP3]], align 1
; VEC-NEXT:    store <2 x i8> zeroinitializer, ptr [[TMP3]], align 1
; VEC-NEXT:    br i1 [[C:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE2]]
; VEC:       pred.store.if:
; VEC-NEXT:    [[TMP4:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP0]]
; VEC-NEXT:    [[TMP5:%.*]] = extractelement <2 x i8> [[WIDE_LOAD]], i32 0
; VEC-NEXT:    store i8 [[TMP5]], ptr [[TMP4]], align 1
; VEC-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 1
; VEC-NEXT:    [[TMP8:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[TMP7]]
; VEC-NEXT:    [[TMP9:%.*]] = extractelement <2 x i8> [[WIDE_LOAD]], i32 1
; VEC-NEXT:    store i8 [[TMP9]], ptr [[TMP8]], align 1
; VEC-NEXT:    br label [[PRED_STORE_CONTINUE2]]
; VEC:       pred.store.continue2:
; VEC-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
; VEC-NEXT:    br i1 [[TMP10]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VEC:       for.end:
; VEC-NEXT:    ret void
;
; Same loop shape as @minimal_bit_widths above, but with an additional
; unconditional `store i8 0` to %tmp2 in the loop body. That store aliases
; the predicated store in if.then (both write through %tmp2), so after
; vectorization the unconditional (zero) stores must remain ordered before
; the predicated stores — which the CHECK lines above verify: in every
; prefix the zero store(s) appear before the pred.store.if / if.then block.
; NOTE(review): CHECK lines are autogenerated by update_test_checks.py
; (see header); regenerate rather than hand-edit them if the IR changes.
entry:
  br label %for.body

for.body:
  ; %tmp0: byte index, counts up from 0.
  %tmp0 = phi i64 [ %tmp6, %for.inc ], [ 0, %entry ]
  ; %tmp1: countdown trip counter, starts at 1000 -> loop runs 1000 iterations.
  %tmp1 = phi i64 [ %tmp7, %for.inc ], [ 1000, %entry ]
  %tmp2 = getelementptr i8, ptr %ptr, i64 %tmp0
  %tmp3 = load i8, ptr %tmp2, align 1
  ; Unconditional store that aliases the predicated store below (same %tmp2).
  store i8 0, ptr %tmp2
  br i1 %c, label %if.then, label %for.inc

if.then:
  ; Predicated path: zext+trunc round-trips %tmp3 unchanged, then writes the
  ; originally-loaded value back over the zero written above.
  %tmp4 = zext i8 %tmp3 to i32
  %tmp5 = trunc i32 %tmp4 to i8
  store i8 %tmp5, ptr %tmp2, align 1
  br label %for.inc

for.inc:
  %tmp6 = add nuw nsw i64 %tmp0, 1
  %tmp7 = add i64 %tmp1, -1
  ; Exit once the countdown reaches zero.
  %tmp8 = icmp eq i64 %tmp7, 0
  br i1 %tmp8, label %for.end, label %for.body

for.end:
  ret void
}
673