xref: /llvm-project/llvm/test/Transforms/LoopVectorize/bsd_regex.ll (revision 38fffa630ee80163dc65e759392ad29798905679)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt -S -passes=loop-vectorize,dce,instcombine -force-vector-width=2 -force-vector-interleave=2 < %s | FileCheck %s
3
4target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
5
6; PR15830: scalarized strided stores must be emitted in original program order.
7
8; When scalarizing stores we need to preserve the original order.
9; Make sure that we are extracting in the correct order (0101, and not 0011).
10
11define i32 @foo(ptr nocapture %A) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = shl nsw <2 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT:    [[STEP_ADD:%.*]] = shl <2 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i64> [[STEP_ADD]], splat (i64 8)
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[TMP0]], i64 0
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[TMP0]], i64 1
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[TMP1]], i64 0
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <2 x i64> [[TMP1]], i64 1
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP8]]
; CHECK-NEXT:    store i32 4, ptr [[TMP3]], align 4
; CHECK-NEXT:    store i32 4, ptr [[TMP5]], align 4
; CHECK-NEXT:    store i32 4, ptr [[TMP7]], align 4
; CHECK-NEXT:    store i32 4, ptr [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 4)
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    br i1 poison, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    ret i32 undef
;
; Scalar input loop: for i = 0..9999, store 4 to A[i << 2] (a stride-4
; i32 access, so the vectorizer must scalarize the stores). With VF=2 and
; IC=2 forced on the RUN line, the CHECK block above verifies the four
; scalarized stores extract lanes in original order (part0 lane0, part0
; lane1, part1 lane0, part1 lane1 -- i.e. 0101, not 0011).
48entry:
49  br label %for.body
50
51for.body:
  ; Canonical i64 induction variable, 0 on loop entry.
52  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  ; Index scaled by 4 (shl 2) -- the non-unit stride that forces store
  ; scalarization in the vector body.
53  %0 = shl nsw i64 %indvars.iv, 2
54  %arrayidx = getelementptr inbounds i32, ptr %A, i64 %0
55  store i32 4, ptr %arrayidx, align 4
56  %indvars.iv.next = add i64 %indvars.iv, 1
  ; Trip-count check done on the truncated i32 value: exit when i+1 == 10000.
57  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
58  %exitcond = icmp eq i32 %lftr.wideiv, 10000
59  br i1 %exitcond, label %for.end, label %for.body
60
61for.end:
62  ret i32 undef
63}
64
65
66