; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -O2                   -S < %s  | FileCheck %s
; RUN: opt -passes="default<O2>" -S < %s  | FileCheck %s


target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.15.0"

; https://llvm.org/PR49055
;
; void loop_or(const unsigned char* __restrict pIn, unsigned int* __restrict pOut, int s) {
;   for (int i = 0; i < s; i++) {
;     unsigned int pixelChar = pIn[i];
;     unsigned int pixel = pixelChar | (pixelChar << 8) | (pixelChar << 16) | (255 << 24);
;     pOut[i] = pixel;
;   }
; }
;
; We are looking for the shifts to get combined into a mul along with vectorization.
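;
; Why the constants in the CHECK lines below: for an 8-bit value x (zero-extended
; to i32), x | (x << 8) | (x << 16) == x * 0x00010101 (= 65793), since the three
; byte-wide copies occupy disjoint bit ranges. Likewise (255 << 24) == 0xFF000000,
; which is -16777216 as a signed i32 and is disjoint from x * 65793 (whose maximum
; is 0x00FFFFFF), hence the 'or disjoint'.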

define void @loop_or(ptr noalias %pIn, ptr noalias %pOut, i32 %s) {
; CHECK-LABEL: @loop_or(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt i32 [[S:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[S]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[S]], 8
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[FOR_BODY_PREHEADER5:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_VEC:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 2147483640
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[PIN:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 4
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT:    [[WIDE_LOAD4:%.*]] = load <4 x i8>, ptr [[TMP1]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i32>
; CHECK-NEXT:    [[TMP3:%.*]] = zext <4 x i8> [[WIDE_LOAD4]] to <4 x i32>
; CHECK-NEXT:    [[TMP4:%.*]] = mul nuw nsw <4 x i32> [[TMP2]], splat (i32 65793)
; CHECK-NEXT:    [[TMP5:%.*]] = mul nuw nsw <4 x i32> [[TMP3]], splat (i32 65793)
; CHECK-NEXT:    [[TMP6:%.*]] = or disjoint <4 x i32> [[TMP4]], splat (i32 -16777216)
; CHECK-NEXT:    [[TMP7:%.*]] = or disjoint <4 x i32> [[TMP5]], splat (i32 -16777216)
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds nuw i32, ptr [[POUT:%.*]], i64 [[INDEX]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP8]], i64 16
; CHECK-NEXT:    store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4
; CHECK-NEXT:    store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END]], label [[FOR_BODY_PREHEADER5]]
; CHECK:       for.body.preheader5:
; CHECK-NEXT:    [[INDVARS_IV_PH:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[INDVARS_IV_PH]], [[FOR_BODY_PREHEADER5]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[PIN]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[TMP11]] to i32
; CHECK-NEXT:    [[OR2:%.*]] = mul nuw nsw i32 [[CONV]], 65793
; CHECK-NEXT:    [[OR3:%.*]] = or disjoint i32 [[OR2]], -16777216
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i32, ptr [[POUT]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store i32 [[OR3]], ptr [[ARRAYIDX5]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.cond

for.cond:
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
  %cmp = icmp slt i32 %i.0, %s
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  br label %for.end

for.body:
  %idxprom = sext i32 %i.0 to i64
  %arrayidx = getelementptr inbounds i8, ptr %pIn, i64 %idxprom
  %0 = load i8, ptr %arrayidx, align 1
  %conv = zext i8 %0 to i32
  %shl = shl i32 %conv, 8
  %or = or i32 %conv, %shl
  %shl1 = shl i32 %conv, 16
  %or2 = or i32 %or, %shl1
  %or3 = or i32 %or2, -16777216
  %idxprom4 = sext i32 %i.0 to i64
  %arrayidx5 = getelementptr inbounds i32, ptr %pOut, i64 %idxprom4
  store i32 %or3, ptr %arrayidx5, align 4
  br label %for.inc

for.inc:
  %inc = add nsw i32 %i.0, 1
  br label %for.cond

for.end:
  ret void
}