; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
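
; The fold being tested: a mask of all bits up to and including bit %x,
;   (1 << %x) | ((1 << %x) - 1)  ->  -1 >> ((bitwidth - 1) - %x)
; (for i8, bitwidth - 1 is 7), as reflected in the CHECK lines below.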

declare void @use8(i8)

; Basic test
define i8 @t0(i8 %x) {
; CHECK-LABEL: @t0(
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X:%.*]]
; CHECK-NEXT:    [[MASK:%.*]] = lshr i8 -1, [[TMP1]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask = shl i8 1, %x
  %lowbitmask = add i8 %bitmask, -1
  %mask = or i8 %lowbitmask, %bitmask
  ret i8 %mask
}

; Same, but different bit width
define i16 @t1(i16 %x) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[TMP1:%.*]] = sub i16 15, [[X:%.*]]
; CHECK-NEXT:    [[MASK:%.*]] = lshr i16 -1, [[TMP1]]
; CHECK-NEXT:    ret i16 [[MASK]]
;
  %bitmask = shl i16 1, %x
  %lowbitmask = add i16 %bitmask, -1
  %mask = or i16 %lowbitmask, %bitmask
  ret i16 %mask
}

; Vectors
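; The vector constants may contain poison elements (t3..t5); per the CHECK
; lines, the fold still applies.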
define <2 x i8> @t2_vec(<2 x i8> %x) {
; CHECK-LABEL: @t2_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = sub <2 x i8> splat (i8 7), [[X:%.*]]
; CHECK-NEXT:    [[MASK:%.*]] = lshr <2 x i8> splat (i8 -1), [[TMP1]]
; CHECK-NEXT:    ret <2 x i8> [[MASK]]
;
  %bitmask = shl <2 x i8> <i8 1, i8 1>, %x
  %lowbitmask = add <2 x i8> %bitmask, <i8 -1, i8 -1>
  %mask = or <2 x i8> %lowbitmask, %bitmask
  ret <2 x i8> %mask
}
define <3 x i8> @t3_vec_poison0(<3 x i8> %x) {
; CHECK-LABEL: @t3_vec_poison0(
; CHECK-NEXT:    [[TMP1:%.*]] = sub <3 x i8> splat (i8 7), [[X:%.*]]
; CHECK-NEXT:    [[MASK:%.*]] = lshr <3 x i8> splat (i8 -1), [[TMP1]]
; CHECK-NEXT:    ret <3 x i8> [[MASK]]
;
  %bitmask = shl <3 x i8> <i8 1, i8 poison, i8 1>, %x
  %lowbitmask = add <3 x i8> %bitmask, <i8 -1, i8 -1, i8 -1>
  %mask = or <3 x i8> %lowbitmask, %bitmask
  ret <3 x i8> %mask
}
define <3 x i8> @t4_vec_poison1(<3 x i8> %x) {
; CHECK-LABEL: @t4_vec_poison1(
; CHECK-NEXT:    [[TMP1:%.*]] = sub <3 x i8> splat (i8 7), [[X:%.*]]
; CHECK-NEXT:    [[MASK:%.*]] = lshr <3 x i8> splat (i8 -1), [[TMP1]]
; CHECK-NEXT:    ret <3 x i8> [[MASK]]
;
  %bitmask = shl <3 x i8> <i8 1, i8 1, i8 1>, %x
  %lowbitmask = add <3 x i8> %bitmask, <i8 -1, i8 poison, i8 -1>
  %mask = or <3 x i8> %lowbitmask, %bitmask
  ret <3 x i8> %mask
}
define <3 x i8> @t5_vec_poison2(<3 x i8> %x) {
; CHECK-LABEL: @t5_vec_poison2(
; CHECK-NEXT:    [[TMP1:%.*]] = sub <3 x i8> splat (i8 7), [[X:%.*]]
; CHECK-NEXT:    [[MASK:%.*]] = lshr <3 x i8> splat (i8 -1), [[TMP1]]
; CHECK-NEXT:    ret <3 x i8> [[MASK]]
;
  %bitmask = shl <3 x i8> <i8 1, i8 1, i8 poison>, %x
  %lowbitmask = add <3 x i8> %bitmask, <i8 -1, i8 poison, i8 -1>
  %mask = or <3 x i8> %lowbitmask, %bitmask
  ret <3 x i8> %mask
}

; One-use tests
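; Per the CHECK lines: an extra use of %bitmask alone (t6) does not block
; the fold, but an extra use of %lowbitmask (t7, t8) does.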
define i8 @t6_extrause0(i8 %x) {
; CHECK-LABEL: @t6_extrause0(
; CHECK-NEXT:    [[BITMASK:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[BITMASK]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X]]
; CHECK-NEXT:    [[MASK:%.*]] = lshr i8 -1, [[TMP1]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask = shl i8 1, %x
  call void @use8(i8 %bitmask)
  %lowbitmask = add i8 %bitmask, -1
  %mask = or i8 %lowbitmask, %bitmask
  ret i8 %mask
}
define i8 @t7_extrause1(i8 %x) {
; CHECK-LABEL: @t7_extrause1(
; CHECK-NEXT:    [[BITMASK:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    [[LOWBITMASK:%.*]] = add i8 [[BITMASK]], -1
; CHECK-NEXT:    call void @use8(i8 [[LOWBITMASK]])
; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[LOWBITMASK]], [[BITMASK]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask = shl i8 1, %x
  %lowbitmask = add i8 %bitmask, -1
  call void @use8(i8 %lowbitmask)
  %mask = or i8 %lowbitmask, %bitmask
  ret i8 %mask
}
define i8 @t8_extrause2(i8 %x) {
; CHECK-LABEL: @t8_extrause2(
; CHECK-NEXT:    [[BITMASK:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[BITMASK]])
; CHECK-NEXT:    [[LOWBITMASK:%.*]] = add i8 [[BITMASK]], -1
; CHECK-NEXT:    call void @use8(i8 [[LOWBITMASK]])
; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[LOWBITMASK]], [[BITMASK]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask = shl i8 1, %x
  call void @use8(i8 %bitmask)
  %lowbitmask = add i8 %bitmask, -1
  call void @use8(i8 %lowbitmask)
  %mask = or i8 %lowbitmask, %bitmask
  ret i8 %mask
}

; Non-CSE'd test
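; The two identical `shl`s are deliberately not CSE'd. The mask fold does
; not fire; instead %lowbitmask is canonicalized to ~(-1 << %x).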
define i8 @t9_nocse(i8 %x) {
; CHECK-LABEL: @t9_nocse(
; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X]]
; CHECK-NEXT:    [[LOWBITMASK:%.*]] = xor i8 [[NOTMASK]], -1
; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[BITMASK1]], [[LOWBITMASK]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask0 = shl i8 1, %x
  %bitmask1 = shl i8 1, %x
  %lowbitmask = add i8 %bitmask0, -1
  %mask = or i8 %lowbitmask, %bitmask1
  ret i8 %mask
}

; Non-CSE'd extra uses test
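; Whether the fold fires depends on which of the duplicated values carry
; the extra uses; compare the CHECK lines of t10..t16.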
define i8 @t10_nocse_extrause0(i8 %x) {
; CHECK-LABEL: @t10_nocse_extrause0(
; CHECK-NEXT:    [[BITMASK0:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[BITMASK0]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X]]
; CHECK-NEXT:    [[MASK:%.*]] = lshr i8 -1, [[TMP1]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask0 = shl i8 1, %x
  call void @use8(i8 %bitmask0)
  %bitmask1 = shl i8 1, %x
  %lowbitmask = add i8 %bitmask0, -1
  %mask = or i8 %lowbitmask, %bitmask1
  ret i8 %mask
}
define i8 @t11_nocse_extrause1(i8 %x) {
; CHECK-LABEL: @t11_nocse_extrause1(
; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[BITMASK1]])
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X]]
; CHECK-NEXT:    [[LOWBITMASK:%.*]] = xor i8 [[NOTMASK]], -1
; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[BITMASK1]], [[LOWBITMASK]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask0 = shl i8 1, %x
  %bitmask1 = shl i8 1, %x
  call void @use8(i8 %bitmask1)
  %lowbitmask = add i8 %bitmask0, -1
  %mask = or i8 %lowbitmask, %bitmask1
  ret i8 %mask
}
define i8 @t12_nocse_extrause2(i8 %x) {
; CHECK-LABEL: @t12_nocse_extrause2(
; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X]]
; CHECK-NEXT:    [[LOWBITMASK:%.*]] = xor i8 [[NOTMASK]], -1
; CHECK-NEXT:    call void @use8(i8 [[LOWBITMASK]])
; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[BITMASK1]], [[LOWBITMASK]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask0 = shl i8 1, %x
  %bitmask1 = shl i8 1, %x
  %lowbitmask = add i8 %bitmask0, -1
  call void @use8(i8 %lowbitmask)
  %mask = or i8 %lowbitmask, %bitmask1
  ret i8 %mask
}
define i8 @t13_nocse_extrause3(i8 %x) {
; CHECK-LABEL: @t13_nocse_extrause3(
; CHECK-NEXT:    [[BITMASK0:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[BITMASK0]])
; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X]]
; CHECK-NEXT:    call void @use8(i8 [[BITMASK1]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X]]
; CHECK-NEXT:    [[MASK:%.*]] = lshr i8 -1, [[TMP1]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask0 = shl i8 1, %x
  call void @use8(i8 %bitmask0)
  %bitmask1 = shl i8 1, %x
  call void @use8(i8 %bitmask1)
  %lowbitmask = add i8 %bitmask0, -1
  %mask = or i8 %lowbitmask, %bitmask1
  ret i8 %mask
}
define i8 @t14_nocse_extrause4(i8 %x) {
; CHECK-LABEL: @t14_nocse_extrause4(
; CHECK-NEXT:    [[BITMASK0:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[BITMASK0]])
; CHECK-NEXT:    [[LOWBITMASK:%.*]] = add i8 [[BITMASK0]], -1
; CHECK-NEXT:    call void @use8(i8 [[LOWBITMASK]])
; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 7, [[X]]
; CHECK-NEXT:    [[MASK:%.*]] = lshr i8 -1, [[TMP1]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask0 = shl i8 1, %x
  call void @use8(i8 %bitmask0)
  %bitmask1 = shl i8 1, %x
  %lowbitmask = add i8 %bitmask0, -1
  call void @use8(i8 %lowbitmask)
  %mask = or i8 %lowbitmask, %bitmask1
  ret i8 %mask
}
define i8 @t15_nocse_extrause5(i8 %x) {
; CHECK-LABEL: @t15_nocse_extrause5(
; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[BITMASK1]])
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X]]
; CHECK-NEXT:    [[LOWBITMASK:%.*]] = xor i8 [[NOTMASK]], -1
; CHECK-NEXT:    call void @use8(i8 [[LOWBITMASK]])
; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[BITMASK1]], [[LOWBITMASK]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask0 = shl i8 1, %x
  %bitmask1 = shl i8 1, %x
  call void @use8(i8 %bitmask1)
  %lowbitmask = add i8 %bitmask0, -1
  call void @use8(i8 %lowbitmask)
  %mask = or i8 %lowbitmask, %bitmask1
  ret i8 %mask
}
define i8 @t16_nocse_extrause6(i8 %x) {
; CHECK-LABEL: @t16_nocse_extrause6(
; CHECK-NEXT:    [[BITMASK0:%.*]] = shl nuw i8 1, [[X:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[BITMASK0]])
; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X]]
; CHECK-NEXT:    call void @use8(i8 [[BITMASK1]])
; CHECK-NEXT:    [[LOWBITMASK:%.*]] = add i8 [[BITMASK0]], -1
; CHECK-NEXT:    call void @use8(i8 [[LOWBITMASK]])
; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[LOWBITMASK]], [[BITMASK1]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask0 = shl i8 1, %x
  call void @use8(i8 %bitmask0)
  %bitmask1 = shl i8 1, %x
  call void @use8(i8 %bitmask1)
  %lowbitmask = add i8 %bitmask0, -1
  call void @use8(i8 %lowbitmask)
  %mask = or i8 %lowbitmask, %bitmask1
  ret i8 %mask
}

; Non-CSE'd test with mismatching X's.
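; The shift amounts differ, so there is no single mask to form and the
; fold must not fire.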
define i8 @t17_nocse_mismatching_x(i8 %x0, i8 %x1) {
; CHECK-LABEL: @t17_nocse_mismatching_x(
; CHECK-NEXT:    [[BITMASK1:%.*]] = shl nuw i8 1, [[X1:%.*]]
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i8 -1, [[X0:%.*]]
; CHECK-NEXT:    [[LOWBITMASK:%.*]] = xor i8 [[NOTMASK]], -1
; CHECK-NEXT:    [[MASK:%.*]] = or i8 [[BITMASK1]], [[LOWBITMASK]]
; CHECK-NEXT:    ret i8 [[MASK]]
;
  %bitmask0 = shl i8 1, %x0
  %bitmask1 = shl i8 1, %x1
  %lowbitmask = add i8 %bitmask0, -1
  %mask = or i8 %lowbitmask, %bitmask1
  ret i8 %mask
}