; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

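; These tests exercise the IR for the std::bit_floor idiom. As a minimal
; C sketch of the scalar pattern (illustrative only, not part of the
; test):
;
;   unsigned bit_floor(unsigned x) {
;     if (x == 0)
;       return 0;
;     /* Clear all but the highest set bit: the largest power of two
;        that is <= x. __builtin_clz is only defined for nonzero x. */
;     return 1u << (31 - __builtin_clz(x));
;   }
;
; The IR below instead shifts x right by one and calls ctlz with a
; defined-at-zero result, so x == 1 needs no special casing: for any
; x > 0, 32 - ctlz(x >> 1) == 31 - ctlz(x). Because %lshr always has at
; least one leading zero, ctlz(%lshr) lies in [1, 32]; the CHECK lines
; expect InstCombine to surface that as the range(i32 1, 33) attribute on
; the call and as nuw/nsw flags on the dependent sub and shl.
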
define i32 @bit_floor_32(i32 %x) {
; CHECK-LABEL: @bit_floor_32(
; CHECK-NEXT:    [[EQ0:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X]], 1
; CHECK-NEXT:    [[CTLZ:%.*]] = tail call range(i32 1, 33) i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false)
; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[EQ0]], i32 0, i32 [[SHL]]
; CHECK-NEXT:    ret i32 [[SEL]]
;
  %eq0 = icmp eq i32 %x, 0
  %lshr = lshr i32 %x, 1
  %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
  %sub = sub i32 32, %ctlz
  %shl = shl i32 1, %sub
  %sel = select i1 %eq0, i32 0, i32 %shl
  ret i32 %sel
}

define i64 @bit_floor_64(i64 %x) {
; CHECK-LABEL: @bit_floor_64(
; CHECK-NEXT:    [[EQ0:%.*]] = icmp eq i64 [[X:%.*]], 0
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i64 [[X]], 1
; CHECK-NEXT:    [[CTLZ:%.*]] = tail call range(i64 1, 65) i64 @llvm.ctlz.i64(i64 [[LSHR]], i1 false)
; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i64 64, [[CTLZ]]
; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i64 1, [[SUB]]
; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[EQ0]], i64 0, i64 [[SHL]]
; CHECK-NEXT:    ret i64 [[SEL]]
;
  %eq0 = icmp eq i64 %x, 0
  %lshr = lshr i64 %x, 1
  %ctlz = tail call i64 @llvm.ctlz.i64(i64 %lshr, i1 false)
  %sub = sub i64 64, %ctlz
  %shl = shl i64 1, %sub
  %sel = select i1 %eq0, i64 0, i64 %shl
  ret i64 %sel
}

; Commuted select operands should still be recognized.
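; (InstCombine canonicalizes the ne compare and swapped select arms back
; to an eq compare with the arms exchanged, which is what the CHECK lines
; match.)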
define i32 @bit_floor_commuted_operands(i32 %x) {
; CHECK-LABEL: @bit_floor_commuted_operands(
; CHECK-NEXT:    [[NE0_NOT:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X]], 1
; CHECK-NEXT:    [[CTLZ:%.*]] = tail call range(i32 1, 33) i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false)
; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[NE0_NOT]], i32 0, i32 [[SHL]]
; CHECK-NEXT:    ret i32 [[SEL]]
;
  %ne0 = icmp ne i32 %x, 0
  %lshr = lshr i32 %x, 1
  %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
  %sub = sub i32 32, %ctlz
  %shl = shl i32 1, %sub
  %sel = select i1 %ne0, i32 %shl, i32 0
  ret i32 %sel
}

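; The negative tests below give one intermediate value a second use; a
; would-be fold of the whole sequence should then be blocked, since the
; intermediate instruction has to stay alive anyway.
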
; Negative test: lshr used twice
define i32 @bit_floor_lshr_used_twice(i32 %x, ptr %p) {
; CHECK-LABEL: @bit_floor_lshr_used_twice(
; CHECK-NEXT:    [[EQ0:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X]], 1
; CHECK-NEXT:    [[CTLZ:%.*]] = tail call range(i32 1, 33) i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false)
; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[EQ0]], i32 0, i32 [[SHL]]
; CHECK-NEXT:    store i32 [[LSHR]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 [[SEL]]
;
  %eq0 = icmp eq i32 %x, 0
  %lshr = lshr i32 %x, 1
  %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
  %sub = sub i32 32, %ctlz
  %shl = shl i32 1, %sub
  %sel = select i1 %eq0, i32 0, i32 %shl
  store i32 %lshr, ptr %p, align 4
  ret i32 %sel
}

; Negative test: ctlz used twice
define i32 @bit_floor_ctlz_used_twice(i32 %x, ptr %p) {
; CHECK-LABEL: @bit_floor_ctlz_used_twice(
; CHECK-NEXT:    [[EQ0:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X]], 1
; CHECK-NEXT:    [[CTLZ:%.*]] = tail call range(i32 1, 33) i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false)
; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[EQ0]], i32 0, i32 [[SHL]]
; CHECK-NEXT:    store i32 [[CTLZ]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 [[SEL]]
;
  %eq0 = icmp eq i32 %x, 0
  %lshr = lshr i32 %x, 1
  %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
  %sub = sub i32 32, %ctlz
  %shl = shl i32 1, %sub
  %sel = select i1 %eq0, i32 0, i32 %shl
  store i32 %ctlz, ptr %p, align 4
  ret i32 %sel
}

; Negative test: sub used twice
define i32 @bit_floor_sub_used_twice(i32 %x, ptr %p) {
; CHECK-LABEL: @bit_floor_sub_used_twice(
; CHECK-NEXT:    [[EQ0:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X]], 1
; CHECK-NEXT:    [[CTLZ:%.*]] = tail call range(i32 1, 33) i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false)
; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[EQ0]], i32 0, i32 [[SHL]]
; CHECK-NEXT:    store i32 [[SUB]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 [[SEL]]
;
  %eq0 = icmp eq i32 %x, 0
  %lshr = lshr i32 %x, 1
  %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
  %sub = sub i32 32, %ctlz
  %shl = shl i32 1, %sub
  %sel = select i1 %eq0, i32 0, i32 %shl
  store i32 %sub, ptr %p, align 4
  ret i32 %sel
}

; Negative test: shl used twice
define i32 @bit_floor_shl_used_twice(i32 %x, ptr %p) {
; CHECK-LABEL: @bit_floor_shl_used_twice(
; CHECK-NEXT:    [[EQ0:%.*]] = icmp eq i32 [[X:%.*]], 0
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[X]], 1
; CHECK-NEXT:    [[CTLZ:%.*]] = tail call range(i32 1, 33) i32 @llvm.ctlz.i32(i32 [[LSHR]], i1 false)
; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 32, [[CTLZ]]
; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 1, [[SUB]]
; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[EQ0]], i32 0, i32 [[SHL]]
; CHECK-NEXT:    store i32 [[SHL]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 [[SEL]]
;
  %eq0 = icmp eq i32 %x, 0
  %lshr = lshr i32 %x, 1
  %ctlz = tail call i32 @llvm.ctlz.i32(i32 %lshr, i1 false)
  %sub = sub i32 32, %ctlz
  %shl = shl i32 1, %sub
  %sel = select i1 %eq0, i32 0, i32 %shl
  store i32 %shl, ptr %p, align 4
  ret i32 %sel
}

; A vector version of @bit_floor_32 above.
define <4 x i32> @bit_floor_v4i32(<4 x i32> %x) {
; CHECK-LABEL: @bit_floor_v4i32(
; CHECK-NEXT:    [[EQ0:%.*]] = icmp eq <4 x i32> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[LSHR:%.*]] = lshr <4 x i32> [[X]], splat (i32 1)
; CHECK-NEXT:    [[CTLZ:%.*]] = tail call range(i32 1, 33) <4 x i32> @llvm.ctlz.v4i32(<4 x i32> [[LSHR]], i1 false)
; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw <4 x i32> splat (i32 32), [[CTLZ]]
; CHECK-NEXT:    [[SHL:%.*]] = shl nuw <4 x i32> splat (i32 1), [[SUB]]
; CHECK-NEXT:    [[SEL:%.*]] = select <4 x i1> [[EQ0]], <4 x i32> zeroinitializer, <4 x i32> [[SHL]]
; CHECK-NEXT:    ret <4 x i32> [[SEL]]
;
  %eq0 = icmp eq <4 x i32> %x, <i32 0, i32 0, i32 0, i32 0>
  %lshr = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  %ctlz = tail call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %lshr, i1 false)
  %sub = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %ctlz
  %shl = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %sub
  %sel = select <4 x i1> %eq0, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> %shl
  ret <4 x i32> %sel
}

declare i32 @llvm.ctlz.i32(i32, i1 immarg)
declare i64 @llvm.ctlz.i64(i64, i1 immarg)
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1 immarg)