; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine %s -S -o - | FileCheck %s

; Clamp negative to zero:
; E.g., clamp0 implemented in a shifty way can be optimized to v > 0 ? v : 0 when the sub hasNoSignedWrap.
; int32 clamp0(int32 v) {
;   return ((-(v) >> 31) & (v));
; }
;
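;
; A sketch of the fold these tests exercise (mirroring the CHECK lines below);
; for an N-bit integer type, with illustrative value names:
;   %sub = sub nsw iN %y, %x
;   %shr = ashr iN %sub, N-1    ; all-ones iff %y < %x (nsw, so the sign bit is exact)
;   %and = and iN %shr, %x
; =>
;   %cmp = icmp slt iN %y, %x
;   %and = select i1 %cmp, iN %x, iN 0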

; Scalar Types

define i8 @sub_ashr_and_i8(i8 %x, i8 %y) {
; CHECK-LABEL: @sub_ashr_and_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i8 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[TMP1]], i8 [[X]], i8 0
; CHECK-NEXT:    ret i8 [[AND]]
;
  %sub = sub nsw i8 %y, %x
  %shr = ashr i8 %sub, 7
  %and = and i8 %shr, %x
  ret i8 %and
}

define i16 @sub_ashr_and_i16(i16 %x, i16 %y) {
; CHECK-LABEL: @sub_ashr_and_i16(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i16 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[TMP1]], i16 [[X]], i16 0
; CHECK-NEXT:    ret i16 [[AND]]
;

  %sub = sub nsw i16 %y, %x
  %shr = ashr i16 %sub, 15
  %and = and i16 %shr, %x
  ret i16 %and
}

define i32 @sub_ashr_and_i32(i32 %x, i32 %y) {
; CHECK-LABEL: @sub_ashr_and_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 0
; CHECK-NEXT:    ret i32 [[AND]]
;
  %sub = sub nsw i32 %y, %x
  %shr = ashr i32 %sub, 31
  %and = and i32 %shr, %x
  ret i32 %and
}

define i64 @sub_ashr_and_i64(i64 %x, i64 %y) {
; CHECK-LABEL: @sub_ashr_and_i64(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i64 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[TMP1]], i64 [[X]], i64 0
; CHECK-NEXT:    ret i64 [[AND]]
;
  %sub = sub nsw i64 %y, %x
  %shr = ashr i64 %sub, 63
  %and = and i64 %shr, %x
  ret i64 %and
}

; nuw nsw

define i32 @sub_ashr_and_i32_nuw_nsw(i32 %x, i32 %y) {
; CHECK-LABEL: @sub_ashr_and_i32_nuw_nsw(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 0
; CHECK-NEXT:    ret i32 [[AND]]
;
  %sub = sub nuw nsw i32 %y, %x
  %shr = ashr i32 %sub, 31
  %and = and i32 %shr, %x
  ret i32 %and
}

; Commute

define i32 @sub_ashr_and_i32_commute(i32 %x, i32 %y) {
; CHECK-LABEL: @sub_ashr_and_i32_commute(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 0
; CHECK-NEXT:    ret i32 [[AND]]
;
  %sub = sub nsw i32 %y, %x
  %shr = ashr i32 %sub, 31
  %and = and i32 %x, %shr  ; commute %x and %shr
  ret i32 %and
}

; Vector Types

define <4 x i32> @sub_ashr_and_i32_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @sub_ashr_and_i32_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <4 x i32> [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[X]], <4 x i32> zeroinitializer
; CHECK-NEXT:    ret <4 x i32> [[AND]]
;
  %sub = sub nsw <4 x i32> %y, %x
  %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 31>
  %and = and <4 x i32> %shr, %x
  ret <4 x i32> %and
}

define <4 x i32> @sub_ashr_and_i32_vec_nuw_nsw(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @sub_ashr_and_i32_vec_nuw_nsw(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <4 x i32> [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[X]], <4 x i32> zeroinitializer
; CHECK-NEXT:    ret <4 x i32> [[AND]]
;
  %sub = sub nuw nsw <4 x i32> %y, %x
  %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 31>
  %and = and <4 x i32> %shr, %x
  ret <4 x i32> %and
}

define <4 x i32> @sub_ashr_and_i32_vec_commute(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @sub_ashr_and_i32_vec_commute(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <4 x i32> [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[X]], <4 x i32> zeroinitializer
; CHECK-NEXT:    ret <4 x i32> [[AND]]
;
  %sub = sub nsw <4 x i32> %y, %x
  %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 31>
  %and = and <4 x i32> %x, %shr  ; commute %x and %shr
  ret <4 x i32> %and
}

; Extra uses

define i32 @sub_ashr_and_i32_extra_use_sub(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @sub_ashr_and_i32_extra_use_sub(
; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    store i32 [[SUB]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt i32 [[SUB]], 0
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[ISNEG]], i32 [[X]], i32 0
; CHECK-NEXT:    ret i32 [[AND]]
;
  %sub = sub nsw i32 %y, %x
  store i32 %sub, ptr %p
  %shr = ashr i32 %sub, 31
  %and = and i32 %shr, %x
  ret i32 %and
}

define i32 @sub_ashr_and_i32_extra_use_and(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @sub_ashr_and_i32_extra_use_and(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 0
; CHECK-NEXT:    store i32 [[AND]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    ret i32 [[AND]]
;
  %sub = sub nsw i32 %y, %x
  %shr = ashr i32 %sub, 31
  %and = and i32 %shr, %x
  store i32 %and, ptr %p
  ret i32 %and
}

; Negative Tests

define i32 @sub_ashr_and_i32_extra_use_ashr(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @sub_ashr_and_i32_extra_use_ashr(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[SHR:%.*]] = sext i1 [[TMP1]] to i32
; CHECK-NEXT:    store i32 [[SHR]], ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[AND:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 0
; CHECK-NEXT:    ret i32 [[AND]]
;
  %sub = sub nsw i32 %y, %x
  %shr = ashr i32 %sub, 31
  store i32 %shr, ptr %p
  %and = and i32 %shr, %x
  ret i32 %and
}

define i32 @sub_ashr_and_i32_no_nuw_nsw(i32 %x, i32 %y) {
; CHECK-LABEL: @sub_ashr_and_i32_no_nuw_nsw(
; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[SHR:%.*]] = ashr i32 [[SUB]], 7
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHR]], [[X]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %sub = sub i32 %y, %x
  %shr = ashr i32 %sub, 7
  %and = and i32 %shr, %x
  ret i32 %and
}

define <4 x i32> @sub_ashr_and_i32_vec_poison(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: @sub_ashr_and_i32_vec_poison(
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt <4 x i32> [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = select <4 x i1> [[ISNEG]], <4 x i32> [[X]], <4 x i32> zeroinitializer
; CHECK-NEXT:    ret <4 x i32> [[AND]]
;
  %sub = sub nsw <4 x i32> %y, %x
  %shr = ashr <4 x i32> %sub, <i32 31, i32 31, i32 31, i32 poison>
  %and = and <4 x i32> %shr, %x
  ret <4 x i32> %and
}

define i32 @sub_ashr_and_i32_shift_wrong_bit(i32 %x, i32 %y) {
; CHECK-LABEL: @sub_ashr_and_i32_shift_wrong_bit(
; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[SHR:%.*]] = ashr i32 [[SUB]], 15
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHR]], [[X]]
; CHECK-NEXT:    ret i32 [[AND]]
;
  %sub = sub nsw i32 %y, %x
  %shr = ashr i32 %sub, 15
  %and = and i32 %shr, %x
  ret i32 %and
}