xref: /llvm-project/llvm/test/Transforms/InstCombine/icmp-xor-signbit.ll (revision 38fffa630ee80163dc65e759392ad29798905679)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2; RUN: opt < %s -passes=instcombine -S | FileCheck %s
3
4; icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
5
; Both operands are xor'd with the i8 sign bit (128 = 0x80), which flips the
; sign of each value, so the signed compare folds to an unsigned compare of
; the original operands.
6define i1 @slt_to_ult(i8 %x, i8 %y) {
7; CHECK-LABEL: @slt_to_ult(
8; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8 [[X:%.*]], [[Y:%.*]]
9; CHECK-NEXT:    ret i1 [[CMP]]
10;
11  %a = xor i8 %x, 128
12  %b = xor i8 %y, 128
13  %cmp = icmp slt i8 %a, %b
14  ret i1 %cmp
15}
16
17; PR33138 - https://bugs.llvm.org/show_bug.cgi?id=33138
18
; Same signbit-xor fold as @slt_to_ult, but with splat vector constants.
19define <2 x i1> @slt_to_ult_splat(<2 x i8> %x, <2 x i8> %y) {
20; CHECK-LABEL: @slt_to_ult_splat(
21; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i8> [[X:%.*]], [[Y:%.*]]
22; CHECK-NEXT:    ret <2 x i1> [[CMP]]
23;
24  %a = xor <2 x i8> %x, <i8 128, i8 128>
25  %b = xor <2 x i8> %y, <i8 128, i8 128>
26  %cmp = icmp slt <2 x i8> %a, %b
27  ret <2 x i1> %cmp
28}
29
30; Make sure that unsigned -> signed works too.
31
; The inverse direction: signbit-xor on both sides of an unsigned compare
; folds to the corresponding signed compare of the original operands.
32define i1 @ult_to_slt(i8 %x, i8 %y) {
33; CHECK-LABEL: @ult_to_slt(
34; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[X:%.*]], [[Y:%.*]]
35; CHECK-NEXT:    ret i1 [[CMP]]
36;
37  %a = xor i8 %x, 128
38  %b = xor i8 %y, 128
39  %cmp = icmp ult i8 %a, %b
40  ret i1 %cmp
41}
42
; Vector splat version of @ult_to_slt.
43define <2 x i1> @ult_to_slt_splat(<2 x i8> %x, <2 x i8> %y) {
44; CHECK-LABEL: @ult_to_slt_splat(
45; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> [[X:%.*]], [[Y:%.*]]
46; CHECK-NEXT:    ret <2 x i1> [[CMP]]
47;
48  %a = xor <2 x i8> %x, <i8 128, i8 128>
49  %b = xor <2 x i8> %y, <i8 128, i8 128>
50  %cmp = icmp ult <2 x i8> %a, %b
51  ret <2 x i1> %cmp
52}
53
54; icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
55
; Here the xor constant is 127 (the max signed i8 value, i.e. NOT signmask),
; so the fold both swaps signedness and reverses the compare direction.
56define i1 @slt_to_ugt(i8 %x, i8 %y) {
57; CHECK-LABEL: @slt_to_ugt(
58; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[X:%.*]], [[Y:%.*]]
59; CHECK-NEXT:    ret i1 [[CMP]]
60;
61  %a = xor i8 %x, 127
62  %b = xor i8 %y, 127
63  %cmp = icmp slt i8 %a, %b
64  ret i1 %cmp
65}
66
; Vector splat version of @slt_to_ugt (xor with max-signed-value 127).
67define <2 x i1> @slt_to_ugt_splat(<2 x i8> %x, <2 x i8> %y) {
68; CHECK-LABEL: @slt_to_ugt_splat(
69; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i8> [[X:%.*]], [[Y:%.*]]
70; CHECK-NEXT:    ret <2 x i1> [[CMP]]
71;
72  %a = xor <2 x i8> %x, <i8 127, i8 127>
73  %b = xor <2 x i8> %y, <i8 127, i8 127>
74  %cmp = icmp slt <2 x i8> %a, %b
75  ret <2 x i1> %cmp
76}
77
78; Make sure that unsigned -> signed works too.
79
; Unsigned -> signed direction of the xor-127 fold: ult becomes sgt with the
; original operands.
80define i1 @ult_to_sgt(i8 %x, i8 %y) {
81; CHECK-LABEL: @ult_to_sgt(
82; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], [[Y:%.*]]
83; CHECK-NEXT:    ret i1 [[CMP]]
84;
85  %a = xor i8 %x, 127
86  %b = xor i8 %y, 127
87  %cmp = icmp ult i8 %a, %b
88  ret i1 %cmp
89}
90
; Vector splat version of @ult_to_sgt.
91define <2 x i1> @ult_to_sgt_splat(<2 x i8> %x, <2 x i8> %y) {
92; CHECK-LABEL: @ult_to_sgt_splat(
93; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i8> [[X:%.*]], [[Y:%.*]]
94; CHECK-NEXT:    ret <2 x i1> [[CMP]]
95;
96  %a = xor <2 x i8> %x, <i8 127, i8 127>
97  %b = xor <2 x i8> %y, <i8 127, i8 127>
98  %cmp = icmp ult <2 x i8> %a, %b
99  ret <2 x i1> %cmp
100}
101
102; icmp u/s (a ^ signmask), C --> icmp s/u a, C'
103
; With a constant RHS, the signbit-xor folds into the predicate and constant:
; sge (x ^ 128), 15 becomes ugt x, -114 (15 ^ 128 = -113; ">= -113" is
; canonicalized to "> -114").
104define i1 @sge_to_ugt(i8 %x) {
105; CHECK-LABEL: @sge_to_ugt(
106; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[X:%.*]], -114
107; CHECK-NEXT:    ret i1 [[CMP]]
108;
109  %a = xor i8 %x, 128
110  %cmp = icmp sge i8 %a, 15
111  ret i1 %cmp
112}
113
; Vector splat version of @sge_to_ugt; the result constant prints as splat.
114define <2 x i1> @sge_to_ugt_splat(<2 x i8> %x) {
115; CHECK-LABEL: @sge_to_ugt_splat(
116; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i8> [[X:%.*]], splat (i8 -114)
117; CHECK-NEXT:    ret <2 x i1> [[CMP]]
118;
119  %a = xor <2 x i8> %x, <i8 128, i8 128>
120  %cmp = icmp sge <2 x i8> %a, <i8 15, i8 15>
121  ret <2 x i1> %cmp
122}
123
124; Make sure that unsigned -> signed works too.
125
; Unsigned -> signed direction of the constant-RHS fold: uge (x ^ 128), 15
; becomes sgt x, -114.
126define i1 @uge_to_sgt(i8 %x) {
127; CHECK-LABEL: @uge_to_sgt(
128; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], -114
129; CHECK-NEXT:    ret i1 [[CMP]]
130;
131  %a = xor i8 %x, 128
132  %cmp = icmp uge i8 %a, 15
133  ret i1 %cmp
134}
135
; Vector splat version of @uge_to_sgt.
136define <2 x i1> @uge_to_sgt_splat(<2 x i8> %x) {
137; CHECK-LABEL: @uge_to_sgt_splat(
138; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i8> [[X:%.*]], splat (i8 -114)
139; CHECK-NEXT:    ret <2 x i1> [[CMP]]
140;
141  %a = xor <2 x i8> %x, <i8 128, i8 128>
142  %cmp = icmp uge <2 x i8> %a, <i8 15, i8 15>
143  ret <2 x i1> %cmp
144}
145
146; icmp u/s (a ^ maxsignval), C --> icmp s/u' a, C'
147
; xor with 127 (max signed value) and a constant RHS: predicate swaps
; signedness AND direction, and the constant is adjusted (15 ^ 127 = 112;
; the inverted ">=" becomes "< 113").
148define i1 @sge_to_ult(i8 %x) {
149; CHECK-LABEL: @sge_to_ult(
150; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8 [[X:%.*]], 113
151; CHECK-NEXT:    ret i1 [[CMP]]
152;
153  %a = xor i8 %x, 127
154  %cmp = icmp sge i8 %a, 15
155  ret i1 %cmp
156}
157
; Vector splat version of @sge_to_ult.
158define <2 x i1> @sge_to_ult_splat(<2 x i8> %x) {
159; CHECK-LABEL: @sge_to_ult_splat(
160; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i8> [[X:%.*]], splat (i8 113)
161; CHECK-NEXT:    ret <2 x i1> [[CMP]]
162;
163  %a = xor <2 x i8> %x, <i8 127, i8 127>
164  %cmp = icmp sge <2 x i8> %a, <i8 15, i8 15>
165  ret <2 x i1> %cmp
166}
167
168; Make sure that unsigned -> signed works too.
169
; Unsigned -> signed direction of the xor-127 constant fold: uge (x ^ 127), 15
; becomes slt x, 113.
170define i1 @uge_to_slt(i8 %x) {
171; CHECK-LABEL: @uge_to_slt(
172; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 113
173; CHECK-NEXT:    ret i1 [[CMP]]
174;
175  %a = xor i8 %x, 127
176  %cmp = icmp uge i8 %a, 15
177  ret i1 %cmp
178}
179
; Vector splat version of @uge_to_slt.
180define <2 x i1> @uge_to_slt_splat(<2 x i8> %x) {
181; CHECK-LABEL: @uge_to_slt_splat(
182; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> [[X:%.*]], splat (i8 113)
183; CHECK-NEXT:    ret <2 x i1> [[CMP]]
184;
185  %a = xor <2 x i8> %x, <i8 127, i8 127>
186  %cmp = icmp uge <2 x i8> %a, <i8 15, i8 15>
187  ret <2 x i1> %cmp
188}
189
190; PR33138, part 2: https://bugs.llvm.org/show_bug.cgi?id=33138
191; Bitcast canonicalization ensures that we recognize the signbit constant.
192
; The i32 splat 0x80808080 is the per-i8 sign bit once the xor results are
; bitcast to <8 x i8>, so the signed compare still folds to unsigned; the
; xors are eliminated and only the bitcasts of the original operands remain.
193define <8 x i1> @sgt_to_ugt_bitcasted_splat(<2 x i32> %x, <2 x i32> %y) {
194; CHECK-LABEL: @sgt_to_ugt_bitcasted_splat(
195; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i32> [[X:%.*]] to <8 x i8>
196; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i32> [[Y:%.*]] to <8 x i8>
197; CHECK-NEXT:    [[E:%.*]] = icmp ugt <8 x i8> [[TMP1]], [[TMP2]]
198; CHECK-NEXT:    ret <8 x i1> [[E]]
199;
200  %a = xor <2 x i32> %x, <i32 2155905152, i32 2155905152> ; 0x80808080
201  %b = xor <2 x i32> %y, <i32 2155905152, i32 2155905152>
202  %c = bitcast <2 x i32> %a to <8 x i8>
203  %d = bitcast <2 x i32> %b to <8 x i8>
204  %e = icmp sgt <8 x i8> %c, %d
205  ret <8 x i1> %e
206}
207
208; Bitcast canonicalization ensures that we recognize the signbit constant.
209
; The or forces one byte of each i16 element to 0x80; the whole compare folds
; to false (all-zeros result). NOTE(review): this presumably relies on that
; byte being the sign byte of each i16 under the target's byte order — the
; expected output shows the fold happens regardless.
210define <2 x i1> @negative_simplify_splat(<4 x i8> %x) {
211; CHECK-LABEL: @negative_simplify_splat(
212; CHECK-NEXT:    ret <2 x i1> zeroinitializer
213;
214  %a = or <4 x i8> %x, <i8 0, i8 128, i8 0, i8 128>
215  %b = bitcast <4 x i8> %a to <2 x i16>
216  %c = icmp sgt <2 x i16> %b, zeroinitializer
217  ret <2 x i1> %c
218}
219
; lshr by 31 isolates the sign bit of %a (0 or 1), so comparing it for
; equality with a zext'd i1 folds to an xor of "sign bit clear" with %b.
220define i1 @slt_zero_eq_i1(i32 %a, i1 %b) {
221; CHECK-LABEL: @slt_zero_eq_i1(
222; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i32 [[A:%.*]], -1
223; CHECK-NEXT:    [[CMP21:%.*]] = xor i1 [[TMP1]], [[B:%.*]]
224; CHECK-NEXT:    ret i1 [[CMP21]]
225;
226  %conv = zext i1 %b to i32
227  %cmp1 = lshr i32 %a, 31
228  %cmp2 = icmp eq i32 %conv, %cmp1
229  ret i1 %cmp2
230}
231
; Negative test: ashr by 31 produces 0 or -1 (a sign splat), not 0 or 1, so
; the fold above does not apply and the sequence is left as-is.
232define i1 @slt_zero_eq_i1_fail(i32 %a, i1 %b) {
233; CHECK-LABEL: @slt_zero_eq_i1_fail(
234; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[B:%.*]] to i32
235; CHECK-NEXT:    [[CMP1:%.*]] = ashr i32 [[A:%.*]], 31
236; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
237; CHECK-NEXT:    ret i1 [[CMP2]]
238;
239  %conv = zext i1 %b to i32
240  %cmp1 = ashr i32 %a, 31
241  %cmp2 = icmp eq i32 %conv, %cmp1
242  ret i1 %cmp2
243}
244
; "(a != 0) == signbit(a)" holds exactly when a is negative or zero, so the
; whole sequence folds to a single "a < 1" compare.
245define i1 @slt_zero_eq_ne_0(i32 %a) {
246; CHECK-LABEL: @slt_zero_eq_ne_0(
247; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[A:%.*]], 1
248; CHECK-NEXT:    ret i1 [[TMP1]]
249;
250  %cmp = icmp ne i32 %a, 0
251  %conv = zext i1 %cmp to i32
252  %cmp1 = lshr i32 %a, 31
253  %cmp2 = icmp eq i32 %conv, %cmp1
254  ret i1 %cmp2
255}
256
; The ne variant of the previous test: "(a != 0) != signbit(a)" is true
; exactly when a is strictly positive, folding to "a > 0".
257define i1 @slt_zero_ne_ne_0(i32 %a) {
258; CHECK-LABEL: @slt_zero_ne_ne_0(
259; CHECK-NEXT:    [[CMP21:%.*]] = icmp sgt i32 [[A:%.*]], 0
260; CHECK-NEXT:    ret i1 [[CMP21]]
261;
262  %cmp = icmp ne i32 %a, 0
263  %conv = zext i1 %cmp to i32
264  %cmp1 = lshr i32 %a, 31
265  %cmp2 = icmp ne i32 %conv, %cmp1
266  ret i1 %cmp2
267}
268
; Vector version of @slt_zero_eq_ne_0; folds elementwise to "a < splat(1)".
269define <4 x i1> @slt_zero_eq_ne_0_vec(<4 x i32> %a) {
270; CHECK-LABEL: @slt_zero_eq_ne_0_vec(
271; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <4 x i32> [[A:%.*]], splat (i32 1)
272; CHECK-NEXT:    ret <4 x i1> [[TMP1]]
273;
274  %cmp = icmp ne <4 x i32> %a, zeroinitializer
275  %conv = zext <4 x i1> %cmp to <4 x i32>
276  %cmp1 = lshr <4 x i32> %a, <i32 31, i32 31, i32 31, i32 31>
277  %cmp2 = icmp eq <4 x i32> %conv, %cmp1
278  ret <4 x i1> %cmp2
279}
280
; With a non-constant second compare operand the full fold is not possible,
; but the zext/lshr/ne pattern still becomes an xor of the two i1 conditions
; "a != b" and "a < 0" (signbit of a).
281define i1 @slt_zero_ne_ne_b(i32 %a, i32 %b) {
282; CHECK-LABEL: @slt_zero_ne_ne_b(
283; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[A:%.*]], [[B:%.*]]
284; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[A]], 0
285; CHECK-NEXT:    [[CMP21:%.*]] = xor i1 [[TMP1]], [[CMP]]
286; CHECK-NEXT:    ret i1 [[CMP21]]
287;
288  %cmp = icmp ne i32 %a, %b
289  %conv = zext i1 %cmp to i32
290  %cmp1 = lshr i32 %a, 31
291  %cmp2 = icmp ne i32 %conv, %cmp1
292  ret i1 %cmp2
293}
294
; Negative test: ashr (sign splat, 0 or -1) instead of lshr (0 or 1) blocks
; the fold; the instructions remain (only commuted in the expected output).
295define i1 @slt_zero_eq_ne_0_fail1(i32 %a) {
296; CHECK-LABEL: @slt_zero_eq_ne_0_fail1(
297; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
298; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
299; CHECK-NEXT:    [[CMP1:%.*]] = ashr i32 [[A]], 31
300; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
301; CHECK-NEXT:    ret i1 [[CMP2]]
302;
303  %cmp = icmp ne i32 %a, 0
304  %conv = zext i1 %cmp to i32
305  %cmp1 = ashr i32 %a, 31
306  %cmp2 = icmp eq i32 %conv, %cmp1
307  ret i1 %cmp2
308}
309
; Negative test: shifting by 30 instead of 31 does not isolate the sign bit,
; so the fold does not apply and the sequence is left as-is.
310define i1 @slt_zero_eq_ne_0_fail2(i32 %a) {
311; CHECK-LABEL: @slt_zero_eq_ne_0_fail2(
312; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
313; CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
314; CHECK-NEXT:    [[CMP1:%.*]] = lshr i32 [[A]], 30
315; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32 [[CMP1]], [[CONV]]
316; CHECK-NEXT:    ret i1 [[CMP2]]
317;
318  %cmp = icmp ne i32 %a, 0
319  %conv = zext i1 %cmp to i32
320  %cmp1 = lshr i32 %a, 30
321  %cmp2 = icmp eq i32 %conv, %cmp1
322  ret i1 %cmp2
323}
324