; xref: /llvm-project/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll (revision 38fffa630ee80163dc65e759392ad29798905679)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; Helper declarations: @llvm.assume feeds known-bits facts to the optimizer,
; @barrier provides an opaque side effect, and @use.i8 keeps a value alive
; so it counts as having an extra use.
declare void @llvm.assume(i1)
declare void @barrier()
declare void @use.i8(i8)
; test for (~x ^ y) < ~z
; The not of %x has an extra use; per the CHECK lines the compare still
; simplifies to (x ^ y) s> z.
define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor1(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xor = xor i8 %x, -1
  call void @use.i8(i8 %xor)
  %xor2 = xor i8 %xor, %y
  %nz = xor i8 %z, -1
  %r = icmp slt i8 %xor2, %nz
  ret i1 %r
}
24
; test for ~z <= (x ^ ~y)
define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor2(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y]]
; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %nz = xor i8 %z, -1
  %xor = xor i8 %y, -1
  call void @use.i8(i8 %xor)
  %xor2 = xor i8 %xor, %x
  %r = icmp sle i8 %nz, %xor2
  ret i1 %r
}
41
; test for ~z > (~x ^ y)
define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor3(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %nz = xor i8 %z, -1
  %xor = xor i8 %x, -1
  call void @use.i8(i8 %xor)
  %xor2 = xor i8 %xor, %y
  %r = icmp sgt i8 %nz, %xor2
  ret i1 %r
}
58
; tests for equality
; ~z != (~y ^ x)  -->  z != (y ^ x) : both sides' nots cancel.
define i1 @test_xor_ne(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor_ne(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[Z:%.*]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %nz = xor i8 %z, -1
  %xor = xor i8 %y, -1
  %xor2 = xor i8 %xor, %x
  %r = icmp ne i8 %nz, %xor2
  ret i1 %r
}
72
; test for equality: ~z == (~y ^ x)
define i1 @test_xor_eq(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor_eq(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[Z:%.*]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %nz = xor i8 %z, -1
  %xor = xor i8 %y, -1
  %xor2 = xor i8 %xor, %x
  %r = icmp eq i8 %nz, %xor2
  ret i1 %r
}
85
; other tests
; test for (~x ^ y) >= ~z
define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor4(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %nz = xor i8 %z, -1
  %xor = xor i8 %x, -1
  call void @use.i8(i8 %xor)
  %xor2 = xor i8 %xor, %y
  %r = icmp sge i8 %xor2, %nz
  ret i1 %r
}
102
; test for (~x ^ y) u< ~z
define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor5(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %nz = xor i8 %z, -1
  %xor = xor i8 %x, -1
  call void @use.i8(i8 %xor)
  %xor2 = xor i8 %xor, %y
  %r = icmp ult i8 %xor2, %nz
  ret i1 %r
}
118
; test for (~x ^ y) u<= ~z
define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor6(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %nz = xor i8 %z, -1
  %xor = xor i8 %x, -1
  call void @use.i8(i8 %xor)
  %xor2 = xor i8 %xor, %y
  %r = icmp ule i8 %xor2, %nz
  ret i1 %r
}
134
; test for (~x ^ y) u> ~z
define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor7(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %nz = xor i8 %z, -1
  %xor = xor i8 %x, -1
  call void @use.i8(i8 %xor)
  %xor2 = xor i8 %xor, %y
  %r = icmp ugt i8 %xor2, %nz
  ret i1 %r
}
150
; test for (~x ^ y) u>= ~z
define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor8(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT:    call void @use.i8(i8 [[XOR]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = icmp ule i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %nz = xor i8 %z, -1
  %xor = xor i8 %x, -1
  call void @use.i8(i8 %xor)
  %xor2 = xor i8 %xor, %y
  %r = icmp uge i8 %xor2, %nz
  ret i1 %r
}
166
; test (~a ^ b) < ~a
define i1 @test_slt_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_slt_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i32 [[TMP1]], [[X]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xor1 = xor i32 %x, -1
  %xor2 = xor i32 %xor1, %y
  %r = icmp slt i32 %xor2, %xor1
  ret i1 %r
}
179
; test (a ^ ~b) <= ~b
define i1 @test_sle_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_sle_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp sge i32 [[TMP1]], [[Y]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xor1 = xor i32 %y, -1
  %xor2 = xor i32 %xor1, %x
  %r = icmp sle i32 %xor2, %xor1
  ret i1 %r
}
192
; test ~a > (~a ^ b)
define i1 @test_sgt_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_sgt_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], [[X]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %xor1 = xor i32 %x, -1
  %xor2 = xor i32 %xor1, %y
  %cmp = icmp sgt i32 %xor2, %xor1
  ret i1 %cmp
}
205
; test (~a ^ b) >= ~a
define i1 @test_sge_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_sge_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[X]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %xor1 = xor i32 %x, -1
  %xor2 = xor i32 %xor1, %y
  %cmp = icmp sge i32 %xor2, %xor1
  ret i1 %cmp
}
217
; test (~a ^ b) u< ~a
define i1 @test_ult_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_ult_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[TMP1]], [[X]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %xor1 = xor i32 %x, -1
  %xor2 = xor i32 %xor1, %y
  %cmp = icmp ult i32 %xor2, %xor1
  ret i1 %cmp
}
229
; test (~a ^ b) u<= ~a
define i1 @test_ule_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_ule_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp uge i32 [[TMP1]], [[X]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %xor1 = xor i32 %x, -1
  %xor2 = xor i32 %xor1, %y
  %cmp = icmp ule i32 %xor2, %xor1
  ret i1 %cmp
}
241
; test (~a ^ b) u> ~a
define i1 @test_ugt_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_ugt_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[TMP1]], [[X]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %xor1 = xor i32 %x, -1
  %xor2 = xor i32 %xor1, %y
  %cmp = icmp ugt i32 %xor2, %xor1
  ret i1 %cmp
}
253
; test (~a ^ b) u>= ~a
define i1 @test_uge_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_uge_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[X]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %xor1 = xor i32 %x, -1
  %xor2 = xor i32 %xor1, %y
  %cmp = icmp uge i32 %xor2, %xor1
  ret i1 %cmp
}
265
; Negative tests
; Extra uses of %xor2 and %nz keep those instructions alive; per the CHECK
; lines the compare itself still simplifies, but the nots are not removed.
define i1 @test_xor1_nofold_multi_use(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor1_nofold_multi_use(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[XOR2:%.*]] = xor i8 [[TMP1]], -1
; CHECK-NEXT:    call void @use.i8(i8 [[XOR2]])
; CHECK-NEXT:    [[NZ:%.*]] = xor i8 [[Z:%.*]], -1
; CHECK-NEXT:    call void @use.i8(i8 [[NZ]])
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xor = xor i8 %x, -1
  %xor2 = xor i8 %xor, %y
  call void @use.i8(i8 %xor2)
  %nz = xor i8 %z, -1
  call void @use.i8(i8 %nz)
  %r = icmp slt i8 %xor2, %nz
  ret i1 %r
}
285
; With the assume establishing y != 0, (x ^ y) can never equal x, so the
; CHECK lines show uge strengthened to ugt.
define i1 @xor_uge(i8 %x, i8 %y) {
; CHECK-LABEL: @xor_uge(
; CHECK-NEXT:    [[YNZ:%.*]] = icmp ne i8 [[Y:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[YNZ]])
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], [[Y]]
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[XOR]], [[X]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %ynz = icmp ne i8 %y, 0
  call void @llvm.assume(i1 %ynz)
  %xor = xor i8 %x, %y
  %r = icmp uge i8 %xor, %x
  ret i1 %r
}
300
; Negative test: without a nonzero guarantee on %y, the uge compare must
; stay as-is (the CHECK lines show no fold).
define i1 @xor_uge_fail_maybe_zero(i8 %x, i8 %y) {
; CHECK-LABEL: @xor_uge_fail_maybe_zero(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[XOR]], [[X]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xor = xor i8 %x, %y
  %r = icmp uge i8 %xor, %x
  ret i1 %r
}
311
; Vector variant: the `or` makes each lane of %y known nonzero, so per the
; CHECK lines ule is strengthened to ult.
define <2 x i1> @xor_ule_2(<2 x i8> %x, <2 x i8> %yy) {
; CHECK-LABEL: @xor_ule_2(
; CHECK-NEXT:    [[Y:%.*]] = or <2 x i8> [[YY:%.*]], <i8 9, i8 8>
; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[Y]], [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = icmp ult <2 x i8> [[XOR]], [[X]]
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %y = or <2 x i8> %yy, <i8 9, i8 8>
  %xor = xor <2 x i8> %y, %x
  %r = icmp ule <2 x i8> %xor, %x
  ret <2 x i1> %r
}
324
; With y assumed nonzero, x s<= (x ^ y) becomes (x ^ y) s> x per the CHECK
; lines; %x is made non-trivial via the add so the operand isn't a plain arg.
define i1 @xor_sle_2(i8 %xx, i8 %y, i8 %z) {
; CHECK-LABEL: @xor_sle_2(
; CHECK-NEXT:    [[X:%.*]] = add i8 [[XX:%.*]], [[Z:%.*]]
; CHECK-NEXT:    [[YNZ:%.*]] = icmp ne i8 [[Y:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[YNZ]])
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X]], [[Y]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[XOR]], [[X]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %x = add i8 %xx, %z
  %ynz = icmp ne i8 %y, 0
  call void @llvm.assume(i1 %ynz)
  %xor = xor i8 %x, %y
  %r = icmp sle i8 %x, %xor
  ret i1 %r
}
341
; %y has its sign bit set (or with 128), so x s>= (x ^ y) folds to
; (x ^ y) s< x per the CHECK lines.
define i1 @xor_sge(i8 %xx, i8 %yy) {
; CHECK-LABEL: @xor_sge(
; CHECK-NEXT:    [[X:%.*]] = mul i8 [[XX:%.*]], [[XX]]
; CHECK-NEXT:    [[Y:%.*]] = or i8 [[YY:%.*]], -128
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[Y]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[XOR]], [[X]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %x = mul i8 %xx, %xx
  %y = or i8 %yy, 128
  %xor = xor i8 %y, %x
  %r = icmp sge i8 %x, %xor
  ret i1 %r
}
356
; %y1 is constrained to [64, 127]; per the CHECK lines the ugt compare is
; left in place (no fold for this pattern).
define i1 @xor_ugt_2(i8 %xx, i8 %y, i8 %z) {
; CHECK-LABEL: @xor_ugt_2(
; CHECK-NEXT:    [[X:%.*]] = add i8 [[XX:%.*]], [[Z:%.*]]
; CHECK-NEXT:    [[YZ:%.*]] = and i8 [[Y:%.*]], 63
; CHECK-NEXT:    [[Y1:%.*]] = or disjoint i8 [[YZ]], 64
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X]], [[Y1]]
; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[X]], [[XOR]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %x = add i8 %xx, %z
  %yz = and i8 %y, 63
  %y1 = or i8 %yz, 64
  %xor = xor i8 %x, %y1
  %r = icmp ugt i8 %x, %xor
  ret i1 %r
}
373
; Constant xor mask (123); per the CHECK lines the ult compare stays as-is.
define i1 @xor_ult(i8 %x) {
; CHECK-LABEL: @xor_ult(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], 123
; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[XOR]], [[X]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xor = xor i8 %x, 123
  %r = icmp ult i8 %xor, %x
  ret i1 %r
}
384
; Vector case where each lane of %y1 has a known set bit (64) and a clear
; sign bit; per the CHECK lines the sgt compare is kept (only the constants
; are reprinted as splat).
define <2 x i1> @xor_sgt(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @xor_sgt(
; CHECK-NEXT:    [[YZ:%.*]] = and <2 x i8> [[Y:%.*]], splat (i8 31)
; CHECK-NEXT:    [[Y1:%.*]] = or disjoint <2 x i8> [[YZ]], splat (i8 64)
; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[X:%.*]], [[Y1]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]]
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %yz = and <2 x i8> %y, <i8 31, i8 31>
  %y1 = or <2 x i8> %yz, <i8 64, i8 64>
  %xor = xor <2 x i8> %x, %y1
  %r = icmp sgt <2 x i8> %xor, %x
  ret <2 x i1> %r
}
399
; Negative test: no single bit of %y1 is known to be its most significant
; set bit, so the sgt compare is not folded (the CHECK mask 55 = 63 & ~8
; reflects the disjoint-or canonicalization only).
define <2 x i1> @xor_sgt_fail_no_known_msb(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @xor_sgt_fail_no_known_msb(
; CHECK-NEXT:    [[YZ:%.*]] = and <2 x i8> [[Y:%.*]], splat (i8 55)
; CHECK-NEXT:    [[Y1:%.*]] = or disjoint <2 x i8> [[YZ]], splat (i8 8)
; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[X:%.*]], [[Y1]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]]
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %yz = and <2 x i8> %y, <i8 63, i8 63>
  %y1 = or <2 x i8> %yz, <i8 8, i8 8>
  %xor = xor <2 x i8> %x, %y1
  %r = icmp sgt <2 x i8> %xor, %x
  ret <2 x i1> %r
}
414
; Constant xor mask (88); per the CHECK lines the slt compare stays as-is.
define i1 @xor_slt_2(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @xor_slt_2(
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], 88
; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[X]], [[XOR]]
; CHECK-NEXT:    ret i1 [[R]]
;
  %xor = xor i8 %x, 88
  %r = icmp slt i8 %x, %xor
  ret i1 %r
}
425
; Vector case with the sign bit of %y known set (or with 128 == INT_MIN);
; per the CHECK lines the sgt compare is kept unchanged here.
define <2 x i1> @xor_sgt_intmin_2(<2 x i8> %xx, <2 x i8> %yy, <2 x i8> %z) {
; CHECK-LABEL: @xor_sgt_intmin_2(
; CHECK-NEXT:    [[X:%.*]] = add <2 x i8> [[XX:%.*]], [[Z:%.*]]
; CHECK-NEXT:    [[Y:%.*]] = or <2 x i8> [[YY:%.*]], splat (i8 -128)
; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[X]], [[Y]]
; CHECK-NEXT:    [[R:%.*]] = icmp sgt <2 x i8> [[X]], [[XOR]]
; CHECK-NEXT:    ret <2 x i1> [[R]]
;
  %x = add <2 x i8> %xx, %z
  %y = or <2 x i8> %yy, <i8 128, i8 128>
  %xor = xor <2 x i8> %x, %y
  %r = icmp sgt <2 x i8> %x, %xor
  ret <2 x i1> %r
}
440
; On the 'neg' path the dominating branch (icmp slt %C, 0) establishes that
; %C has its sign bit set; the CHECK lines show the compare is kept in
; (C ^ x) s< x form on that path.
define i1 @or_slt_intmin_indirect(i8 %x, i8 %C) {
; CHECK-LABEL: @or_slt_intmin_indirect(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[C:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP]], label [[NEG:%.*]], label [[POS:%.*]]
; CHECK:       common.ret:
; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = phi i1 [ [[R:%.*]], [[NEG]] ], [ false, [[POS]] ]
; CHECK-NEXT:    ret i1 [[COMMON_RET_OP]]
; CHECK:       neg:
; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[C]], [[X:%.*]]
; CHECK-NEXT:    [[R]] = icmp slt i8 [[XOR]], [[X]]
; CHECK-NEXT:    br label [[COMMON_RET:%.*]]
; CHECK:       pos:
; CHECK-NEXT:    tail call void @barrier()
; CHECK-NEXT:    br label [[COMMON_RET]]
;
  %cmp = icmp slt i8 %C, 0
  br i1 %cmp, label %neg, label %pos
common.ret:
  %common.ret.op = phi i1 [ %r, %neg ], [ false, %pos ]
  ret i1 %common.ret.op
neg:
  %xor = xor i8 %C, %x
  %r = icmp slt i8 %xor, %x
  br label %common.ret
pos:
  tail call void @barrier()
  br label %common.ret
}
469