; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

declare void @llvm.assume(i1)
declare i8 @gen8()
declare void @use8(i8)

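; Given a dominating assume that %offset is negative (and thus non-zero),
; instcombine folds the strict underflow check
;   (%base - %offset) u< %base
; into a comparison of the sub's operands:
;   %offset u<= %base
; Tests @t0..@t3 cover the inverted (uge/ule) and commuted (ugt/ult) forms.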
define i1 @t0(i8 %base, i8 %offset) {
; CHECK-LABEL: @t0(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ule i8 [[OFFSET]], [[BASE]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ult i8 %adjusted, %base
  ret i1 %res
}

define i1 @t1(i8 %base, i8 %offset) {
; CHECK-LABEL: @t1(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ugt i8 [[OFFSET]], [[BASE]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp uge i8 %adjusted, %base
  ret i1 %res
}

define i1 @t2(i8 %offset) {
; CHECK-LABEL: @t2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[BASE:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)

  %base = call i8 @gen8()
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ugt i8 %base, %adjusted
  ret i1 %res
}

define i1 @t3(i8 %offset) {
; CHECK-LABEL: @t3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[OFFSET:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[BASE:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp slt i8 %offset, 0
  call void @llvm.assume(i1 %cmp)

  %base = call i8 @gen8()
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ule i8 %base, %adjusted
  ret i1 %res
}

; Here we don't know that %offset is non-zero. Can't fold.
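; E.g. with %offset == 0, %adjusted == %base, so the strict check is false,
; while the folded form %offset u<= %base would be true.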
define i1 @n4_maybezero(i8 %base, i8 %offset) {
; CHECK-LABEL: @n4_maybezero(
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ult i8 %adjusted, %base
  ret i1 %res
}

; We need to know that %offset is non-zero; knowing it about %base won't do. Can't fold.
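; Same situation as in @n4_maybezero: %offset may still be zero.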
define i1 @n5_wrongnonzero(i8 %base, i8 %offset) {
; CHECK-LABEL: @n5_wrongnonzero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[BASE:%.*]], 0
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET:%.*]]
; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
; CHECK-NEXT:    [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
; CHECK-NEXT:    ret i1 [[RES]]
;
  %cmp = icmp sgt i8 %base, 0
  call void @llvm.assume(i1 %cmp)

  %adjusted = sub i8 %base, %offset
  call void @use8(i8 %adjusted)
  %res = icmp ult i8 %adjusted, %base
  ret i1 %res
}