; Source: llvm/test/Transforms/InstCombine/icmp-uadd-sat.ll (revision 38fffa630ee80163dc65e759392ad29798905679)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; Tests for InstCombineCompares.cpp::foldICmpUSubSatOrUAddSatWithConstant
; - uadd_sat case

; ==============================================================================
; Basic tests with one user
; ==============================================================================
; uadd_sat(%arg, 2) == 5 requires the add not to have saturated, so the
; compare folds to %arg == 3.
define i1 @icmp_eq_basic(i8 %arg) {
; CHECK-LABEL: define i1 @icmp_eq_basic(
; CHECK-SAME: i8 [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[ARG]], 3
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 2)
  %cmp = icmp eq i8 %add, 5
  ret i1 %cmp
}

; uadd_sat(%arg, 8) != 9 folds to %arg != 1 (a saturated result is 0xFFFF, not 9).
define i1 @icmp_ne_basic(i16 %arg) {
; CHECK-LABEL: define i1 @icmp_ne_basic(
; CHECK-SAME: i16 [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i16 [[ARG]], 1
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 8)
  %cmp = icmp ne i16 %add, 9
  ret i1 %cmp
}

; uadd_sat(%arg, 2) <=u 3 holds iff %arg <=u 1, i.e. %arg <u 2.
define i1 @icmp_ule_basic(i32 %arg) {
; CHECK-LABEL: define i1 @icmp_ule_basic(
; CHECK-SAME: i32 [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ARG]], 2
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i32 @llvm.uadd.sat.i32(i32 %arg, i32 2)
  %cmp = icmp ule i32 %add, 3
  ret i1 %cmp
}

; uadd_sat(%arg, 5) <u 20 folds to %arg <u 15.
define i1 @icmp_ult_basic(i64 %arg) {
; CHECK-LABEL: define i1 @icmp_ult_basic(
; CHECK-SAME: i64 [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[ARG]], 15
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i64 @llvm.uadd.sat.i64(i64 %arg, i64 5)
  %cmp = icmp ult i64 %add, 20
  ret i1 %cmp
}

; uadd_sat(%arg, 4) >=u 8 folds to %arg >u 3 (saturation only helps the compare).
define i1 @icmp_uge_basic(i8 %arg) {
; CHECK-LABEL: define i1 @icmp_uge_basic(
; CHECK-SAME: i8 [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[ARG]], 3
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 4)
  %cmp = icmp uge i8 %add, 8
  ret i1 %cmp
}

; uadd_sat(%arg, 1) >u 3 folds to %arg >u 2.
define i1 @icmp_ugt_basic(i16 %arg) {
; CHECK-LABEL: define i1 @icmp_ugt_basic(
; CHECK-SAME: i16 [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i16 [[ARG]], 2
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 1)
  %cmp = icmp ugt i16 %add, 3
  ret i1 %cmp
}

; Signed predicate: the fold rewrites the signed compare of the saturated
; result into an unsigned compare of %arg against an adjusted constant.
define i1 @icmp_sle_basic(i32 %arg) {
; CHECK-LABEL: define i1 @icmp_sle_basic(
; CHECK-SAME: i32 [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[ARG]], 2147483637
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i32 @llvm.uadd.sat.i32(i32 %arg, i32 10)
  %cmp = icmp sle i32 %add, 8
  ret i1 %cmp
}

; Signed <: folds to an unsigned compare of %arg against INT64_MAX - 24.
define i1 @icmp_slt_basic(i64 %arg) {
; CHECK-LABEL: define i1 @icmp_slt_basic(
; CHECK-SAME: i64 [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[ARG]], 9223372036854775783
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i64 @llvm.uadd.sat.i64(i64 %arg, i64 24)
  %cmp = icmp slt i64 %add, 5
  ret i1 %cmp
}

; Signed >=: the fold produces an offset plus an unsigned range check
; (add -3, then ult 124) instead of keeping the saturating intrinsic.
define i1 @icmp_sge_basic(i8 %arg) {
; CHECK-LABEL: define i1 @icmp_sge_basic(
; CHECK-SAME: i8 [[ARG:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[ARG]], -3
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8 [[TMP1]], 124
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 1)
  %cmp = icmp sge i8 %add, 4
  ret i1 %cmp
}

; Signed >: same offset + unsigned-range-check shape as the sge case above.
define i1 @icmp_sgt_basic(i16 %arg) {
; CHECK-LABEL: define i1 @icmp_sgt_basic(
; CHECK-SAME: i16 [[ARG:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = add i16 [[ARG]], -4
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i16 [[TMP1]], 32762
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 2)
  %cmp = icmp sgt i16 %add, 5
  ret i1 %cmp
}

; ==============================================================================
; Tests with more than one user
; ==============================================================================
; The saturating add has a second user, so the intrinsic call is kept and
; the compare is left on its result (no fold in the CHECK lines).
define i1 @icmp_eq_multiuse(i8 %arg) {
; CHECK-LABEL: define i1 @icmp_eq_multiuse(
; CHECK-SAME: i8 [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 2)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[ADD]], 5
; CHECK-NEXT:    call void @use.i8(i8 [[ADD]])
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 2)
  %cmp = icmp eq i8 %add, 5
  call void @use.i8(i8 %add)
  ret i1 %cmp
}

; ==============================================================================
; Tests with vector types
; ==============================================================================
; Splat operands fold exactly like the scalar eq case: result is %arg == splat(3).
define <2 x i1> @icmp_eq_vector_equal(<2 x i8> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_equal(
; CHECK-SAME: <2 x i8> [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ARG]], splat (i8 3)
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 2, i8 2>)
  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 5>
  ret <2 x i1> %cmp
}

; Non-splat operands: the fold does not fire and the IR is left unchanged.
define <2 x i1> @icmp_eq_vector_unequal(<2 x i8> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_unequal(
; CHECK-SAME: <2 x i8> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> <i8 1, i8 2>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], <i8 5, i8 6>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 1, i8 2>)
  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 6>
  ret <2 x i1> %cmp
}

; Splat ne case: uadd_sat(%arg, 3) != 5 folds to %arg != splat(2).
define <2 x i1> @icmp_ne_vector_equal(<2 x i16> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_ne_vector_equal(
; CHECK-SAME: <2 x i16> [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i16> [[ARG]], splat (i16 2)
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %arg, <2 x i16> <i16 3, i16 3>)
  %cmp = icmp ne <2 x i16> %add, <i16 5, i16 5>
  ret <2 x i1> %cmp
}

; Non-splat ne case: no fold, IR left unchanged.
define <2 x i1> @icmp_ne_vector_unequal(<2 x i16> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_ne_vector_unequal(
; CHECK-SAME: <2 x i16> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG]], <2 x i16> <i16 3, i16 33>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i16> [[ADD]], <i16 7, i16 6>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %arg, <2 x i16> <i16 3, i16 33>)
  %cmp = icmp ne <2 x i16> %add, <i16 7, i16 6>
  ret <2 x i1> %cmp
}

; Splat ult case (function name says ule, but the IR below uses ult):
; uadd_sat(%arg, 3) <u 5 folds to %arg <u splat(2).
define <2 x i1> @icmp_ule_vector_equal(<2 x i32> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_ule_vector_equal(
; CHECK-SAME: <2 x i32> [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[ARG]], splat (i32 2)
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %arg, <2 x i32> <i32 3, i32 3>)
  %cmp = icmp ult <2 x i32> %add, <i32 5, i32 5>
  ret <2 x i1> %cmp
}

; Non-splat ult case: no fold, IR left unchanged.
define <2 x i1> @icmp_ule_vector_unequal(<2 x i32> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_ule_vector_unequal(
; CHECK-SAME: <2 x i32> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[ARG]], <2 x i32> <i32 3, i32 35>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[ADD]], <i32 5, i32 7>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %arg, <2 x i32> <i32 3, i32 35>)
  %cmp = icmp ult <2 x i32> %add, <i32 5, i32 7>
  ret <2 x i1> %cmp
}

; Splat signed > case: folds to an unsigned compare against an adjusted splat,
; mirroring the scalar sgt fold.
define <2 x i1> @icmp_sgt_vector_equal(<2 x i64> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_sgt_vector_equal(
; CHECK-SAME: <2 x i64> [[ARG:%.*]]) {
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i64> [[ARG]], splat (i64 9223372036854366185)
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %arg, <2 x i64> <i64 409623, i64 409623>)
  %cmp = icmp sgt <2 x i64> %add, <i64 1234, i64 1234>
  ret <2 x i1> %cmp
}

; Non-splat signed > case: no fold, IR left unchanged.
define <2 x i1> @icmp_sgt_vector_unequal(<2 x i64> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_sgt_vector_unequal(
; CHECK-SAME: <2 x i64> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> [[ARG]], <2 x i64> <i64 320498, i64 409623>)
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i64> [[ADD]], <i64 1234, i64 3456>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %arg, <2 x i64> <i64 320498, i64 409623>)
  %cmp = icmp sgt <2 x i64> %add, <i64 1234, i64 3456>
  ret <2 x i1> %cmp
}

; ==============================================================================
; Tests with vector types and multiple uses
; ==============================================================================
; Vector analogue of @icmp_eq_multiuse: the second use keeps the intrinsic
; call and the compare alive on its result.
define <2 x i1> @icmp_eq_vector_multiuse_equal(<2 x i8> %arg) {
; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_multiuse_equal(
; CHECK-SAME: <2 x i8> [[ARG:%.*]]) {
; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> splat (i8 2))
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], splat (i8 5)
; CHECK-NEXT:    call void @use.v2i8(<2 x i8> [[ADD]])
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 2, i8 2>)
  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 5>
  call void @use.v2i8(<2 x i8> %add)
  ret <2 x i1> %cmp
}

; Intrinsic and opaque-use helper declarations for the tests above.
declare i8 @llvm.uadd.sat.i8(i8, i8)
declare i16 @llvm.uadd.sat.i16(i16, i16)
declare i32 @llvm.uadd.sat.i32(i32, i32)
declare i64 @llvm.uadd.sat.i64(i64, i64)

declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>)
declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)

declare void @use.i8(i8)
declare void @use.v2i8(<2 x i8>)
