; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

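; Tests for merging the constant of a preceding `add nuw` into the constant
; operand of @llvm.uadd.with.overflow. The merge is only sound when the add
; cannot wrap unsigned (nuw) and the two constants sum without unsigned
; overflow.
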
declare { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)

declare { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8>, <2 x i8>)

declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)

declare { i8, i1 } @llvm.uadd.with.overflow.i8(i8, i8)

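; Basic scalar case: 7 + 13 = 20 fits in i32, so the two constants can be combined.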
define { i32, i1 } @simple_fold(i32 %x) {
; CHECK-LABEL: @simple_fold(
; CHECK-NEXT:    [[A:%.*]] = add nuw i32 [[X:%.*]], 7
; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 13)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add nuw i32 %x, 7
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 13)
  ret { i32, i1 } %b
}

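; 200 + 55 = 255 is exactly the unsigned i8 maximum, so combining the constants is still safe.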
define { i8, i1 } @fold_on_constant_add_no_overflow(i8 %x) {
; CHECK-LABEL: @fold_on_constant_add_no_overflow(
; CHECK-NEXT:    [[A:%.*]] = add nuw i8 [[X:%.*]], -56
; CHECK-NEXT:    [[B:%.*]] = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[A]], i8 55)
; CHECK-NEXT:    ret { i8, i1 } [[B]]
;
  %a = add nuw i8 %x, 200
  %b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 55)
  ret { i8, i1 } %b
}

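; 200 + 56 = 256 wraps in i8, so the constants must not be combined.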
define { i8, i1 } @no_fold_on_constant_add_overflow(i8 %x) {
; CHECK-LABEL: @no_fold_on_constant_add_overflow(
; CHECK-NEXT:    [[A:%.*]] = add nuw i8 [[X:%.*]], -56
; CHECK-NEXT:    [[B:%.*]] = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 [[A]], i8 56)
; CHECK-NEXT:    ret { i8, i1 } [[B]]
;
  %a = add nuw i8 %x, 200
  %b = tail call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %a, i8 56)
  ret { i8, i1 } %b
}

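; Non-splat vector constants: no lane's sum wraps (199 + 55 = 254, 200 + 55 = 255),
; but per the test name the fold is still not expected for non-splat constants.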
define { <2 x i8>, <2 x i1> } @no_fold_vector_no_overflow(<2 x i8> %x) {
; CHECK-LABEL: @no_fold_vector_no_overflow(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i8> [[X:%.*]], <i8 -57, i8 -56>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> [[A]], <2 x i8> <i8 55, i8 55>)
; CHECK-NEXT:    ret { <2 x i8>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i8> %x, <i8 199, i8 200>
  %b = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> %a, <2 x i8> <i8 55, i8 55>)
  ret { <2 x i8>, <2 x i1> } %b
}

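; The second lane wraps (201 + 55 = 256), so no fold.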
define { <2 x i8>, <2 x i1> } @no_fold_vector_overflow(<2 x i8> %x) {
; CHECK-LABEL: @no_fold_vector_overflow(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i8> [[X:%.*]], <i8 -56, i8 -55>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> [[A]], <2 x i8> <i8 55, i8 55>)
; CHECK-NEXT:    ret { <2 x i8>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i8> %x, <i8 200, i8 201>
  %b = tail call { <2 x i8>, <2 x i1> } @llvm.uadd.with.overflow.v2i8(<2 x i8> %a, <2 x i8> <i8 55, i8 55>)
  ret { <2 x i8>, <2 x i1> } %b
}

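; Splat vector constants: every lane sums to 42 with no overflow, so this is the
; vector form of the basic fold.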
define { <2 x i32>, <2 x i1> } @fold_simple_splat_constant(<2 x i32> %x) {
; CHECK-LABEL: @fold_simple_splat_constant(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], <i32 12, i32 12>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, <i32 12, i32 12>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}

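; An undef lane in the add's constant prevents the fold.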
define { <2 x i32>, <2 x i1> } @no_fold_splat_undef_constant(<2 x i32> %x) {
; CHECK-LABEL: @no_fold_splat_undef_constant(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], <i32 12, i32 undef>
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, <i32 12, i32 undef>
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}

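; The add's second operand is not a constant, so there is nothing to combine.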
define { <2 x i32>, <2 x i1> } @no_fold_splat_not_constant(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @no_fold_splat_not_constant(
; CHECK-NEXT:    [[A:%.*]] = add nuw <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[B:%.*]] = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[A]], <2 x i32> <i32 30, i32 30>)
; CHECK-NEXT:    ret { <2 x i32>, <2 x i1> } [[B]]
;
  %a = add nuw <2 x i32> %x, %y
  %b = tail call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %a, <2 x i32> <i32 30, i32 30>)
  ret { <2 x i32>, <2 x i1> } %b
}

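; Having nsw in addition to nuw does not block the fold; nuw is what guarantees
; the add cannot wrap unsigned.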
define { i32, i1 } @fold_nuwnsw(i32 %x) {
; CHECK-LABEL: @fold_nuwnsw(
; CHECK-NEXT:    [[A:%.*]] = add nuw nsw i32 [[X:%.*]], 12
; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add nuw nsw i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}

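; nsw alone is not enough: the add may still wrap unsigned, so combining the
; constants would be unsound.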
define { i32, i1 } @no_fold_nsw(i32 %x) {
; CHECK-LABEL: @no_fold_nsw(
; CHECK-NEXT:    [[A:%.*]] = add nsw i32 [[X:%.*]], 12
; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add nsw i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 30)
  ret { i32, i1 } %b
}

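; No wrap flags: no fold. The input also passes the constant as the first
; intrinsic operand; InstCombine canonicalizes it to the second, as the CHECK
; lines show.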
define { i32, i1 } @no_fold_wrapped_add(i32 %x) {
; CHECK-LABEL: @no_fold_wrapped_add(
; CHECK-NEXT:    [[A:%.*]] = add i32 [[X:%.*]], 12
; CHECK-NEXT:    [[B:%.*]] = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 30)
; CHECK-NEXT:    ret { i32, i1 } [[B]]
;
  %a = add i32 %x, 12
  %b = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 30, i32 %a)
  ret { i32, i1 } %b
}