; Source: llvm/test/Transforms/CodeGenPrepare/SPARC/overflow-intrinsics.ll (revision f1ec0d12bb0843f0deab83ef2b5cf1339cbc4f0b)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes='require<profile-summary>,function(codegenprepare)' -S < %s | FileCheck %s
; RUN: opt -enable-debugify -passes='require<profile-summary>,function(codegenprepare)' -S < %s 2>&1 | FileCheck %s -check-prefix=DEBUG

; Subset of tests from llvm/tests/Transforms/CodeGenPrepare/X86/overflow-intrinsics.ll
; to test shouldFormOverflowOp on SPARC, where it is not profitable to create
; overflow intrinsics if the math part is not used.

target triple = "sparc64-unknown-linux"

; Only the overflow bit of the add is used (the sum itself is dead), so on
; SPARC the add/icmp pair is kept and no @llvm.uadd.with.overflow is formed.
define i64 @uaddo1_overflow_used(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo1_overflow_used(
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[ADD]], [[A]]
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

; The math result is also stored, so forming @llvm.uadd.with.overflow is
; expected even on SPARC (overflow pattern: sum ult lhs).
define i64 @uaddo1_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp {
; CHECK-LABEL: @uaddo1_math_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    store i64 [[MATH]], ptr [[RES:%.*]]
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, ptr %res
  ret i64 %Q
}

; Same as uaddo1 but the compare is against the other addend (sum ult rhs);
; the math is unused, so no intrinsic is formed on SPARC.
define i64 @uaddo2_overflow_used(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo2_overflow_used(
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[ADD]], [[B]]
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

; Math result stored as well: @llvm.uadd.with.overflow is formed (overflow
; pattern: sum ult rhs).
define i64 @uaddo2_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp {
; CHECK-LABEL: @uaddo2_math_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    store i64 [[MATH]], ptr [[RES:%.*]]
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, ptr %res
  ret i64 %Q
}

; Commuted overflow pattern (rhs ugt sum); math unused, so the add/icmp pair
; is kept and no intrinsic is formed on SPARC.
define i64 @uaddo3_overflow_used(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo3_overflow_used(
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[B]], [[ADD]]
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

; Commuted overflow pattern with the math result stored:
; @llvm.uadd.with.overflow is formed.
define i64 @uaddo3_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp {
; CHECK-LABEL: @uaddo3_math_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    store i64 [[MATH]], ptr [[RES:%.*]]
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, ptr %res
  ret i64 %Q
}

; Unsigned-subtract overflow where only the overflow bit is used: the plain
; sub/icmp remain and no @llvm.usub.with.overflow is formed.
define i1 @usubo_ult_i64_overflow_used(i64 %x, i64 %y, ptr %p) {
; CHECK-LABEL: @usubo_ult_i64_overflow_used(
; CHECK-NEXT:    [[S:%.*]] = sub i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[OV:%.*]] = icmp ult i64 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[OV]]
;
  %s = sub i64 %x, %y
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

; Even with the subtraction result stored, the sub/icmp pair is kept here and
; no @llvm.usub.with.overflow is formed (per the expected output below).
define i1 @usubo_ult_i64_math_overflow_used(i64 %x, i64 %y, ptr %p) {
; CHECK-LABEL: @usubo_ult_i64_math_overflow_used(
; CHECK-NEXT:    [[S:%.*]] = sub i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    store i64 [[S]], ptr [[P:%.*]]
; CHECK-NEXT:    [[OV:%.*]] = icmp ult i64 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[OV]]
;
  %s = sub i64 %x, %y
  store i64 %s, ptr %p
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

; Check that every instruction inserted by -passes='require<profile-summary>,function(codegenprepare)' has a debug location.
; DEBUG: CheckModuleDebugify: PASS