; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64-linux-gnu -O2 -o - %s | FileCheck %s
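; When the sign of one operand of a saturating add/subtract is known, only one
; direction of signed overflow is possible, so the checked lowering materializes
; at most a single fixed saturation constant (INT64_MIN or INT64_MAX) and
; selects it with a csel on the overflow flag.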

define i64 @test_ssub_nonneg_rhs(i64 %x) {
; CHECK-LABEL: test_ssub_nonneg_rhs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
; CHECK-NEXT:    subs x9, x0, #1
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %sat = call i64 @llvm.ssub.sat.i64(i64 %x, i64 1)
  ret i64 %sat
}

define i64 @test_ssub_neg_rhs(i64 %x) {
; CHECK-LABEL: test_ssub_neg_rhs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #9223372036854775807 // =0x7fffffffffffffff
; CHECK-NEXT:    adds x9, x0, #1
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %sat = call i64 @llvm.ssub.sat.i64(i64 %x, i64 -1)
  ret i64 %sat
}

define i64 @test_sadd_nonneg_rhs(i64 %x) {
; CHECK-LABEL: test_sadd_nonneg_rhs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #9223372036854775807 // =0x7fffffffffffffff
; CHECK-NEXT:    adds x9, x0, #1
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %sat = call i64 @llvm.sadd.sat.i64(i64 %x, i64 1)
  ret i64 %sat
}


define i64 @test_sadd_neg_rhs(i64 %x) {
; CHECK-LABEL: test_sadd_neg_rhs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
; CHECK-NEXT:    subs x9, x0, #1
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %sat = call i64 @llvm.sadd.sat.i64(i64 %x, i64 -1)
  ret i64 %sat
}

define i64 @test_ssub_nonneg_lhs(i64 %x) {
; CHECK-LABEL: test_ssub_nonneg_lhs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    mov x9, #9223372036854775807 // =0x7fffffffffffffff
; CHECK-NEXT:    subs x8, x8, x0
; CHECK-NEXT:    csel x0, x9, x8, vs
; CHECK-NEXT:    ret
  %sat = call i64 @llvm.ssub.sat.i64(i64 1, i64 %x)
  ret i64 %sat
}

define i64 @test_ssub_neg_lhs(i64 %x) {
; CHECK-LABEL: test_ssub_neg_lhs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mvn x0, x0
; CHECK-NEXT:    ret
  %sat = call i64 @llvm.ssub.sat.i64(i64 -1, i64 %x)
  ret i64 %sat
}

define i64 @test_sadd_nonneg_lhs(i64 %x) {
; CHECK-LABEL: test_sadd_nonneg_lhs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #9223372036854775807 // =0x7fffffffffffffff
; CHECK-NEXT:    adds x9, x0, #1
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %sat = call i64 @llvm.sadd.sat.i64(i64 1, i64 %x)
  ret i64 %sat
}

define i64 @test_sadd_neg_lhs(i64 %x) {
; CHECK-LABEL: test_sadd_neg_lhs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
; CHECK-NEXT:    subs x9, x0, #1
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %sat = call i64 @llvm.sadd.sat.i64(i64 -1, i64 %x)
  ret i64 %sat
}

define i64 @test_ssub_nonneg_rhs_nonconst(i64 %x) {
; CHECK-LABEL: test_ssub_nonneg_rhs_nonconst:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #123 // =0x7b
; CHECK-NEXT:    mov x9, #-9223372036854775808 // =0x8000000000000000
; CHECK-NEXT:    and x8, x0, x8
; CHECK-NEXT:    subs x8, x0, x8
; CHECK-NEXT:    csel x0, x9, x8, vs
; CHECK-NEXT:    ret
  %y = and i64 %x, 123
  %sat = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y)
  ret i64 %sat
}

define i64 @test_ssub_neg_rhs_nonconst(i64 %x) {
; CHECK-LABEL: test_ssub_neg_rhs_nonconst:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmn x0, #1
; CHECK-NEXT:    mov x8, #9223372036854775807 // =0x7fffffffffffffff
; CHECK-NEXT:    csinv x9, x0, xzr, lt
; CHECK-NEXT:    subs x9, x0, x9
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %y = call i64 @llvm.smin(i64 %x, i64 -1)
  %sat = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y)
  ret i64 %sat
}

define i64 @test_sadd_nonneg_rhs_nonconst(i64 %x) {
; CHECK-LABEL: test_sadd_nonneg_rhs_nonconst:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, #1
; CHECK-NEXT:    mov x8, #9223372036854775807 // =0x7fffffffffffffff
; CHECK-NEXT:    csinc x9, x0, xzr, gt
; CHECK-NEXT:    adds x9, x0, x9
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %y = call i64 @llvm.smax(i64 %x, i64 1)
  %sat = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y)
  ret i64 %sat
}


define i64 @test_sadd_neg_rhs_nonconst(i64 %x) {
; CHECK-LABEL: test_sadd_neg_rhs_nonconst:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr x9, x0, #0x8000000000000000
; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
; CHECK-NEXT:    adds x9, x0, x9
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %y = or i64 %x, u0x8000000000000000
  %sat = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y)
  ret i64 %sat
}

define i64 @test_ssub_nonneg_lhs_nonconst(i64 %x) {
; CHECK-LABEL: test_ssub_nonneg_lhs_nonconst:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #123 // =0x7b
; CHECK-NEXT:    mov x9, #9223372036854775807 // =0x7fffffffffffffff
; CHECK-NEXT:    and x8, x0, x8
; CHECK-NEXT:    subs x8, x8, x0
; CHECK-NEXT:    csel x0, x9, x8, vs
; CHECK-NEXT:    ret
  %y = and i64 %x, 123
  %sat = call i64 @llvm.ssub.sat.i64(i64 %y, i64 %x)
  ret i64 %sat
}

define i64 @test_ssub_neg_lhs_nonconst(i64 %x) {
; CHECK-LABEL: test_ssub_neg_lhs_nonconst:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmn x0, #1
; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
; CHECK-NEXT:    csinv x9, x0, xzr, lt
; CHECK-NEXT:    subs x9, x9, x0
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %y = call i64 @llvm.smin(i64 %x, i64 -1)
  %sat = call i64 @llvm.ssub.sat.i64(i64 %y, i64 %x)
  ret i64 %sat
}

define i64 @test_sadd_nonneg_lhs_nonconst(i64 %x) {
; CHECK-LABEL: test_sadd_nonneg_lhs_nonconst:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp x0, #1
; CHECK-NEXT:    mov x8, #9223372036854775807 // =0x7fffffffffffffff
; CHECK-NEXT:    csinc x9, x0, xzr, gt
; CHECK-NEXT:    adds x9, x9, x0
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %y = call i64 @llvm.smax(i64 %x, i64 1)
  %sat = call i64 @llvm.sadd.sat.i64(i64 %y, i64 %x)
  ret i64 %sat
}

define i64 @test_sadd_neg_lhs_nonconst(i64 %x) {
; CHECK-LABEL: test_sadd_neg_lhs_nonconst:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr x9, x0, #0x8000000000000000
; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
; CHECK-NEXT:    adds x9, x9, x0
; CHECK-NEXT:    csel x0, x8, x9, vs
; CHECK-NEXT:    ret
  %y = or i64 %x, u0x8000000000000000
  %sat = call i64 @llvm.sadd.sat.i64(i64 %y, i64 %x)
  ret i64 %sat
}

declare i64 @llvm.sadd.sat.i64(i64, i64)
declare i64 @llvm.ssub.sat.i64(i64, i64)
declare i64 @llvm.smax(i64, i64)
declare i64 @llvm.smin(i64, i64)