; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; Test 64-bit subtraction in which the second operand is variable.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i64 @foo()

; Check SLGR.
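; %dummy, %a, %b and %res arrive in %r2-%r5, so the subtraction itself is a
; single SLGR %r3, %r4.  The IPM/AFI/RISBG sequence below materializes the
; borrow as an i1: IPM copies the condition code into %r0 (as CC << 28),
; AFI -2^29 turns the borrow cases (CC 0 or 1) into a set sign bit, and RISBG
; rotates that bit down into bit 63 of %r2, zeroing the other bits.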
define zeroext i1 @f1(i64 %dummy, i64 %a, i64 %b, ptr %res) {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgr %r3, %r4
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check using the overflow result for a branch.
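; The borrow left in the condition code by SLGR is used directly by a
; conditional tail call: jgle branches to foo when CC is 0 or 1 (borrow), so
; no IPM sequence is needed.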
define void @f2(i64 %dummy, i64 %a, i64 %b, ptr %res) {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgr %r3, %r4
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    jgle foo@PLT
; CHECK-NEXT:  .LBB1_1: # %exit
; CHECK-NEXT:    br %r14
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  br i1 %obit, label %call, label %exit

call:
  tail call i64 @foo()
  br label %exit

exit:
  ret void
}

; ... and the same with the inverted direction.
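; As in f2, but with the branch condition inverted: jgnle calls foo only when
; CC is 2 or 3, i.e. when the subtraction does not overflow.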
define void @f3(i64 %dummy, i64 %a, i64 %b, ptr %res) {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgr %r3, %r4
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    jgnle foo@PLT
; CHECK-NEXT:  .LBB2_1: # %exit
; CHECK-NEXT:    br %r14
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  br i1 %obit, label %exit, label %call

call:
  tail call i64 @foo()
  br label %exit

exit:
  ret void
}

; Check SLG with no displacement.
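; SLG is the memory form of the subtraction, so the load of %b is folded into
; the subtract rather than needing a separate LG + SLGR.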
define zeroext i1 @f4(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slg %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %b = load i64, ptr %src
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the aligned SLG range.
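; SLG has a signed 20-bit displacement (-524288..524287); 524280 (65535 * 8)
; is the highest doubleword-aligned offset that still fits.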
define zeroext i1 @f5(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slg %r3, 524280(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i64, ptr %src, i64 65535
  %b = load i64, ptr %ptr
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
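; 524288 no longer fits in the signed 20-bit displacement, so the base is
; first adjusted with AGFI and the SLG then uses displacement 0.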
define zeroext i1 @f6(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    agfi %r4, 524288
; CHECK-NEXT:    slg %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i64, ptr %src, i64 65536
  %b = load i64, ptr %ptr
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the negative aligned SLG range.
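; Negative displacements are encodable too, so the -8 offset folds straight
; into the SLG.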
define zeroext i1 @f7(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slg %r3, -8(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i64, ptr %src, i64 -1
  %b = load i64, ptr %ptr
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the low end of the SLG range.
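; -524288 (-65536 * 8) is the minimum value of the signed 20-bit displacement,
; so it can still be encoded directly.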
define zeroext i1 @f8(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slg %r3, -524288(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i64, ptr %src, i64 -65536
  %b = load i64, ptr %ptr
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
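; -524296 is below the signed 20-bit displacement range, so the base is again
; adjusted with AGFI before the SLG.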
define zeroext i1 @f9(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    agfi %r4, -524296
; CHECK-NEXT:    slg %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i64, ptr %src, i64 -65537
  %b = load i64, ptr %ptr
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check that SLG allows an index.
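; SLG is an RXY-format instruction with base + index + displacement
; addressing, so both adds and the 524280 offset fold into the address.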
define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, ptr %res) {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slg %r4, 524280(%r3,%r2)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r4, 0(%r5)
; CHECK-NEXT:    br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524280
  %ptr = inttoptr i64 %add2 to ptr
  %b = load i64, ptr %ptr
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check that subtractions of spilled values can use SLG rather than SLGR.
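; The call to @foo clobbers the call-clobbered registers, and the ten loaded
; values do not all fit in the callee-saved GPRs %r6-%r13, so two of them are
; spilled and then subtracted directly from their spill slots with SLG.
; Each overflow bit is extracted with IPM/AFI (the first via SRL, the rest
; merged into %r0 with ROSBG) and the accumulated flag ends up in %r2.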
define zeroext i1 @f11(ptr %ptr0) {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    stmg %r6, %r15, 48(%r15)
; CHECK-NEXT:    .cfi_offset %r6, -112
; CHECK-NEXT:    .cfi_offset %r7, -104
; CHECK-NEXT:    .cfi_offset %r8, -96
; CHECK-NEXT:    .cfi_offset %r9, -88
; CHECK-NEXT:    .cfi_offset %r10, -80
; CHECK-NEXT:    .cfi_offset %r11, -72
; CHECK-NEXT:    .cfi_offset %r12, -64
; CHECK-NEXT:    .cfi_offset %r13, -56
; CHECK-NEXT:    .cfi_offset %r14, -48
; CHECK-NEXT:    .cfi_offset %r15, -40
; CHECK-NEXT:    aghi %r15, -176
; CHECK-NEXT:    .cfi_def_cfa_offset 336
; CHECK-NEXT:    lg %r6, 0(%r2)
; CHECK-NEXT:    lg %r13, 16(%r2)
; CHECK-NEXT:    lg %r12, 32(%r2)
; CHECK-NEXT:    lg %r7, 48(%r2)
; CHECK-NEXT:    lg %r8, 64(%r2)
; CHECK-NEXT:    lg %r9, 80(%r2)
; CHECK-NEXT:    lg %r10, 96(%r2)
; CHECK-NEXT:    lg %r11, 112(%r2)
; CHECK-NEXT:    mvc 160(8,%r15), 128(%r2) # 8-byte Folded Spill
; CHECK-NEXT:    mvc 168(8,%r15), 144(%r2) # 8-byte Folded Spill
; CHECK-NEXT:    brasl %r14, foo@PLT
; CHECK-NEXT:    slgr %r2, %r6
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    srl %r0, 31
; CHECK-NEXT:    slgr %r2, %r13
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgr %r2, %r12
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgr %r2, %r7
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgr %r2, %r8
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgr %r2, %r9
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgr %r2, %r10
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgr %r2, %r11
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slg %r2, 160(%r15) # 8-byte Folded Reload
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slg %r2, 168(%r15) # 8-byte Folded Reload
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 0
; CHECK-NEXT:    lmg %r6, %r15, 224(%r15)
; CHECK-NEXT:    br %r14
  %ptr1 = getelementptr i64, ptr %ptr0, i64 2
  %ptr2 = getelementptr i64, ptr %ptr0, i64 4
  %ptr3 = getelementptr i64, ptr %ptr0, i64 6
  %ptr4 = getelementptr i64, ptr %ptr0, i64 8
  %ptr5 = getelementptr i64, ptr %ptr0, i64 10
  %ptr6 = getelementptr i64, ptr %ptr0, i64 12
  %ptr7 = getelementptr i64, ptr %ptr0, i64 14
  %ptr8 = getelementptr i64, ptr %ptr0, i64 16
  %ptr9 = getelementptr i64, ptr %ptr0, i64 18

  %val0 = load i64, ptr %ptr0
  %val1 = load i64, ptr %ptr1
  %val2 = load i64, ptr %ptr2
  %val3 = load i64, ptr %ptr3
  %val4 = load i64, ptr %ptr4
  %val5 = load i64, ptr %ptr5
  %val6 = load i64, ptr %ptr6
  %val7 = load i64, ptr %ptr7
  %val8 = load i64, ptr %ptr8
  %val9 = load i64, ptr %ptr9

  %ret = call i64 @foo()

  %t0 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %ret, i64 %val0)
  %add0 = extractvalue {i64, i1} %t0, 0
  %obit0 = extractvalue {i64, i1} %t0, 1
  %t1 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add0, i64 %val1)
  %add1 = extractvalue {i64, i1} %t1, 0
  %obit1 = extractvalue {i64, i1} %t1, 1
  %res1 = or i1 %obit0, %obit1
  %t2 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add1, i64 %val2)
  %add2 = extractvalue {i64, i1} %t2, 0
  %obit2 = extractvalue {i64, i1} %t2, 1
  %res2 = or i1 %res1, %obit2
  %t3 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add2, i64 %val3)
  %add3 = extractvalue {i64, i1} %t3, 0
  %obit3 = extractvalue {i64, i1} %t3, 1
  %res3 = or i1 %res2, %obit3
  %t4 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add3, i64 %val4)
  %add4 = extractvalue {i64, i1} %t4, 0
  %obit4 = extractvalue {i64, i1} %t4, 1
  %res4 = or i1 %res3, %obit4
  %t5 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add4, i64 %val5)
  %add5 = extractvalue {i64, i1} %t5, 0
  %obit5 = extractvalue {i64, i1} %t5, 1
  %res5 = or i1 %res4, %obit5
  %t6 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add5, i64 %val6)
  %add6 = extractvalue {i64, i1} %t6, 0
  %obit6 = extractvalue {i64, i1} %t6, 1
  %res6 = or i1 %res5, %obit6
  %t7 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add6, i64 %val7)
  %add7 = extractvalue {i64, i1} %t7, 0
  %obit7 = extractvalue {i64, i1} %t7, 1
  %res7 = or i1 %res6, %obit7
  %t8 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add7, i64 %val8)
  %add8 = extractvalue {i64, i1} %t8, 0
  %obit8 = extractvalue {i64, i1} %t8, 1
  %res8 = or i1 %res7, %obit8
  %t9 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add8, i64 %val9)
  %add9 = extractvalue {i64, i1} %t9, 0
  %obit9 = extractvalue {i64, i1} %t9, 1
  %res9 = or i1 %res8, %obit9

  ret i1 %res9
}

declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone