; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; Test 32-bit unsigned addition with overflow in which the second operand is variable.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i32 @foo()

; Check ALR.
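; In these tests the carry is read back from the condition code: IPM copies the
; CC set by the addition into bits 34-35 of a scratch register, and RISBG rotates
; the carry indication (the high CC bit, set for CC values 2 and 3) into bit 63 of
; the result register while zeroing the rest, giving the zero-extended i1 overflow
; result.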
define zeroext i1 @f1(i32 %dummy, i32 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    alr %r3, %r4
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check using the overflow result for a branch.
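; When the overflow bit only feeds a conditional branch, it is consumed directly
; as a branch on the condition code left by ALR, so no IPM/RISBG sequence is
; needed to materialize it.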
define void @f2(i32 %dummy, i32 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    alr %r3, %r4
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    jgnle foo@PLT
; CHECK-NEXT:  .LBB1_1: # %exit
; CHECK-NEXT:    br %r14
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  br i1 %obit, label %call, label %exit

call:
  tail call i32 @foo()
  br label %exit

exit:
  ret void
}

; ... and the same with the inverted direction.
define void @f3(i32 %dummy, i32 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    alr %r3, %r4
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    jgle foo@PLT
; CHECK-NEXT:  .LBB2_1: # %exit
; CHECK-NEXT:    br %r14
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  br i1 %obit, label %exit, label %call

call:
  tail call i32 @foo()
  br label %exit

exit:
  ret void
}

; Check the low end of the AL range.
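; AL takes a 12-bit unsigned displacement (0..4095); the ALY form tested further
; down extends this to a 20-bit signed displacement (-524288..524287), and offsets
; outside that range need the address to be formed separately (here with AGFI).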
define zeroext i1 @f4(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    al %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %b = load i32, ptr %src
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the aligned AL range.
define zeroext i1 @f5(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    al %r3, 4092(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 1023
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the next word up, which should use ALY instead of AL.
define zeroext i1 @f6(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    aly %r3, 4096(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 1024
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the aligned ALY range.
define zeroext i1 @f7(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    aly %r3, 524284(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 131071
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define zeroext i1 @f8(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    agfi %r4, 524288
; CHECK-NEXT:    al %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 131072
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the negative aligned ALY range.
define zeroext i1 @f9(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    aly %r3, -4(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 -1
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the low end of the ALY range.
define zeroext i1 @f10(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    aly %r3, -524288(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 -131072
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the next word down, which needs separate address logic.
; Other sequences besides this one would be OK.
define zeroext i1 @f11(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    agfi %r4, -524292
; CHECK-NEXT:    al %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 -131073
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check that AL allows an index.
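; AL and ALY support base+index+displacement addressing, so the %src+%index sum
; is folded into the memory operand instead of being computed separately.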
define zeroext i1 @f12(i64 %src, i64 %index, i32 %a, ptr %res) {
; CHECK-LABEL: f12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    al %r4, 4092(%r3,%r2)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r4, 0(%r5)
; CHECK-NEXT:    br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 4092
  %ptr = inttoptr i64 %add2 to ptr
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check that ALY allows an index.
define zeroext i1 @f13(i64 %src, i64 %index, i32 %a, ptr %res) {
; CHECK-LABEL: f13:
; CHECK:       # %bb.0:
; CHECK-NEXT:    aly %r4, 4096(%r3,%r2)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT:    st %r4, 0(%r5)
; CHECK-NEXT:    br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 4096
  %ptr = inttoptr i64 %add2 to ptr
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check that additions of spilled values can use AL rather than ALR.
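; The first eight loaded values survive the call in registers and use ALR; the
; last two are spilled across the call and added straight from the stack with AL.
; Each carry after the first is ORed into the accumulated overflow flag with an
; IPM/ROSBG pair, matching the chain of `or i1` results below.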
define zeroext i1 @f14(ptr %ptr0) {
; CHECK-LABEL: f14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    stmg %r6, %r15, 48(%r15)
; CHECK-NEXT:    .cfi_offset %r6, -112
; CHECK-NEXT:    .cfi_offset %r7, -104
; CHECK-NEXT:    .cfi_offset %r8, -96
; CHECK-NEXT:    .cfi_offset %r9, -88
; CHECK-NEXT:    .cfi_offset %r10, -80
; CHECK-NEXT:    .cfi_offset %r11, -72
; CHECK-NEXT:    .cfi_offset %r12, -64
; CHECK-NEXT:    .cfi_offset %r13, -56
; CHECK-NEXT:    .cfi_offset %r14, -48
; CHECK-NEXT:    .cfi_offset %r15, -40
; CHECK-NEXT:    aghi %r15, -168
; CHECK-NEXT:    .cfi_def_cfa_offset 328
; CHECK-NEXT:    l %r6, 0(%r2)
; CHECK-NEXT:    l %r13, 8(%r2)
; CHECK-NEXT:    l %r12, 16(%r2)
; CHECK-NEXT:    l %r7, 24(%r2)
; CHECK-NEXT:    l %r8, 32(%r2)
; CHECK-NEXT:    l %r9, 40(%r2)
; CHECK-NEXT:    l %r10, 48(%r2)
; CHECK-NEXT:    l %r11, 56(%r2)
; CHECK-NEXT:    mvc 160(4,%r15), 64(%r2) # 4-byte Folded Spill
; CHECK-NEXT:    mvc 164(4,%r15), 72(%r2) # 4-byte Folded Spill
; CHECK-NEXT:    brasl %r14, foo@PLT
; CHECK-NEXT:    alr %r2, %r6
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    risbg %r0, %r0, 63, 191, 35
; CHECK-NEXT:    alr %r2, %r13
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT:    alr %r2, %r12
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT:    alr %r2, %r7
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT:    alr %r2, %r8
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT:    alr %r2, %r9
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT:    alr %r2, %r10
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT:    alr %r2, %r11
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT:    al %r2, 160(%r15) # 4-byte Folded Reload
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT:    al %r2, 164(%r15) # 4-byte Folded Reload
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 0
; CHECK-NEXT:    lmg %r6, %r15, 216(%r15)
; CHECK-NEXT:    br %r14
  %ptr1 = getelementptr i32, ptr %ptr0, i64 2
  %ptr2 = getelementptr i32, ptr %ptr0, i64 4
  %ptr3 = getelementptr i32, ptr %ptr0, i64 6
  %ptr4 = getelementptr i32, ptr %ptr0, i64 8
  %ptr5 = getelementptr i32, ptr %ptr0, i64 10
  %ptr6 = getelementptr i32, ptr %ptr0, i64 12
  %ptr7 = getelementptr i32, ptr %ptr0, i64 14
  %ptr8 = getelementptr i32, ptr %ptr0, i64 16
  %ptr9 = getelementptr i32, ptr %ptr0, i64 18

  %val0 = load i32, ptr %ptr0
  %val1 = load i32, ptr %ptr1
  %val2 = load i32, ptr %ptr2
  %val3 = load i32, ptr %ptr3
  %val4 = load i32, ptr %ptr4
  %val5 = load i32, ptr %ptr5
  %val6 = load i32, ptr %ptr6
  %val7 = load i32, ptr %ptr7
  %val8 = load i32, ptr %ptr8
  %val9 = load i32, ptr %ptr9

  %ret = call i32 @foo()

  %t0 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %ret, i32 %val0)
  %add0 = extractvalue {i32, i1} %t0, 0
  %obit0 = extractvalue {i32, i1} %t0, 1
  %t1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add0, i32 %val1)
  %add1 = extractvalue {i32, i1} %t1, 0
  %obit1 = extractvalue {i32, i1} %t1, 1
  %res1 = or i1 %obit0, %obit1
  %t2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add1, i32 %val2)
  %add2 = extractvalue {i32, i1} %t2, 0
  %obit2 = extractvalue {i32, i1} %t2, 1
  %res2 = or i1 %res1, %obit2
  %t3 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add2, i32 %val3)
  %add3 = extractvalue {i32, i1} %t3, 0
  %obit3 = extractvalue {i32, i1} %t3, 1
  %res3 = or i1 %res2, %obit3
  %t4 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add3, i32 %val4)
  %add4 = extractvalue {i32, i1} %t4, 0
  %obit4 = extractvalue {i32, i1} %t4, 1
  %res4 = or i1 %res3, %obit4
  %t5 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add4, i32 %val5)
  %add5 = extractvalue {i32, i1} %t5, 0
  %obit5 = extractvalue {i32, i1} %t5, 1
  %res5 = or i1 %res4, %obit5
  %t6 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add5, i32 %val6)
  %add6 = extractvalue {i32, i1} %t6, 0
  %obit6 = extractvalue {i32, i1} %t6, 1
  %res6 = or i1 %res5, %obit6
  %t7 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add6, i32 %val7)
  %add7 = extractvalue {i32, i1} %t7, 0
  %obit7 = extractvalue {i32, i1} %t7, 1
  %res7 = or i1 %res6, %obit7
  %t8 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add7, i32 %val8)
  %add8 = extractvalue {i32, i1} %t8, 0
  %obit8 = extractvalue {i32, i1} %t8, 1
  %res8 = or i1 %res7, %obit8
  %t9 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add8, i32 %val9)
  %add9 = extractvalue {i32, i1} %t9, 0
  %obit9 = extractvalue {i32, i1} %t9, 1
  %res9 = or i1 %res8, %obit9

  ret i1 %res9
}

declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone