; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; Test subtraction of a zero-extended i32 from an i64.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i64 @foo()

; Check SLGFR.
define zeroext i1 @f1(i64 %dummy, i64 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgfr %r3, %r4
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check using the overflow result for a branch.
define void @f2(i64 %dummy, i64 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgfr %r3, %r4
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    jgle foo@PLT
; CHECK-NEXT:  .LBB1_1: # %exit
; CHECK-NEXT:    br %r14
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  br i1 %obit, label %call, label %exit

call:
  tail call i64 @foo()
  br label %exit

exit:
  ret void
}

; ... and the same with the inverted direction.
define void @f3(i64 %dummy, i64 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgfr %r3, %r4
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    jgnle foo@PLT
; CHECK-NEXT:  .LBB2_1: # %exit
; CHECK-NEXT:    br %r14
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  br i1 %obit, label %exit, label %call

call:
  tail call i64 @foo()
  br label %exit

exit:
  ret void
}

; Check SLGF with no displacement.
define zeroext i1 @f4(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgf %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %b = load i32, ptr %src
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the aligned SLGF range.
define zeroext i1 @f5(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgf %r3, 524284(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 131071
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define zeroext i1 @f6(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    agfi %r4, 524288
; CHECK-NEXT:    slgf %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 131072
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the negative aligned SLGF range.
define zeroext i1 @f7(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgf %r3, -4(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 -1
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the low end of the SLGF range.
define zeroext i1 @f8(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgf %r3, -524288(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 -131072
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
define zeroext i1 @f9(i64 %dummy, i64 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    agfi %r4, -524292
; CHECK-NEXT:    slgf %r3, 0(%r4)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r3, 0(%r5)
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i32, ptr %src, i64 -131073
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check that SLGF allows an index.
define zeroext i1 @f10(i64 %src, i64 %index, i64 %a, ptr %res) {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slgf %r4, 524284(%r3,%r2)
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 33
; CHECK-NEXT:    stg %r4, 0(%r5)
; CHECK-NEXT:    br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524284
  %ptr = inttoptr i64 %add2 to ptr
  %b = load i32, ptr %ptr
  %bext = zext i32 %b to i64
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %bext)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check that subtractions of spilled values can use SLGF rather than SLGFR.
define zeroext i1 @f11(ptr %ptr0) {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    stmg %r6, %r15, 48(%r15)
; CHECK-NEXT:    .cfi_offset %r6, -112
; CHECK-NEXT:    .cfi_offset %r7, -104
; CHECK-NEXT:    .cfi_offset %r8, -96
; CHECK-NEXT:    .cfi_offset %r9, -88
; CHECK-NEXT:    .cfi_offset %r10, -80
; CHECK-NEXT:    .cfi_offset %r11, -72
; CHECK-NEXT:    .cfi_offset %r12, -64
; CHECK-NEXT:    .cfi_offset %r13, -56
; CHECK-NEXT:    .cfi_offset %r14, -48
; CHECK-NEXT:    .cfi_offset %r15, -40
; CHECK-NEXT:    aghi %r15, -168
; CHECK-NEXT:    .cfi_def_cfa_offset 328
; CHECK-NEXT:    lhi %r0, 100
; CHECK-NEXT:    lhi %r12, 100
; CHECK-NEXT:    a %r12, 0(%r2)
; CHECK-NEXT:    lhi %r13, 100
; CHECK-NEXT:    a %r13, 8(%r2)
; CHECK-NEXT:    lhi %r6, 100
; CHECK-NEXT:    a %r6, 16(%r2)
; CHECK-NEXT:    lhi %r7, 100
; CHECK-NEXT:    a %r7, 24(%r2)
; CHECK-NEXT:    lhi %r8, 100
; CHECK-NEXT:    a %r8, 32(%r2)
; CHECK-NEXT:    lhi %r9, 100
; CHECK-NEXT:    a %r9, 40(%r2)
; CHECK-NEXT:    lhi %r10, 100
; CHECK-NEXT:    a %r10, 48(%r2)
; CHECK-NEXT:    lhi %r11, 100
; CHECK-NEXT:    a %r11, 56(%r2)
; CHECK-NEXT:    lhi %r1, 100
; CHECK-NEXT:    a %r1, 64(%r2)
; CHECK-NEXT:    st %r1, 160(%r15) # 4-byte Folded Spill
; CHECK-NEXT:    a %r0, 72(%r2)
; CHECK-NEXT:    st %r0, 164(%r15) # 4-byte Folded Spill
; CHECK-NEXT:    st %r12, 0(%r2)
; CHECK-NEXT:    st %r13, 8(%r2)
; CHECK-NEXT:    st %r6, 16(%r2)
; CHECK-NEXT:    st %r7, 24(%r2)
; CHECK-NEXT:    st %r8, 32(%r2)
; CHECK-NEXT:    st %r9, 40(%r2)
; CHECK-NEXT:    st %r10, 48(%r2)
; CHECK-NEXT:    st %r11, 56(%r2)
; CHECK-NEXT:    st %r1, 64(%r2)
; CHECK-NEXT:    st %r0, 72(%r2)
; CHECK-NEXT:    brasl %r14, foo@PLT
; CHECK-NEXT:    slgfr %r2, %r12
; CHECK-NEXT:    ipm %r0
; CHECK-NEXT:    afi %r0, -536870912
; CHECK-NEXT:    srl %r0, 31
; CHECK-NEXT:    slgfr %r2, %r13
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r6
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r7
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r8
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r9
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r10
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgfr %r2, %r11
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgf %r2, 160(%r15) # 4-byte Folded Reload
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    slgf %r2, 164(%r15) # 4-byte Folded Reload
; CHECK-NEXT:    ipm %r1
; CHECK-NEXT:    afi %r1, -536870912
; CHECK-NEXT:    rosbg %r0, %r1, 63, 63, 33
; CHECK-NEXT:    risbg %r2, %r0, 63, 191, 0
; CHECK-NEXT:    lmg %r6, %r15, 216(%r15)
; CHECK-NEXT:    br %r14
  %ptr1 = getelementptr i32, ptr %ptr0, i64 2
  %ptr2 = getelementptr i32, ptr %ptr0, i64 4
  %ptr3 = getelementptr i32, ptr %ptr0, i64 6
  %ptr4 = getelementptr i32, ptr %ptr0, i64 8
  %ptr5 = getelementptr i32, ptr %ptr0, i64 10
  %ptr6 = getelementptr i32, ptr %ptr0, i64 12
  %ptr7 = getelementptr i32, ptr %ptr0, i64 14
  %ptr8 = getelementptr i32, ptr %ptr0, i64 16
  %ptr9 = getelementptr i32, ptr %ptr0, i64 18

  %val0 = load i32, ptr %ptr0
  %val1 = load i32, ptr %ptr1
  %val2 = load i32, ptr %ptr2
  %val3 = load i32, ptr %ptr3
  %val4 = load i32, ptr %ptr4
  %val5 = load i32, ptr %ptr5
  %val6 = load i32, ptr %ptr6
  %val7 = load i32, ptr %ptr7
  %val8 = load i32, ptr %ptr8
  %val9 = load i32, ptr %ptr9

  %frob0 = add i32 %val0, 100
  %frob1 = add i32 %val1, 100
  %frob2 = add i32 %val2, 100
  %frob3 = add i32 %val3, 100
  %frob4 = add i32 %val4, 100
  %frob5 = add i32 %val5, 100
  %frob6 = add i32 %val6, 100
  %frob7 = add i32 %val7, 100
  %frob8 = add i32 %val8, 100
  %frob9 = add i32 %val9, 100

  store i32 %frob0, ptr %ptr0
  store i32 %frob1, ptr %ptr1
  store i32 %frob2, ptr %ptr2
  store i32 %frob3, ptr %ptr3
  store i32 %frob4, ptr %ptr4
  store i32 %frob5, ptr %ptr5
  store i32 %frob6, ptr %ptr6
  store i32 %frob7, ptr %ptr7
  store i32 %frob8, ptr %ptr8
  store i32 %frob9, ptr %ptr9

  %ret = call i64 @foo()

  %ext0 = zext i32 %frob0 to i64
  %ext1 = zext i32 %frob1 to i64
  %ext2 = zext i32 %frob2 to i64
  %ext3 = zext i32 %frob3 to i64
  %ext4 = zext i32 %frob4 to i64
  %ext5 = zext i32 %frob5 to i64
  %ext6 = zext i32 %frob6 to i64
  %ext7 = zext i32 %frob7 to i64
  %ext8 = zext i32 %frob8 to i64
  %ext9 = zext i32 %frob9 to i64

  %t0 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %ret, i64 %ext0)
  %add0 = extractvalue {i64, i1} %t0, 0
  %obit0 = extractvalue {i64, i1} %t0, 1
  %t1 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add0, i64 %ext1)
  %add1 = extractvalue {i64, i1} %t1, 0
  %obit1 = extractvalue {i64, i1} %t1, 1
  %res1 = or i1 %obit0, %obit1
  %t2 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add1, i64 %ext2)
  %add2 = extractvalue {i64, i1} %t2, 0
  %obit2 = extractvalue {i64, i1} %t2, 1
  %res2 = or i1 %res1, %obit2
  %t3 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add2, i64 %ext3)
  %add3 = extractvalue {i64, i1} %t3, 0
  %obit3 = extractvalue {i64, i1} %t3, 1
  %res3 = or i1 %res2, %obit3
  %t4 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add3, i64 %ext4)
  %add4 = extractvalue {i64, i1} %t4, 0
  %obit4 = extractvalue {i64, i1} %t4, 1
  %res4 = or i1 %res3, %obit4
  %t5 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add4, i64 %ext5)
  %add5 = extractvalue {i64, i1} %t5, 0
  %obit5 = extractvalue {i64, i1} %t5, 1
  %res5 = or i1 %res4, %obit5
  %t6 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add5, i64 %ext6)
  %add6 = extractvalue {i64, i1} %t6, 0
  %obit6 = extractvalue {i64, i1} %t6, 1
  %res6 = or i1 %res5, %obit6
  %t7 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add6, i64 %ext7)
  %add7 = extractvalue {i64, i1} %t7, 0
  %obit7 = extractvalue {i64, i1} %t7, 1
  %res7 = or i1 %res6, %obit7
  %t8 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add7, i64 %ext8)
  %add8 = extractvalue {i64, i1} %t8, 0
  %obit8 = extractvalue {i64, i1} %t8, 1
  %res8 = or i1 %res7, %obit8
  %t9 = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %add8, i64 %ext9)
  %add9 = extractvalue {i64, i1} %t9, 0
  %obit9 = extractvalue {i64, i1} %t9, 1
  %res9 = or i1 %res8, %obit9

  ret i1 %res9
}

declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone