; RUN: llc -mtriple=s390x-linux-gnu < %s | FileCheck %s

; Trivial patchpoint codegen
;
define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: trivial_patchpoint_codegen:
; CHECK: llilf %r1, 559038736
; CHECK-NEXT: basr %r14, %r1
; CHECK-NEXT: bcr 0, %r0
; CHECK: lgr [[REG0:%r[0-9]+]], %r2
; CHECK: llilf %r1, 559038737
; CHECK-NEXT: basr %r14, %r1
; CHECK-NEXT: bcr 0, %r0
; CHECK: lgr %r2, [[REG0]]
; CHECK: br %r14
  %resolveCall2 = inttoptr i64 559038736 to ptr
  %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 10, ptr %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
  %resolveCall3 = inttoptr i64 559038737 to ptr
  tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 10, ptr %resolveCall3, i32 2, i64 %p1, i64 %result)
  ret i64 %result
}

; Trivial symbolic patchpoint codegen.
;

declare i64 @foo(i64 %p1, i64 %p2)
define i64 @trivial_symbolic_patchpoint_codegen(i64 %p1, i64 %p2) {
entry:
; CHECK-LABEL: trivial_symbolic_patchpoint_codegen:
; CHECK: brasl %r14, foo@PLT
; CHECK-NEXT: bcr 0, %r0
; CHECK: br %r14
  %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 9, i32 8, ptr @foo, i32 2, i64 %p1, i64 %p2)
  ret i64 %result
}


; Caller frame metadata with stackmaps. This should not be optimized
; as a leaf function.
;
; CHECK-LABEL: caller_meta_leaf
; CHECK: aghi %r15, -184
; CHECK: .Ltmp
; CHECK: lmg %r14, %r15, 296(%r15)
; CHECK: br %r14
define void @caller_meta_leaf() {
entry:
  %metadata = alloca i64, i32 3, align 8
  store i64 11, ptr %metadata
  store i64 12, ptr %metadata
  store i64 13, ptr %metadata
  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, ptr %metadata)
  ret void
}

; Test patchpoints reusing the same TargetConstant.
; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
; There is no way to verify this, since it depends on memory allocation.
; But I think it's useful to include as a working example.
define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, ptr %tmp33, i64 %tmp79) {
entry:
  %tmp80 = add i64 %tmp79, -16
  %tmp81 = inttoptr i64 %tmp80 to ptr
  %tmp82 = load i64, ptr %tmp81, align 8
  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 14, i32 6, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
  tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 15, i32 30, ptr null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
  %tmp83 = load i64, ptr %tmp33, align 8
  %tmp84 = add i64 %tmp83, -24
  %tmp85 = inttoptr i64 %tmp84 to ptr
  %tmp86 = load i64, ptr %tmp85, align 8
  tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 17, i32 6, i64 %arg, i64 %tmp10, i64 %tmp86)
  tail call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 18, i32 30, ptr null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
  ret i64 10
}

; Test small patchpoints that don't emit calls.
define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: small_patchpoint_codegen:
; CHECK: .Ltmp
; CHECK: bcr 0, %r0
; CHECK: br %r14
  %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 2, ptr null, i32 2, i64 %p1, i64 %p2)
  ret void
}

; Test large target address.
define i64 @large_target_address_patchpoint_codegen() {
entry:
; CHECK-LABEL: large_target_address_patchpoint_codegen:
; CHECK: llilf %r1, 2566957755
; CHECK-NEXT: iihf %r1, 1432778632
; CHECK-NEXT: basr %r14, %r1
  %resolveCall2 = inttoptr i64 6153737369414576827 to ptr
  %result = tail call i64 (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 14, ptr %resolveCall2, i32 0)
  ret i64 %result
}

; Test that the number of bytes is reflected in the instruction size and
; therefore causes relaxation of the initial branch.
define void @patchpoint_size(i32 %Arg) {
; CHECK-LABEL: patchpoint_size:
; CHECK: # %bb.0:
; CHECK-NEXT: stmg %r14, %r15, 112(%r15)
; CHECK-NEXT: .cfi_offset %r14, -48
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -160
; CHECK-NEXT: .cfi_def_cfa_offset 320
; CHECK-NEXT: chi %r2, 0
; CHECK-NEXT: jge .LBB6_2
  %c = icmp eq i32 %Arg, 0
  br i1 %c, label %block0, label %patch1

block0:
  call i64 @foo(i64 0, i64 0)
  br label %exit

patch1:
  call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 0, i32 65536, ptr null, i32 0)
  br label %exit

exit:
  ret void
}

define void @stackmap_size(i32 %Arg) {
; CHECK-LABEL: stackmap_size:
; CHECK: # %bb.0:
; CHECK-NEXT: stmg %r14, %r15, 112(%r15)
; CHECK-NEXT: .cfi_offset %r14, -48
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -160
; CHECK-NEXT: .cfi_def_cfa_offset 320
; CHECK-NEXT: chi %r2, 0
; CHECK-NEXT: jge .LBB7_2
  %c = icmp eq i32 %Arg, 0
  br i1 %c, label %block0, label %stackmap1

block0:
  call i64 @foo(i64 0, i64 0)
  br label %exit

stackmap1:
  call void (i64, i32, ...) @llvm.experimental.stackmap(i64 1, i32 65536)
  br label %exit

exit:
  ret void
}


declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)