; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple x86_64-apple-macosx10.13.0 < %s | FileCheck %s --check-prefix=X86_64
; RUN: llc -mtriple i386-apple-macosx10.13.0 < %s | FileCheck %s --check-prefix=X86

; The macOS triples are used to get trapping behavior on the "unreachable" IR
; instruction, so that the placement of the ud2 instruction can be verified.

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; The IR was created using the following C code:
;; typedef ptr jmp_buf;
;; jmp_buf buf;
;;
;; __attribute__((noinline)) int bar(int i) {
;;   int j = i - 111;
;;   __builtin_longjmp(&buf, 1);
;;   return j;
;; }
;;
;; int foo(int i) {
;;   int j = i * 11;
;;   if (!__builtin_setjmp(&buf)) {
;;     j += 33 + bar(j);
;;   }
;;   return j + i;
;; }
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

@buf = common local_unnamed_addr global ptr null, align 8

; Functions that use LongJmp should fix the Shadow Stack using the previously
; saved ShadowStackPointer in the input buffer.
; The fix requires unwinding the shadow stack to the last SSP.
define i32 @bar(i32 %i) local_unnamed_addr {
; X86_64-LABEL: bar:
; X86_64:       ## %bb.0: ## %entry
; X86_64-NEXT:    pushq %rbp
; X86_64-NEXT:    .cfi_def_cfa_offset 16
; X86_64-NEXT:    .cfi_offset %rbp, -16
; X86_64-NEXT:    movq _buf@GOTPCREL(%rip), %rax
; X86_64-NEXT:    movq (%rax), %rax
; X86_64-NEXT:    xorl %edx, %edx
; X86_64-NEXT:    rdsspq %rdx
; X86_64-NEXT:    testq %rdx, %rdx
; X86_64-NEXT:    je LBB0_5
; X86_64-NEXT:  ## %bb.1: ## %entry
; X86_64-NEXT:    movq 24(%rax), %rcx
; X86_64-NEXT:    subq %rdx, %rcx
; X86_64-NEXT:    jbe LBB0_5
; X86_64-NEXT:  ## %bb.2: ## %entry
; X86_64-NEXT:    shrq $3, %rcx
; X86_64-NEXT:    incsspq %rcx
; X86_64-NEXT:    shrq $8, %rcx
; X86_64-NEXT:    je LBB0_5
; X86_64-NEXT:  ## %bb.3: ## %entry
; X86_64-NEXT:    shlq %rcx
; X86_64-NEXT:    movq $128, %rdx
; X86_64-NEXT:  LBB0_4: ## %entry
; X86_64-NEXT:    ## =>This Inner Loop Header: Depth=1
; X86_64-NEXT:    incsspq %rdx
; X86_64-NEXT:    decq %rcx
; X86_64-NEXT:    jne LBB0_4
; X86_64-NEXT:  LBB0_5: ## %entry
; X86_64-NEXT:    movq (%rax), %rbp
; X86_64-NEXT:    movq 8(%rax), %rcx
; X86_64-NEXT:    movq 16(%rax), %rsp
; X86_64-NEXT:    jmpq *%rcx
;
; X86-LABEL: bar:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl L_buf$non_lazy_ptr, %eax
; X86-NEXT:    movl (%eax), %eax
; X86-NEXT:    xorl %edx, %edx
; X86-NEXT:    rdsspd %edx
; X86-NEXT:    testl %edx, %edx
; X86-NEXT:    je LBB0_5
; X86-NEXT:  ## %bb.1: ## %entry
; X86-NEXT:    movl 12(%eax), %ecx
; X86-NEXT:    subl %edx, %ecx
; X86-NEXT:    jbe LBB0_5
; X86-NEXT:  ## %bb.2: ## %entry
; X86-NEXT:    shrl $2, %ecx
; X86-NEXT:    incsspd %ecx
; X86-NEXT:    shrl $8, %ecx
; X86-NEXT:    je LBB0_5
; X86-NEXT:  ## %bb.3: ## %entry
; X86-NEXT:    shll %ecx
; X86-NEXT:    movl $128, %edx
; X86-NEXT:  LBB0_4: ## %entry
; X86-NEXT:    ## =>This Inner Loop Header: Depth=1
; X86-NEXT:    incsspd %edx
; X86-NEXT:    decl %ecx
; X86-NEXT:    jne LBB0_4
; X86-NEXT:  LBB0_5: ## %entry
; X86-NEXT:    movl (%eax), %ebp
; X86-NEXT:    movl 4(%eax), %ecx
; X86-NEXT:    movl 8(%eax), %esp
; X86-NEXT:    jmpl *%ecx
entry:
  %0 = load ptr, ptr @buf, align 8
  tail call void @llvm.eh.sjlj.longjmp(ptr %0)
  unreachable
}

declare void @llvm.eh.sjlj.longjmp(ptr)
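
; For reference, a rough C-level sketch of the unwind sequence checked above.
; This is only an illustration: read_ssp() and inc_ssp() are hypothetical
; helpers standing in for the rdssp/incssp instructions, buf_ssp is the shadow
; stack pointer that setjmp saved in the buffer, and incssp is taken to consume
; only the low 8 bits of its operand (hence the chunked loop).
;;
;;   unsigned long cur = read_ssp();                // 0 => shadow stacks are off
;;   if (cur != 0 && buf_ssp > cur) {
;;     unsigned long entries = (buf_ssp - cur) >> 3; // 8 bytes per slot (4 on i386)
;;     inc_ssp(entries & 0xff);                     // pop up to 255 entries at once
;;     for (unsigned long n = (entries >> 8) * 2; n != 0; --n)
;;       inc_ssp(128);                              // pop the rest in chunks of 128
;;   }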

; Functions that call SetJmp should save the current ShadowStackPointer for
; future fixing of the Shadow Stack.
define i32 @foo(i32 %i) local_unnamed_addr {
; X86_64-LABEL: foo:
; X86_64:       ## %bb.0: ## %entry
; X86_64-NEXT:    pushq %rbp
; X86_64-NEXT:    .cfi_def_cfa_offset 16
; X86_64-NEXT:    .cfi_offset %rbp, -16
; X86_64-NEXT:    movq %rsp, %rbp
; X86_64-NEXT:    .cfi_def_cfa_register %rbp
; X86_64-NEXT:    pushq %r15
; X86_64-NEXT:    pushq %r14
; X86_64-NEXT:    pushq %r13
; X86_64-NEXT:    pushq %r12
; X86_64-NEXT:    pushq %rbx
; X86_64-NEXT:    pushq %rax
; X86_64-NEXT:    .cfi_offset %rbx, -56
; X86_64-NEXT:    .cfi_offset %r12, -48
; X86_64-NEXT:    .cfi_offset %r13, -40
; X86_64-NEXT:    .cfi_offset %r14, -32
; X86_64-NEXT:    .cfi_offset %r15, -24
; X86_64-NEXT:    ## kill: def $edi killed $edi def $rdi
; X86_64-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
; X86_64-NEXT:    movq _buf@GOTPCREL(%rip), %rax
; X86_64-NEXT:    movq (%rax), %rax
; X86_64-NEXT:    movq %rbp, (%rax)
; X86_64-NEXT:    movq %rsp, 16(%rax)
; X86_64-NEXT:    leaq LBB1_4(%rip), %rcx
; X86_64-NEXT:    movq %rcx, 8(%rax)
; X86_64-NEXT:    xorq %rcx, %rcx
; X86_64-NEXT:    rdsspq %rcx
; X86_64-NEXT:    movq %rcx, 24(%rax)
; X86_64-NEXT:    #EH_SjLj_Setup LBB1_4
; X86_64-NEXT:  ## %bb.1: ## %entry
; X86_64-NEXT:    xorl %eax, %eax
; X86_64-NEXT:    jmp LBB1_2
; X86_64-NEXT:  LBB1_4: ## Block address taken
; X86_64-NEXT:    ## %entry
; X86_64-NEXT:    movl $1, %eax
; X86_64-NEXT:  LBB1_2: ## %entry
; X86_64-NEXT:    testl %eax, %eax
; X86_64-NEXT:    je LBB1_5
; X86_64-NEXT:  ## %bb.3: ## %if.end
; X86_64-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax ## 8-byte Reload
; X86_64-NEXT:    shll $2, %eax
; X86_64-NEXT:    leal (%rax,%rax,2), %eax
; X86_64-NEXT:    addq $8, %rsp
; X86_64-NEXT:    popq %rbx
; X86_64-NEXT:    popq %r12
; X86_64-NEXT:    popq %r13
; X86_64-NEXT:    popq %r14
; X86_64-NEXT:    popq %r15
; X86_64-NEXT:    popq %rbp
; X86_64-NEXT:    retq
; X86_64-NEXT:  LBB1_5: ## %if.then
; X86_64-NEXT:    callq _bar
; X86_64-NEXT:    ud2
;
; X86-LABEL: foo:
; X86:       ## %bb.0: ## %entry
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    .cfi_def_cfa_register %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    .cfi_offset %esi, -20
; X86-NEXT:    .cfi_offset %edi, -16
; X86-NEXT:    .cfi_offset %ebx, -12
; X86-NEXT:    movl L_buf$non_lazy_ptr, %eax
; X86-NEXT:    movl (%eax), %eax
; X86-NEXT:    movl %ebp, (%eax)
; X86-NEXT:    movl %esp, 16(%eax)
; X86-NEXT:    movl $LBB1_4, 4(%eax)
; X86-NEXT:    xorl %ecx, %ecx
; X86-NEXT:    rdsspd %ecx
; X86-NEXT:    movl %ecx, 12(%eax)
; X86-NEXT:    #EH_SjLj_Setup LBB1_4
; X86-NEXT:  ## %bb.1: ## %entry
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:    jmp LBB1_2
; X86-NEXT:  LBB1_4: ## Block address taken
; X86-NEXT:    ## %entry
; X86-NEXT:    movl $1, %eax
; X86-NEXT:  LBB1_2: ## %entry
; X86-NEXT:    testl %eax, %eax
; X86-NEXT:    je LBB1_5
; X86-NEXT:  ## %bb.3: ## %if.end
; X86-NEXT:    movl 8(%ebp), %eax
; X86-NEXT:    shll $2, %eax
; X86-NEXT:    leal (%eax,%eax,2), %eax
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl
; X86-NEXT:  LBB1_5: ## %if.then
; X86-NEXT:    calll _bar
; X86-NEXT:    ud2
entry:
  %0 = load ptr, ptr @buf, align 8
  %1 = tail call ptr @llvm.frameaddress(i32 0)
  store ptr %1, ptr %0, align 8
  %2 = tail call ptr @llvm.stacksave()
  %3 = getelementptr inbounds i8, ptr %0, i64 16
  store ptr %2, ptr %3, align 8
  %4 = tail call i32 @llvm.eh.sjlj.setjmp(ptr %0)
  %tobool = icmp eq i32 %4, 0
  br i1 %tobool, label %if.then, label %if.end

if.then:                                          ; preds = %entry
  %call = tail call i32 @bar(i32 undef)
  unreachable

if.end:                                           ; preds = %entry
  %add2 = mul nsw i32 %i, 12
  ret i32 %add2
}
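
; For reference, the buffer layout produced by the stores checked above,
; written as a hedged C-style sketch. The x86-64 slot offsets are shown; the
; i386 variant uses 4-byte slots, with the shadow stack pointer at offset 12.
; read_ssp() is a hypothetical helper standing in for rdssp, and resume is the
; longjmp target (LBB1_4 above).
;;
;;   buf[0] = frame_pointer;   // offset 0:  rbp
;;   buf[1] = &&resume;        // offset 8:  rip to resume at after longjmp
;;   buf[2] = stack_pointer;   // offset 16: rsp
;;   buf[3] = read_ssp();      // offset 24: shadow stack pointer, 0 if disabled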

declare ptr @llvm.frameaddress(i32)
declare ptr @llvm.stacksave()
declare i32 @llvm.eh.sjlj.setjmp(ptr)

!llvm.module.flags = !{!0}

!0 = !{i32 8, !"cf-protection-return", i32 1}