; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -verify-machineinstrs | FileCheck %s -check-prefix=X86
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -verify-machineinstrs | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux-gnux32 -verify-machineinstrs | FileCheck %s -check-prefix=X32ABI
; RUN: llc < %s -mcpu=generic -mtriple=i686-linux -filetype=obj
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux -filetype=obj
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux-gnux32 -filetype=obj

; Just to prevent the alloca from being optimized away
declare void @dummy_use(ptr, i32)

define i32 @test_basic(i32 %l) #0 {
; X86-LABEL: test_basic:
; X86:       # %bb.0:
; X86-NEXT:    cmpl %gs:48, %esp
; X86-NEXT:    jbe .LBB0_1
; X86-NEXT:  .LBB0_2:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    .cfi_offset %ebp, -8
; X86-NEXT:    movl %esp, %ebp
; X86-NEXT:    .cfi_def_cfa_register %ebp
; X86-NEXT:    pushl %esi
; X86-NEXT:    pushl %eax
; X86-NEXT:    .cfi_offset %esi, -12
; X86-NEXT:    movl 8(%ebp), %esi
; X86-NEXT:    movl %esp, %eax
; X86-NEXT:    leal 15(,%esi,4), %ecx
; X86-NEXT:    andl $-16, %ecx
; X86-NEXT:    subl %ecx, %eax
; X86-NEXT:    cmpl %eax, %gs:48
; X86-NEXT:    jg .LBB0_4
; X86-NEXT:  # %bb.3:
; X86-NEXT:    movl %eax, %esp
; X86-NEXT:    jmp .LBB0_5
; X86-NEXT:  .LBB0_4:
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl %ecx
; X86-NEXT:    calll __morestack_allocate_stack_space
; X86-NEXT:    addl $16, %esp
; X86-NEXT:  .LBB0_5:
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT:    movl %eax, (%esp)
; X86-NEXT:    calll dummy_use@PLT
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    testl %esi, %esi
; X86-NEXT:    je .LBB0_6
; X86-NEXT:  # %bb.8: # %false
; X86-NEXT:    decl %esi
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    movl %esi, (%esp)
; X86-NEXT:    calll test_basic@PLT
; X86-NEXT:    jmp .LBB0_7
; X86-NEXT:  .LBB0_6: # %true
; X86-NEXT:    xorl %eax, %eax
; X86-NEXT:  .LBB0_7: # %true
; X86-NEXT:    leal -4(%ebp), %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebp
; X86-NEXT:    .cfi_def_cfa %esp, 4
; X86-NEXT:    retl
; X86-NEXT:  .LBB0_1:
; X86-NEXT:    .cfi_restore %ebp
; X86-NEXT:    .cfi_restore %esi
; X86-NEXT:    pushl $4
; X86-NEXT:    pushl $12
; X86-NEXT:    calll __morestack
; X86-NEXT:    retl
; X86-NEXT:    jmp .LBB0_2
;
; X64-LABEL: test_basic:
; X64:       # %bb.0:
; X64-NEXT:    cmpq %fs:112, %rsp
; X64-NEXT:    jbe .LBB0_1
; X64-NEXT:  .LBB0_2:
; X64-NEXT:    pushq %rbp
; X64-NEXT:    .cfi_def_cfa_offset 16
; X64-NEXT:    .cfi_offset %rbp, -16
; X64-NEXT:    movq %rsp, %rbp
; X64-NEXT:    .cfi_def_cfa_register %rbp
; X64-NEXT:    pushq %rbx
; X64-NEXT:    pushq %rax
; X64-NEXT:    .cfi_offset %rbx, -24
; X64-NEXT:    movl %edi, %ebx
; X64-NEXT:    movq %rsp, %rdi
; X64-NEXT:    movl %ebx, %eax
; X64-NEXT:    leaq 15(,%rax,4), %rax
; X64-NEXT:    andq $-16, %rax
; X64-NEXT:    subq %rax, %rdi
; X64-NEXT:    cmpq %rdi, %fs:112
; X64-NEXT:    jg .LBB0_4
; X64-NEXT:  # %bb.3:
; X64-NEXT:    movq %rdi, %rsp
; X64-NEXT:    jmp .LBB0_5
; X64-NEXT:  .LBB0_4:
; X64-NEXT:    movq %rax, %rdi
; X64-NEXT:    callq __morestack_allocate_stack_space
; X64-NEXT:    movq %rax, %rdi
; X64-NEXT:  .LBB0_5:
; X64-NEXT:    movl %ebx, %esi
; X64-NEXT:    callq dummy_use@PLT
; X64-NEXT:    testl %ebx, %ebx
; X64-NEXT:    je .LBB0_6
; X64-NEXT:  # %bb.8: # %false
; X64-NEXT:    decl %ebx
; X64-NEXT:    movl %ebx, %edi
; X64-NEXT:    callq test_basic@PLT
; X64-NEXT:    jmp .LBB0_7
; X64-NEXT:  .LBB0_6: # %true
; X64-NEXT:    xorl %eax, %eax
; X64-NEXT:  .LBB0_7: # %true
; X64-NEXT:    leaq -8(%rbp), %rsp
; X64-NEXT:    popq %rbx
; X64-NEXT:    popq %rbp
; X64-NEXT:    .cfi_def_cfa %rsp, 8
; X64-NEXT:    retq
; X64-NEXT:  .LBB0_1:
; X64-NEXT:    .cfi_restore %rbx
; X64-NEXT:    .cfi_restore %rbp
; X64-NEXT:    movl $24, %r10d
; X64-NEXT:    movl $0, %r11d
; X64-NEXT:    callq __morestack
; X64-NEXT:    retq
; X64-NEXT:    jmp .LBB0_2
;
; X32ABI-LABEL: test_basic:
; X32ABI:       # %bb.0:
; X32ABI-NEXT:    cmpl %fs:64, %esp
; X32ABI-NEXT:    jbe .LBB0_1
; X32ABI-NEXT:  .LBB0_2:
; X32ABI-NEXT:    pushq %rbp
; X32ABI-NEXT:    .cfi_def_cfa_offset 16
; X32ABI-NEXT:    .cfi_offset %rbp, -16
; X32ABI-NEXT:    movl %esp, %ebp
; X32ABI-NEXT:    .cfi_def_cfa_register %rbp
; X32ABI-NEXT:    pushq %rbx
; X32ABI-NEXT:    pushq %rax
; X32ABI-NEXT:    .cfi_offset %rbx, -24
; X32ABI-NEXT:    movl %edi, %ebx
; X32ABI-NEXT:    leal 15(,%rbx,4), %eax
; X32ABI-NEXT:    andl $-16, %eax
; X32ABI-NEXT:    movl %esp, %edi
; X32ABI-NEXT:    subl %eax, %edi
; X32ABI-NEXT:    cmpl %edi, %fs:64
; X32ABI-NEXT:    jg .LBB0_4
; X32ABI-NEXT:  # %bb.3:
; X32ABI-NEXT:    movl %edi, %esp
; X32ABI-NEXT:    jmp .LBB0_5
; X32ABI-NEXT:  .LBB0_4:
; X32ABI-NEXT:    movl %eax, %edi
; X32ABI-NEXT:    callq __morestack_allocate_stack_space
; X32ABI-NEXT:    movl %eax, %edi
; X32ABI-NEXT:  .LBB0_5:
; X32ABI-NEXT:    movl %ebx, %esi
; X32ABI-NEXT:    callq dummy_use@PLT
; X32ABI-NEXT:    testl %ebx, %ebx
; X32ABI-NEXT:    je .LBB0_6
; X32ABI-NEXT:  # %bb.8: # %false
; X32ABI-NEXT:    decl %ebx
; X32ABI-NEXT:    movl %ebx, %edi
; X32ABI-NEXT:    callq test_basic@PLT
; X32ABI-NEXT:    jmp .LBB0_7
; X32ABI-NEXT:  .LBB0_6: # %true
; X32ABI-NEXT:    xorl %eax, %eax
; X32ABI-NEXT:  .LBB0_7: # %true
; X32ABI-NEXT:    leal -8(%ebp), %esp
; X32ABI-NEXT:    popq %rbx
; X32ABI-NEXT:    popq %rbp
; X32ABI-NEXT:    .cfi_def_cfa %rsp, 8
; X32ABI-NEXT:    retq
; X32ABI-NEXT:  .LBB0_1:
; X32ABI-NEXT:    .cfi_def_cfa_register 4294967294
; X32ABI-NEXT:    .cfi_restore %rbx
; X32ABI-NEXT:    .cfi_restore %rbp
; X32ABI-NEXT:    movl $24, %r10d
; X32ABI-NEXT:    movl $0, %r11d
; X32ABI-NEXT:    callq __morestack
; X32ABI-NEXT:    retq
; X32ABI-NEXT:    jmp .LBB0_2
  %mem = alloca i32, i32 %l
  call void @dummy_use (ptr %mem, i32 %l)
  %terminate = icmp eq i32 %l, 0
  br i1 %terminate, label %true, label %false

true:
  ret i32 0

false:
  %newlen = sub i32 %l, 1
  %retvalue = call i32 @test_basic(i32 %newlen)
  ret i32 %retvalue

}

attributes #0 = { "split-stack" }