; RUN: llc -mtriple=arm-none-none-eabi -mcpu=cortex-a15 -o - %s | FileCheck --check-prefix=CHECK-A %s
; RUN: llc -mtriple=thumb-none-none-eabi -mcpu=cortex-a15 -o - %s | FileCheck --check-prefix=CHECK-A-THUMB %s
; RUN: llc -mtriple=thumb-apple-none-macho -mcpu=cortex-m3 -o - %s | FileCheck --check-prefix=CHECK-M %s

declare arm_aapcscc void @bar()

@bigvar = global [16 x i32] zeroinitializer

define arm_aapcscc void @irq_fn() alignstack(8) "interrupt"="IRQ" {
  ; Must save all registers except banked sp and lr (we save lr anyway because
  ; we actually need it at the end to execute the return ourselves).

  ; Also need special function return setting pc and CPSR simultaneously.
; CHECK-A-LABEL: irq_fn:
; CHECK-A: push {r0, r1, r2, r3, r11, lr}
; CHECK-A: add r11, sp, #16
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; CHECK-A: bl bar
; CHECK-A: sub sp, r11, #16
; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
; CHECK-A: subs pc, lr, #4

; CHECK-A-THUMB-LABEL: irq_fn:
; CHECK-A-THUMB: push {r0, r1, r2, r3, r4, r7, lr}
; CHECK-A-THUMB: mov r4, sp
; CHECK-A-THUMB: add r7, sp, #20
; CHECK-A-THUMB: bic r4, r4, #7
; CHECK-A-THUMB: bl bar
; CHECK-A-THUMB: sub.w r4, r7, #20
; CHECK-A-THUMB: mov sp, r4
; CHECK-A-THUMB: pop.w {r0, r1, r2, r3, r4, r7, lr}
; CHECK-A-THUMB: subs pc, lr, #4

  ; Normal AAPCS function (r0-r3 pushed onto stack by hardware, lr set to
  ; appropriate sentinel so no special return needed).
; CHECK-M-LABEL: irq_fn:
; CHECK-M: push {r4, r7, lr}
; CHECK-M: add r7, sp, #4
; CHECK-M: mov r4, sp
; CHECK-M: bic r4, r4, #7
; CHECK-M: mov sp, r4
; CHECK-M: blx _bar
; CHECK-M: subs r4, r7, #4
; CHECK-M: mov sp, r4
; CHECK-M: pop {r4, r7, pc}

  call arm_aapcscc void @bar()
  ret void
}

define arm_aapcscc void @fiq_fn() alignstack(8) "interrupt"="FIQ" {
; CHECK-A-LABEL: fiq_fn:
; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r11, lr}
  ; 32 to get past r0, r1, ..., r7
; CHECK-A: add r11, sp, #32
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
  ; 32 must match above
; CHECK-A: sub sp, r11, #32
; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r11, lr}
; CHECK-A: subs pc, lr, #4

; CHECK-A-THUMB-LABEL: fiq_fn:
; CHECK-M-LABEL: fiq_fn:
  %val = load volatile [16 x i32]* @bigvar
  store volatile [16 x i32] %val, [16 x i32]* @bigvar
  ret void
}

define arm_aapcscc void @swi_fn() alignstack(8) "interrupt"="SWI" {
; CHECK-A-LABEL: swi_fn:
; CHECK-A: push {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-A: add r11, sp, #44
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
; CHECK-A: sub sp, r11, #44
; CHECK-A: pop {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, lr}
; CHECK-A: subs pc, lr, #0

  %val = load volatile [16 x i32]* @bigvar
  store volatile [16 x i32] %val, [16 x i32]* @bigvar
  ret void
}

define arm_aapcscc void @undef_fn() alignstack(8) "interrupt"="UNDEF" {
; CHECK-A-LABEL: undef_fn:
; CHECK-A: push {r0, r1, r2, r3, r11, lr}
; CHECK-A: add r11, sp, #16
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
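  ; Annotation (not a CHECK line): for UNDEF, as for SWI above, lr already
  ; points at the instruction to resume at, so the expected return below uses
  ; a zero offset rather than the "subs pc, lr, #4" of the IRQ/FIQ/ABORT
  ; handlers.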
; CHECK-A: sub sp, r11, #16
; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
; CHECK-A: subs pc, lr, #0

  call void @bar()
  ret void
}

define arm_aapcscc void @abort_fn() alignstack(8) "interrupt"="ABORT" {
; CHECK-A-LABEL: abort_fn:
; CHECK-A: push {r0, r1, r2, r3, r11, lr}
; CHECK-A: add r11, sp, #16
; CHECK-A: sub sp, sp, #{{[0-9]+}}
; CHECK-A: bic sp, sp, #7
; [...]
; CHECK-A: sub sp, r11, #16
; CHECK-A: pop {r0, r1, r2, r3, r11, lr}
; CHECK-A: subs pc, lr, #4

  call void @bar()
  ret void
}

@var = global double 0.0

; We don't save VFP regs, since it would be a massive overhead in the general
; case.
define arm_aapcscc void @floating_fn() alignstack(8) "interrupt"="IRQ" {
; CHECK-A-LABEL: floating_fn:
; CHECK-A-NOT: vpush
; CHECK-A-NOT: vstr
; CHECK-A-NOT: vstm
; CHECK-A: vadd.f64 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
  %lhs = load volatile double* @var
  %rhs = load volatile double* @var
  %sum = fadd double %lhs, %rhs
  store double %sum, double* @var
  ret void
}
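; Illustrative summary (derived from the CHECK lines above, not extra test
; input): the expected exception returns are "subs pc, lr, #4" for IRQ, FIQ
; and ABORT, and "subs pc, lr, #0" for SWI and UNDEF, matching how far the
; banked lr sits past the resume point in each mode. A handler such as
; @irq_fn would typically be produced from C source along the lines of:
;   __attribute__((interrupt("IRQ"))) void irq_fn(void) { bar(); }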