1; RUN: llc -O0 --mtriple=aarch64 -verify-machineinstrs --filetype=asm %s -o - 2>&1 | FileCheck %s 2; RUN: llc -O1 --mtriple=aarch64 -verify-machineinstrs --filetype=asm %s -o - 2>&1 | FileCheck %s 3; RUN: llc -O2 --mtriple=aarch64 -verify-machineinstrs --filetype=asm %s -o - 2>&1 | FileCheck %s 4target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" 5target triple = "aarch64-unknown-linux-gnu" 6 7declare void @normal_cc() 8 9; Caller: preserve_allcc; callee: normalcc. Normally callee saved registers 10; x9~x15 need to be spilled. Since most of them will be spilled in pairs in 11; reverse order, we only check the odd number ones due to FileCheck not 12; matching the same line of assembly twice. 13; CHECK-LABEL: preserve_all 14; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q8(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 15; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q10(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 16; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q12(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 17; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q14(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 18; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q16(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 19; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q18(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 20; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q22(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 21; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q24(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 22; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q26(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 23; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q28(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 24; CHECK-DAG: {{st[rp]}} {{(q[0-9]+, )?q30(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 25; CHECK-DAG: {{st[rp]}} {{(x[0-9]+, )?x9(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 26; CHECK-DAG: {{st[rp]}} {{(x[0-9]+, )?x11(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 27; CHECK-DAG: {{st[rp]}} {{(x[0-9]+, )?x13(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 28; CHECK-DAG: {{st[rp]}} {{(x[0-9]+, )?x15(, x[0-9]+)?}}, [sp, #{{[-0-9]+}}] 29define preserve_allcc void @preserve_all() { 30 call void @normal_cc() 31 ret void 32} 33 34; Caller: normalcc; callee: preserve_allcc. x9/q9 does not need to be spilled. 35; The same holds for other x and q registers, but we only check x9 and q9. 36; CHECK-LABEL: normal_cc_caller 37; CHECK-NOT: stp {{x[0-9]+}}, x9, [sp, #{{[-0-9]+}}] 38; CHECK-NOT: stp x9, {{x[0-9]+}}, [sp, #{{[-0-9]+}}] 39; CHECK-NOT: str x9, [sp, {{#[-0-9]+}}] 40; CHECK-NOT: stp {{q[0-9]+}}, q9, [sp, #{{[-0-9]+}}] 41; CHECK-NOT: stp q9, {{q[0-9]+}}, [sp, #{{[-0-9]+}}] 42; CHECK-NOT: str q9, [sp, {{#[-0-9]+}}] 43define dso_local void @normal_cc_caller() { 44entry: 45 %v = alloca i32, align 4 46 call void asm sideeffect "mov x9, $0", "N,~{x9}"(i32 48879) #2 47 call void asm sideeffect "movi v9.2d, #0","~{v9}" () #2 48 49 50 call preserve_allcc void @preserve_all() 51 %0 = load i32, ptr %v, align 4 52 %1 = call i32 asm sideeffect "mov ${0:w}, w9", "=r,r"(i32 %0) #2 53 %2 = call i32 asm sideeffect "fneg v9.4s, v9.4s", "=r,~{v9}"() #2 54 store i32 %1, ptr %v, align 4 55 ret void 56} 57