; xref: /llvm-project/llvm/test/CodeGen/X86/x86-64-ms_abi-vararg.ll (revision 2f448bf509432c1a19ec46ab8cbc7353c03c6280)
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-pc-linux-gnu | FileCheck %s

; Verify that the var arg parameters which are passed in registers are stored
; in home stack slots allocated by the caller and that AP is correctly
; calculated.
; A win64cc vararg function on a linux triple: the MS ABI still applies, so
; the anonymous register args %rdx/%r8/%r9 are homed into the caller-provided
; shadow space. After the single pushq, those home slots sit at 24/32/40(%rsp)
; and va_start makes AP point at the first anonymous slot, 24(%rsp).
define win64cc void @average_va(i32 %count, ...) nounwind {
entry:
; CHECK: pushq
; CHECK-DAG: movq   %r9, 40(%rsp)
; CHECK-DAG: movq   %r8, 32(%rsp)
; CHECK-DAG: movq   %rdx, 24(%rsp)
; CHECK: leaq   24(%rsp), %rax

  %ap = alloca ptr, align 8                       ; <ptr> [#uses=1]
  call void @llvm.va_start(ptr %ap)
  ret void
}
18
; Varargs intrinsics used by the functions in this test.
declare void @llvm.va_start(ptr) nounwind
declare void @llvm.va_copy(ptr, ptr) nounwind
declare void @llvm.va_end(ptr) nounwind
22
; CHECK-LABEL: f5:
; CHECK: pushq
; CHECK: leaq 56(%rsp),
; Five fixed integer args: four in registers plus one on the stack, so after
; the pushq the first vararg slot is at 56(%rsp) (8 ret + 8 push + 32 shadow
; + 8 for %a4).
define win64cc ptr @f5(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  ret ptr %ap
}
32
; CHECK-LABEL: f4:
; CHECK: pushq
; CHECK: leaq 48(%rsp),
; Four fixed args fill all the register/shadow slots, so the first vararg is
; the first true stack arg at 48(%rsp) after the pushq.
define win64cc ptr @f4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  ret ptr %ap
}
42
; CHECK-LABEL: f3:
; CHECK: pushq
; CHECK: leaq 40(%rsp),
; Three fixed args: the first vararg arrives in %r9 and is homed to its
; shadow slot, 40(%rsp) after the pushq, which is where AP must point.
define win64cc ptr @f3(i64 %a0, i64 %a1, i64 %a2, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  ret ptr %ap
}
52
; WinX86_64 uses char* for va_list. Verify that the correct amount of bytes
; are copied using va_copy.

; CHECK-LABEL: copy1:
; CHECK: leaq 32(%rsp), [[REG_copy1:%[a-z]+]]
; CHECK-DAG: movq [[REG_copy1]], 8(%rsp)
; CHECK-DAG: movq [[REG_copy1]], (%rsp)
; CHECK: ret
; va_copy of a char* va_list is a single 8-byte pointer store into %cp.
define win64cc void @copy1(i64 %a0, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  %cp = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  call void @llvm.va_copy(ptr %cp, ptr %ap)
  ret void
}
69
; CHECK-LABEL: copy4:
; CHECK: leaq 56(%rsp), [[REG_copy4:%[a-z]+]]
; CHECK: movq [[REG_copy4]], 8(%rsp)
; CHECK: movq [[REG_copy4]], (%rsp)
; CHECK: ret
; Same as copy1 but with four fixed args, so AP starts past the shadow area.
define win64cc void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  %cp = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  call void @llvm.va_copy(ptr %cp, ptr %ap)
  ret void
}
83
; CHECK-LABEL: arg4:
; va_start (optimized away as overwritten by va_arg)
; va_arg:
; CHECK: leaq 52(%rsp), [[REG_arg4_2:%[a-z]+]]
; CHECK: movq [[REG_arg4_2]], (%rsp)
; CHECK: movl 48(%rsp), %eax
; CHECK: ret
define win64cc i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  ; Fetch the first (i32) vararg; the advanced AP is stored back through %ap.
  %tmp = va_arg ptr %ap, i32
  ret i32 %tmp
}
