; RUN: llc < %s -mcpu=generic -mtriple=x86_64-pc-win32 | FileCheck %s

; Verify that the var arg parameters which are passed in registers are stored
; in home stack slots allocated by the caller and that AP is correctly
; calculated.
; After one push, the return address sits at 8(%rsp) and the caller-allocated
; home area starts at 16(%rsp); the register args %rdx/%r8/%r9 (positions 2-4
; in the Win64 convention) are spilled to 24/32/40(%rsp), and va_start yields
; the address of the first unnamed slot, 24(%rsp).
define void @average_va(i32 %count, ...) nounwind {
entry:
; CHECK: pushq
; CHECK-DAG: movq   %r9, 40(%rsp)
; CHECK-DAG: movq   %r8, 32(%rsp)
; CHECK-DAG: movq   %rdx, 24(%rsp)
; CHECK: leaq   24(%rsp), %rax

  %ap = alloca ptr, align 8                       ; <ptr> [#uses=1]
  call void @llvm.va_start(ptr %ap)
  ret void
}

; Intrinsic declarations shared by all functions in this file.
declare void @llvm.va_start(ptr) nounwind
declare void @llvm.va_copy(ptr, ptr) nounwind

; Five named i64 args: the first variadic slot is at
; 56(%rsp) = 8 (pushed reg) + 8 (return addr) + 5*8 (named-arg slots).
; CHECK-LABEL: f5:
; CHECK: pushq
; CHECK: leaq 56(%rsp),
define ptr @f5(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  ret ptr %ap
}

; Four named i64 args: first variadic slot at 48(%rsp) = 8 + 8 + 4*8.
; CHECK-LABEL: f4:
; CHECK: pushq
; CHECK: leaq 48(%rsp),
define ptr @f4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  ret ptr %ap
}

; Three named i64 args: first variadic slot at 40(%rsp) = 8 + 8 + 3*8.
; CHECK-LABEL: f3:
; CHECK: pushq
; CHECK: leaq 40(%rsp),
define ptr @f3(i64 %a0, i64 %a1, i64 %a2, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  ret ptr %ap
}

; WinX86_64 uses char* for va_list. Verify that the correct amount of bytes
; are copied using va_copy.

; va_copy of a char* va_list is a single pointer-sized copy: the same
; computed pointer (32(%rsp) with one named arg) is stored to both the
; %ap and %cp stack slots.
; CHECK-LABEL: copy1:
; CHECK: subq $16
; CHECK: leaq 32(%rsp), [[REG_copy1:%[a-z]+]]
; CHECK: movq [[REG_copy1]], 8(%rsp)
; CHECK: movq [[REG_copy1]], (%rsp)
; CHECK: addq $16
; CHECK: ret
define void @copy1(i64 %a0, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  %cp = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  call void @llvm.va_copy(ptr %cp, ptr %ap)
  ret void
}

; Same as copy1 but with four named args, so the va_list pointer is
; 56(%rsp) and is again stored to both slots.
; CHECK-LABEL: copy4:
; CHECK: subq $16
; CHECK: leaq 56(%rsp), [[REG_copy4:%[a-z]+]]
; CHECK: movq [[REG_copy4]], 8(%rsp)
; CHECK: movq [[REG_copy4]], (%rsp)
; CHECK: addq $16
; CHECK: ret
define void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  %cp = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  call void @llvm.va_copy(ptr %cp, ptr %ap)
  ret void
}

; va_arg of an i32: the value is loaded from the current va_list position
; (48(%rsp)) and the advanced pointer (52(%rsp)) is stored back to %ap.
; CHECK-LABEL: arg4:
; CHECK: pushq
; va_start (optimized away as overwritten by va_arg)
; va_arg:
; CHECK: leaq 52(%rsp), [[REG_arg4_2:%[a-z]+]]
; CHECK: movq [[REG_arg4_2]], (%rsp)
; CHECK: movl 48(%rsp), %eax
; CHECK: popq
; CHECK: ret
define i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca ptr, align 8
  call void @llvm.va_start(ptr %ap)
  %tmp = va_arg ptr %ap, i32
  ret i32 %tmp
}

; An sret pointer occupies the first register (%rcx, copied to %rax for the
; return), so the variadic area begins after the two named parameters.
define void @sret_arg(ptr sret(i32) %agg.result, ptr nocapture readnone %format, ...) {
entry:
  %ap = alloca ptr
  call void @llvm.va_start(ptr %ap)
  %tmp = va_arg ptr %ap, i32
  store i32 %tmp, ptr %agg.result
  ret void
}
; CHECK-LABEL: sret_arg:
; CHECK: pushq
; CHECK: movq %rcx, %rax
; CHECK-DAG: movq %r9, 40(%rsp)
; CHECK-DAG: movq %r8, 32(%rsp)
; CHECK-DAG: leaq 36(%rsp), %[[sret:[^ ]*]]
; CHECK-DAG: movl %r8d, (%rax)
; CHECK-DAG: movq %[[sret]], (%rsp)
; CHECK: popq
; CHECK: retq