xref: /llvm-project/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
2; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel  -verify-machineinstrs \
3; RUN:   | FileCheck -check-prefixes=RV32,ILP32 %s
4; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel  -mattr=+d -verify-machineinstrs \
5; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32 %s
6; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel  -mattr=+d -target-abi ilp32f \
7; RUN:     -verify-machineinstrs \
8; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32F %s
9; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel  -mattr=+d -target-abi ilp32d \
10; RUN:     -verify-machineinstrs \
11; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32D %s
12; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel  -verify-machineinstrs \
13; RUN:   | FileCheck -check-prefixes=RV64,LP64 %s
14; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel  -mattr=+d -target-abi lp64f \
15; RUN:     -verify-machineinstrs \
16; RUN:   | FileCheck -check-prefixes=RV64,LP64F %s
17; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel  -mattr=+d -target-abi lp64d \
18; RUN:     -verify-machineinstrs \
19; RUN:   | FileCheck -check-prefixes=RV64,LP64D %s
20; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel \
21; RUN:     -frame-pointer=all -target-abi ilp32 -verify-machineinstrs \
22; RUN:   | FileCheck -check-prefixes=RV32-WITHFP %s
23; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel \
24; RUN:     -frame-pointer=all -target-abi lp64 -verify-machineinstrs \
25; RUN:   | FileCheck -check-prefixes=RV64-WITHFP %s
26
27; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
28; lp64/lp64f/lp64d. Different CHECK lines are required due to slight
29; codegen differences due to the way the f64 load operations are lowered and
30; because the PseudoCALL specifies the calling convention.
31; The nounwind attribute is omitted for some of the tests, to check that CFI
32; directives are correctly generated.
33
; Intrinsics delimiting variadic-argument processing on a va_list slot.
34declare void @llvm.va_start(ptr)
35declare void @llvm.va_end(ptr)
36
; External callee: keeps the dynamically alloca'd buffer observably live.
37declare void @notdead(ptr)
38
39; Although frontends are recommended to not generate va_arg due to the lack of
40; support for aggregate types, we test simple cases here to ensure they are
41; lowered correctly
42
; va1: returns the first variadic argument as an i32 by bumping the
; va_list pointer manually (GEP +4) instead of using the va_arg
; instruction. Deliberately not nounwind, so the autogenerated checks
; below also verify the CFI directives in prologue/epilogue.
43define i32 @va1(ptr %fmt, ...) {
44; RV32-LABEL: va1:
45; RV32:       # %bb.0:
46; RV32-NEXT:    addi sp, sp, -48
47; RV32-NEXT:    .cfi_def_cfa_offset 48
48; RV32-NEXT:    sw a1, 20(sp)
49; RV32-NEXT:    sw a2, 24(sp)
50; RV32-NEXT:    sw a3, 28(sp)
51; RV32-NEXT:    sw a4, 32(sp)
52; RV32-NEXT:    addi a0, sp, 20
53; RV32-NEXT:    sw a0, 12(sp)
54; RV32-NEXT:    lw a0, 12(sp)
55; RV32-NEXT:    sw a5, 36(sp)
56; RV32-NEXT:    sw a6, 40(sp)
57; RV32-NEXT:    sw a7, 44(sp)
58; RV32-NEXT:    addi a1, a0, 4
59; RV32-NEXT:    sw a1, 12(sp)
60; RV32-NEXT:    lw a0, 0(a0)
61; RV32-NEXT:    addi sp, sp, 48
62; RV32-NEXT:    .cfi_def_cfa_offset 0
63; RV32-NEXT:    ret
64;
65; RV64-LABEL: va1:
66; RV64:       # %bb.0:
67; RV64-NEXT:    addi sp, sp, -80
68; RV64-NEXT:    .cfi_def_cfa_offset 80
69; RV64-NEXT:    sd a1, 24(sp)
70; RV64-NEXT:    sd a2, 32(sp)
71; RV64-NEXT:    sd a3, 40(sp)
72; RV64-NEXT:    sd a4, 48(sp)
73; RV64-NEXT:    addi a0, sp, 8
74; RV64-NEXT:    addi a1, sp, 24
75; RV64-NEXT:    sd a1, 8(sp)
76; RV64-NEXT:    lw a0, 4(a0)
77; RV64-NEXT:    lwu a1, 8(sp)
78; RV64-NEXT:    sd a5, 56(sp)
79; RV64-NEXT:    sd a6, 64(sp)
80; RV64-NEXT:    sd a7, 72(sp)
81; RV64-NEXT:    slli a0, a0, 32
82; RV64-NEXT:    or a0, a0, a1
83; RV64-NEXT:    addi a1, a0, 4
84; RV64-NEXT:    srli a2, a1, 32
85; RV64-NEXT:    sw a1, 8(sp)
86; RV64-NEXT:    sw a2, 12(sp)
87; RV64-NEXT:    lw a0, 0(a0)
88; RV64-NEXT:    addi sp, sp, 80
89; RV64-NEXT:    .cfi_def_cfa_offset 0
90; RV64-NEXT:    ret
91;
92; RV32-WITHFP-LABEL: va1:
93; RV32-WITHFP:       # %bb.0:
94; RV32-WITHFP-NEXT:    addi sp, sp, -48
95; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 48
96; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
97; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
98; RV32-WITHFP-NEXT:    .cfi_offset ra, -36
99; RV32-WITHFP-NEXT:    .cfi_offset s0, -40
100; RV32-WITHFP-NEXT:    addi s0, sp, 16
101; RV32-WITHFP-NEXT:    .cfi_def_cfa s0, 32
102; RV32-WITHFP-NEXT:    sw a1, 4(s0)
103; RV32-WITHFP-NEXT:    sw a2, 8(s0)
104; RV32-WITHFP-NEXT:    sw a3, 12(s0)
105; RV32-WITHFP-NEXT:    sw a4, 16(s0)
106; RV32-WITHFP-NEXT:    addi a0, s0, 4
107; RV32-WITHFP-NEXT:    sw a0, -12(s0)
108; RV32-WITHFP-NEXT:    lw a0, -12(s0)
109; RV32-WITHFP-NEXT:    sw a5, 20(s0)
110; RV32-WITHFP-NEXT:    sw a6, 24(s0)
111; RV32-WITHFP-NEXT:    sw a7, 28(s0)
112; RV32-WITHFP-NEXT:    addi a1, a0, 4
113; RV32-WITHFP-NEXT:    sw a1, -12(s0)
114; RV32-WITHFP-NEXT:    lw a0, 0(a0)
115; RV32-WITHFP-NEXT:    .cfi_def_cfa sp, 48
116; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
117; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
118; RV32-WITHFP-NEXT:    .cfi_restore ra
119; RV32-WITHFP-NEXT:    .cfi_restore s0
120; RV32-WITHFP-NEXT:    addi sp, sp, 48
121; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
122; RV32-WITHFP-NEXT:    ret
123;
124; RV64-WITHFP-LABEL: va1:
125; RV64-WITHFP:       # %bb.0:
126; RV64-WITHFP-NEXT:    addi sp, sp, -96
127; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 96
128; RV64-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
129; RV64-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
130; RV64-WITHFP-NEXT:    .cfi_offset ra, -72
131; RV64-WITHFP-NEXT:    .cfi_offset s0, -80
132; RV64-WITHFP-NEXT:    addi s0, sp, 32
133; RV64-WITHFP-NEXT:    .cfi_def_cfa s0, 64
134; RV64-WITHFP-NEXT:    sd a1, 8(s0)
135; RV64-WITHFP-NEXT:    sd a2, 16(s0)
136; RV64-WITHFP-NEXT:    sd a3, 24(s0)
137; RV64-WITHFP-NEXT:    sd a4, 32(s0)
138; RV64-WITHFP-NEXT:    addi a0, s0, -24
139; RV64-WITHFP-NEXT:    addi a1, s0, 8
140; RV64-WITHFP-NEXT:    sd a1, -24(s0)
141; RV64-WITHFP-NEXT:    lw a0, 4(a0)
142; RV64-WITHFP-NEXT:    lwu a1, -24(s0)
143; RV64-WITHFP-NEXT:    sd a5, 40(s0)
144; RV64-WITHFP-NEXT:    sd a6, 48(s0)
145; RV64-WITHFP-NEXT:    sd a7, 56(s0)
146; RV64-WITHFP-NEXT:    slli a0, a0, 32
147; RV64-WITHFP-NEXT:    or a0, a0, a1
148; RV64-WITHFP-NEXT:    addi a1, a0, 4
149; RV64-WITHFP-NEXT:    srli a2, a1, 32
150; RV64-WITHFP-NEXT:    sw a1, -24(s0)
151; RV64-WITHFP-NEXT:    sw a2, -20(s0)
152; RV64-WITHFP-NEXT:    lw a0, 0(a0)
153; RV64-WITHFP-NEXT:    .cfi_def_cfa sp, 96
154; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
155; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
156; RV64-WITHFP-NEXT:    .cfi_restore ra
157; RV64-WITHFP-NEXT:    .cfi_restore s0
158; RV64-WITHFP-NEXT:    addi sp, sp, 96
159; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
160; RV64-WITHFP-NEXT:    ret
161  %va = alloca ptr ; va_list slot
162  call void @llvm.va_start(ptr %va)
163  %argp.cur = load ptr, ptr %va, align 4 ; current argument pointer
164  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4 ; step past one i32
165  store ptr %argp.next, ptr %va, align 4
166  %1 = load i32, ptr %argp.cur, align 4 ; fetch the first vararg
167  call void @llvm.va_end(ptr %va)
168  ret i32 %1
169}
170
; va1_va_arg: same result as va1 but through the va_arg instruction
; (iXLen is rewritten to i32/i64 by the sed in the RUN lines, so the
; lowering includes the align-up sequence: addi +3/+7, andi -4/-8).
; nounwind, so no CFI directives are expected here.
171define iXLen @va1_va_arg(ptr %fmt, ...) nounwind {
172; RV32-LABEL: va1_va_arg:
173; RV32:       # %bb.0:
174; RV32-NEXT:    addi sp, sp, -48
175; RV32-NEXT:    sw a1, 20(sp)
176; RV32-NEXT:    sw a2, 24(sp)
177; RV32-NEXT:    sw a3, 28(sp)
178; RV32-NEXT:    sw a4, 32(sp)
179; RV32-NEXT:    sw a5, 36(sp)
180; RV32-NEXT:    sw a6, 40(sp)
181; RV32-NEXT:    sw a7, 44(sp)
182; RV32-NEXT:    addi a0, sp, 20
183; RV32-NEXT:    sw a0, 12(sp)
184; RV32-NEXT:    lw a0, 12(sp)
185; RV32-NEXT:    addi a0, a0, 3
186; RV32-NEXT:    andi a0, a0, -4
187; RV32-NEXT:    addi a1, a0, 4
188; RV32-NEXT:    sw a1, 12(sp)
189; RV32-NEXT:    lw a0, 0(a0)
190; RV32-NEXT:    addi sp, sp, 48
191; RV32-NEXT:    ret
192;
193; RV64-LABEL: va1_va_arg:
194; RV64:       # %bb.0:
195; RV64-NEXT:    addi sp, sp, -80
196; RV64-NEXT:    sd a1, 24(sp)
197; RV64-NEXT:    sd a2, 32(sp)
198; RV64-NEXT:    sd a3, 40(sp)
199; RV64-NEXT:    sd a4, 48(sp)
200; RV64-NEXT:    sd a5, 56(sp)
201; RV64-NEXT:    sd a6, 64(sp)
202; RV64-NEXT:    sd a7, 72(sp)
203; RV64-NEXT:    addi a0, sp, 24
204; RV64-NEXT:    sd a0, 8(sp)
205; RV64-NEXT:    ld a0, 8(sp)
206; RV64-NEXT:    addi a0, a0, 7
207; RV64-NEXT:    andi a0, a0, -8
208; RV64-NEXT:    addi a1, a0, 8
209; RV64-NEXT:    sd a1, 8(sp)
210; RV64-NEXT:    ld a0, 0(a0)
211; RV64-NEXT:    addi sp, sp, 80
212; RV64-NEXT:    ret
213;
214; RV32-WITHFP-LABEL: va1_va_arg:
215; RV32-WITHFP:       # %bb.0:
216; RV32-WITHFP-NEXT:    addi sp, sp, -48
217; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
218; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
219; RV32-WITHFP-NEXT:    addi s0, sp, 16
220; RV32-WITHFP-NEXT:    sw a1, 4(s0)
221; RV32-WITHFP-NEXT:    sw a2, 8(s0)
222; RV32-WITHFP-NEXT:    sw a3, 12(s0)
223; RV32-WITHFP-NEXT:    sw a4, 16(s0)
224; RV32-WITHFP-NEXT:    sw a5, 20(s0)
225; RV32-WITHFP-NEXT:    sw a6, 24(s0)
226; RV32-WITHFP-NEXT:    sw a7, 28(s0)
227; RV32-WITHFP-NEXT:    addi a0, s0, 4
228; RV32-WITHFP-NEXT:    sw a0, -12(s0)
229; RV32-WITHFP-NEXT:    lw a0, -12(s0)
230; RV32-WITHFP-NEXT:    addi a0, a0, 3
231; RV32-WITHFP-NEXT:    andi a0, a0, -4
232; RV32-WITHFP-NEXT:    addi a1, a0, 4
233; RV32-WITHFP-NEXT:    sw a1, -12(s0)
234; RV32-WITHFP-NEXT:    lw a0, 0(a0)
235; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
236; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
237; RV32-WITHFP-NEXT:    addi sp, sp, 48
238; RV32-WITHFP-NEXT:    ret
239;
240; RV64-WITHFP-LABEL: va1_va_arg:
241; RV64-WITHFP:       # %bb.0:
242; RV64-WITHFP-NEXT:    addi sp, sp, -96
243; RV64-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
244; RV64-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
245; RV64-WITHFP-NEXT:    addi s0, sp, 32
246; RV64-WITHFP-NEXT:    sd a1, 8(s0)
247; RV64-WITHFP-NEXT:    sd a2, 16(s0)
248; RV64-WITHFP-NEXT:    sd a3, 24(s0)
249; RV64-WITHFP-NEXT:    sd a4, 32(s0)
250; RV64-WITHFP-NEXT:    sd a5, 40(s0)
251; RV64-WITHFP-NEXT:    sd a6, 48(s0)
252; RV64-WITHFP-NEXT:    sd a7, 56(s0)
253; RV64-WITHFP-NEXT:    addi a0, s0, 8
254; RV64-WITHFP-NEXT:    sd a0, -24(s0)
255; RV64-WITHFP-NEXT:    ld a0, -24(s0)
256; RV64-WITHFP-NEXT:    addi a0, a0, 7
257; RV64-WITHFP-NEXT:    andi a0, a0, -8
258; RV64-WITHFP-NEXT:    addi a1, a0, 8
259; RV64-WITHFP-NEXT:    sd a1, -24(s0)
260; RV64-WITHFP-NEXT:    ld a0, 0(a0)
261; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
262; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
263; RV64-WITHFP-NEXT:    addi sp, sp, 96
264; RV64-WITHFP-NEXT:    ret
265  %va = alloca ptr ; va_list slot
266  call void @llvm.va_start(ptr %va)
267  %1 = va_arg ptr %va, iXLen ; pull one xlen-sized vararg
268  call void @llvm.va_end(ptr %va)
269  ret iXLen %1
270}
271
272; Ensure the adjustment when restoring the stack pointer using the frame
273; pointer is correct
; va1_va_arg_alloca: the first vararg sizes a dynamic alloca, forcing a
; frame pointer even without -frame-pointer=all. The checks verify that
; sp is restored from s0 with the correct adjustment (addi sp, s0, -16
; on rv32 / -32 on rv64) before the callee-saved reloads.
274define iXLen @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
275; RV32-LABEL: va1_va_arg_alloca:
276; RV32:       # %bb.0:
277; RV32-NEXT:    addi sp, sp, -48
278; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
279; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
280; RV32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
281; RV32-NEXT:    addi s0, sp, 16
282; RV32-NEXT:    sw a1, 4(s0)
283; RV32-NEXT:    sw a2, 8(s0)
284; RV32-NEXT:    sw a3, 12(s0)
285; RV32-NEXT:    sw a4, 16(s0)
286; RV32-NEXT:    sw a5, 20(s0)
287; RV32-NEXT:    sw a6, 24(s0)
288; RV32-NEXT:    sw a7, 28(s0)
289; RV32-NEXT:    addi a0, s0, 4
290; RV32-NEXT:    sw a0, -16(s0)
291; RV32-NEXT:    lw a0, -16(s0)
292; RV32-NEXT:    addi a0, a0, 3
293; RV32-NEXT:    andi a0, a0, -4
294; RV32-NEXT:    addi a1, a0, 4
295; RV32-NEXT:    sw a1, -16(s0)
296; RV32-NEXT:    lw s1, 0(a0)
297; RV32-NEXT:    addi a0, s1, 15
298; RV32-NEXT:    andi a0, a0, -16
299; RV32-NEXT:    sub a0, sp, a0
300; RV32-NEXT:    mv sp, a0
301; RV32-NEXT:    call notdead
302; RV32-NEXT:    mv a0, s1
303; RV32-NEXT:    addi sp, s0, -16
304; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
305; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
306; RV32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
307; RV32-NEXT:    addi sp, sp, 48
308; RV32-NEXT:    ret
309;
310; RV64-LABEL: va1_va_arg_alloca:
311; RV64:       # %bb.0:
312; RV64-NEXT:    addi sp, sp, -96
313; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
314; RV64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
315; RV64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
316; RV64-NEXT:    addi s0, sp, 32
317; RV64-NEXT:    sd a1, 8(s0)
318; RV64-NEXT:    sd a2, 16(s0)
319; RV64-NEXT:    sd a3, 24(s0)
320; RV64-NEXT:    sd a4, 32(s0)
321; RV64-NEXT:    sd a5, 40(s0)
322; RV64-NEXT:    sd a6, 48(s0)
323; RV64-NEXT:    sd a7, 56(s0)
324; RV64-NEXT:    addi a0, s0, 8
325; RV64-NEXT:    sd a0, -32(s0)
326; RV64-NEXT:    ld a0, -32(s0)
327; RV64-NEXT:    addi a0, a0, 7
328; RV64-NEXT:    andi a0, a0, -8
329; RV64-NEXT:    addi a1, a0, 8
330; RV64-NEXT:    sd a1, -32(s0)
331; RV64-NEXT:    ld s1, 0(a0)
332; RV64-NEXT:    addi a0, s1, 15
333; RV64-NEXT:    andi a0, a0, -16
334; RV64-NEXT:    sub a0, sp, a0
335; RV64-NEXT:    mv sp, a0
336; RV64-NEXT:    call notdead
337; RV64-NEXT:    mv a0, s1
338; RV64-NEXT:    addi sp, s0, -32
339; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
340; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
341; RV64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
342; RV64-NEXT:    addi sp, sp, 96
343; RV64-NEXT:    ret
344;
345; RV32-WITHFP-LABEL: va1_va_arg_alloca:
346; RV32-WITHFP:       # %bb.0:
347; RV32-WITHFP-NEXT:    addi sp, sp, -48
348; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
349; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
350; RV32-WITHFP-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
351; RV32-WITHFP-NEXT:    addi s0, sp, 16
352; RV32-WITHFP-NEXT:    sw a1, 4(s0)
353; RV32-WITHFP-NEXT:    sw a2, 8(s0)
354; RV32-WITHFP-NEXT:    sw a3, 12(s0)
355; RV32-WITHFP-NEXT:    sw a4, 16(s0)
356; RV32-WITHFP-NEXT:    sw a5, 20(s0)
357; RV32-WITHFP-NEXT:    sw a6, 24(s0)
358; RV32-WITHFP-NEXT:    sw a7, 28(s0)
359; RV32-WITHFP-NEXT:    addi a0, s0, 4
360; RV32-WITHFP-NEXT:    sw a0, -16(s0)
361; RV32-WITHFP-NEXT:    lw a0, -16(s0)
362; RV32-WITHFP-NEXT:    addi a0, a0, 3
363; RV32-WITHFP-NEXT:    andi a0, a0, -4
364; RV32-WITHFP-NEXT:    addi a1, a0, 4
365; RV32-WITHFP-NEXT:    sw a1, -16(s0)
366; RV32-WITHFP-NEXT:    lw s1, 0(a0)
367; RV32-WITHFP-NEXT:    addi a0, s1, 15
368; RV32-WITHFP-NEXT:    andi a0, a0, -16
369; RV32-WITHFP-NEXT:    sub a0, sp, a0
370; RV32-WITHFP-NEXT:    mv sp, a0
371; RV32-WITHFP-NEXT:    call notdead
372; RV32-WITHFP-NEXT:    mv a0, s1
373; RV32-WITHFP-NEXT:    addi sp, s0, -16
374; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
375; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
376; RV32-WITHFP-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
377; RV32-WITHFP-NEXT:    addi sp, sp, 48
378; RV32-WITHFP-NEXT:    ret
379;
380; RV64-WITHFP-LABEL: va1_va_arg_alloca:
381; RV64-WITHFP:       # %bb.0:
382; RV64-WITHFP-NEXT:    addi sp, sp, -96
383; RV64-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
384; RV64-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
385; RV64-WITHFP-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
386; RV64-WITHFP-NEXT:    addi s0, sp, 32
387; RV64-WITHFP-NEXT:    sd a1, 8(s0)
388; RV64-WITHFP-NEXT:    sd a2, 16(s0)
389; RV64-WITHFP-NEXT:    sd a3, 24(s0)
390; RV64-WITHFP-NEXT:    sd a4, 32(s0)
391; RV64-WITHFP-NEXT:    sd a5, 40(s0)
392; RV64-WITHFP-NEXT:    sd a6, 48(s0)
393; RV64-WITHFP-NEXT:    sd a7, 56(s0)
394; RV64-WITHFP-NEXT:    addi a0, s0, 8
395; RV64-WITHFP-NEXT:    sd a0, -32(s0)
396; RV64-WITHFP-NEXT:    ld a0, -32(s0)
397; RV64-WITHFP-NEXT:    addi a0, a0, 7
398; RV64-WITHFP-NEXT:    andi a0, a0, -8
399; RV64-WITHFP-NEXT:    addi a1, a0, 8
400; RV64-WITHFP-NEXT:    sd a1, -32(s0)
401; RV64-WITHFP-NEXT:    ld s1, 0(a0)
402; RV64-WITHFP-NEXT:    addi a0, s1, 15
403; RV64-WITHFP-NEXT:    andi a0, a0, -16
404; RV64-WITHFP-NEXT:    sub a0, sp, a0
405; RV64-WITHFP-NEXT:    mv sp, a0
406; RV64-WITHFP-NEXT:    call notdead
407; RV64-WITHFP-NEXT:    mv a0, s1
408; RV64-WITHFP-NEXT:    addi sp, s0, -32
409; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
410; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
411; RV64-WITHFP-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
412; RV64-WITHFP-NEXT:    addi sp, sp, 96
413; RV64-WITHFP-NEXT:    ret
414  %va = alloca ptr ; va_list slot
415  call void @llvm.va_start(ptr %va)
416  %1 = va_arg ptr %va, iXLen ; vararg provides the dynamic alloca size
417  %2 = alloca i8, iXLen %1 ; variable-sized object forces FP-based sp restore
418  call void @notdead(ptr %2) ; keep the buffer live across the call
419  call void @llvm.va_end(ptr %va)
420  ret iXLen %1
421}
422
; va1_caller: calls va1 with a double and an i32 as varargs. The checks
; show how the f64 constant 1.0 is materialized per configuration
; (register pair a3/a4 on rv32, constant pool on lp64, fmv via FPRs when
; the D extension is enabled) and that FP varargs still go in GPRs.
423define void @va1_caller() nounwind {
424; RV32-LABEL: va1_caller:
425; RV32:       # %bb.0:
426; RV32-NEXT:    addi sp, sp, -16
427; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
428; RV32-NEXT:    lui a3, 261888
429; RV32-NEXT:    li a4, 2
430; RV32-NEXT:    li a2, 0
431; RV32-NEXT:    call va1
432; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
433; RV32-NEXT:    addi sp, sp, 16
434; RV32-NEXT:    ret
435;
436; LP64-LABEL: va1_caller:
437; LP64:       # %bb.0:
438; LP64-NEXT:    addi sp, sp, -16
439; LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
440; LP64-NEXT:    lui a0, %hi(.LCPI3_0)
441; LP64-NEXT:    ld a1, %lo(.LCPI3_0)(a0)
442; LP64-NEXT:    li a2, 2
443; LP64-NEXT:    call va1
444; LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
445; LP64-NEXT:    addi sp, sp, 16
446; LP64-NEXT:    ret
447;
448; LP64F-LABEL: va1_caller:
449; LP64F:       # %bb.0:
450; LP64F-NEXT:    addi sp, sp, -16
451; LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
452; LP64F-NEXT:    li a0, 1023
453; LP64F-NEXT:    slli a0, a0, 52
454; LP64F-NEXT:    fmv.d.x fa5, a0
455; LP64F-NEXT:    li a2, 2
456; LP64F-NEXT:    fmv.x.d a1, fa5
457; LP64F-NEXT:    call va1
458; LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
459; LP64F-NEXT:    addi sp, sp, 16
460; LP64F-NEXT:    ret
461;
462; LP64D-LABEL: va1_caller:
463; LP64D:       # %bb.0:
464; LP64D-NEXT:    addi sp, sp, -16
465; LP64D-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
466; LP64D-NEXT:    li a0, 1023
467; LP64D-NEXT:    slli a0, a0, 52
468; LP64D-NEXT:    fmv.d.x fa5, a0
469; LP64D-NEXT:    li a2, 2
470; LP64D-NEXT:    fmv.x.d a1, fa5
471; LP64D-NEXT:    call va1
472; LP64D-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
473; LP64D-NEXT:    addi sp, sp, 16
474; LP64D-NEXT:    ret
475;
476; RV32-WITHFP-LABEL: va1_caller:
477; RV32-WITHFP:       # %bb.0:
478; RV32-WITHFP-NEXT:    addi sp, sp, -16
479; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
480; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
481; RV32-WITHFP-NEXT:    addi s0, sp, 16
482; RV32-WITHFP-NEXT:    lui a3, 261888
483; RV32-WITHFP-NEXT:    li a4, 2
484; RV32-WITHFP-NEXT:    li a2, 0
485; RV32-WITHFP-NEXT:    call va1
486; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
487; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
488; RV32-WITHFP-NEXT:    addi sp, sp, 16
489; RV32-WITHFP-NEXT:    ret
490;
491; RV64-WITHFP-LABEL: va1_caller:
492; RV64-WITHFP:       # %bb.0:
493; RV64-WITHFP-NEXT:    addi sp, sp, -16
494; RV64-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
495; RV64-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
496; RV64-WITHFP-NEXT:    addi s0, sp, 16
497; RV64-WITHFP-NEXT:    lui a0, %hi(.LCPI3_0)
498; RV64-WITHFP-NEXT:    ld a1, %lo(.LCPI3_0)(a0)
499; RV64-WITHFP-NEXT:    li a2, 2
500; RV64-WITHFP-NEXT:    call va1
501; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
502; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
503; RV64-WITHFP-NEXT:    addi sp, sp, 16
504; RV64-WITHFP-NEXT:    ret
505  %1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2) ; result intentionally unused
506  ret void
507}
508
509; Ensure that 2x xlen size+alignment varargs are accessed via an "aligned"
510; register pair (where the first register is even-numbered).
511
; va2: reads a vararg double through a hand-rolled 8-byte align-up of
; the va_list pointer, returning its bits as i64. Exercises the
; "aligned register pair" rule described above for 2*xlen varargs.
512define i64 @va2(ptr %fmt, ...) nounwind {
513; ILP32-LABEL: va2:
514; ILP32:       # %bb.0:
515; ILP32-NEXT:    addi sp, sp, -48
516; ILP32-NEXT:    sw a1, 20(sp)
517; ILP32-NEXT:    sw a2, 24(sp)
518; ILP32-NEXT:    sw a3, 28(sp)
519; ILP32-NEXT:    sw a4, 32(sp)
520; ILP32-NEXT:    addi a0, sp, 20
521; ILP32-NEXT:    sw a0, 12(sp)
522; ILP32-NEXT:    lw a0, 12(sp)
523; ILP32-NEXT:    sw a5, 36(sp)
524; ILP32-NEXT:    sw a6, 40(sp)
525; ILP32-NEXT:    sw a7, 44(sp)
526; ILP32-NEXT:    addi a1, a0, 7
527; ILP32-NEXT:    addi a0, a0, 15
528; ILP32-NEXT:    andi a1, a1, -8
529; ILP32-NEXT:    sw a0, 12(sp)
530; ILP32-NEXT:    lw a0, 0(a1)
531; ILP32-NEXT:    lw a1, 4(a1)
532; ILP32-NEXT:    addi sp, sp, 48
533; ILP32-NEXT:    ret
534;
535; RV32D-ILP32-LABEL: va2:
536; RV32D-ILP32:       # %bb.0:
537; RV32D-ILP32-NEXT:    addi sp, sp, -48
538; RV32D-ILP32-NEXT:    sw a1, 20(sp)
539; RV32D-ILP32-NEXT:    sw a2, 24(sp)
540; RV32D-ILP32-NEXT:    sw a3, 28(sp)
541; RV32D-ILP32-NEXT:    sw a4, 32(sp)
542; RV32D-ILP32-NEXT:    addi a0, sp, 20
543; RV32D-ILP32-NEXT:    sw a0, 12(sp)
544; RV32D-ILP32-NEXT:    lw a0, 12(sp)
545; RV32D-ILP32-NEXT:    sw a5, 36(sp)
546; RV32D-ILP32-NEXT:    sw a6, 40(sp)
547; RV32D-ILP32-NEXT:    sw a7, 44(sp)
548; RV32D-ILP32-NEXT:    addi a1, a0, 7
549; RV32D-ILP32-NEXT:    andi a1, a1, -8
550; RV32D-ILP32-NEXT:    fld fa5, 0(a1)
551; RV32D-ILP32-NEXT:    addi a0, a0, 15
552; RV32D-ILP32-NEXT:    sw a0, 12(sp)
553; RV32D-ILP32-NEXT:    fsd fa5, 0(sp)
554; RV32D-ILP32-NEXT:    lw a0, 0(sp)
555; RV32D-ILP32-NEXT:    lw a1, 4(sp)
556; RV32D-ILP32-NEXT:    addi sp, sp, 48
557; RV32D-ILP32-NEXT:    ret
558;
559; RV32D-ILP32F-LABEL: va2:
560; RV32D-ILP32F:       # %bb.0:
561; RV32D-ILP32F-NEXT:    addi sp, sp, -48
562; RV32D-ILP32F-NEXT:    sw a1, 20(sp)
563; RV32D-ILP32F-NEXT:    sw a2, 24(sp)
564; RV32D-ILP32F-NEXT:    sw a3, 28(sp)
565; RV32D-ILP32F-NEXT:    sw a4, 32(sp)
566; RV32D-ILP32F-NEXT:    addi a0, sp, 20
567; RV32D-ILP32F-NEXT:    sw a0, 12(sp)
568; RV32D-ILP32F-NEXT:    lw a0, 12(sp)
569; RV32D-ILP32F-NEXT:    sw a5, 36(sp)
570; RV32D-ILP32F-NEXT:    sw a6, 40(sp)
571; RV32D-ILP32F-NEXT:    sw a7, 44(sp)
572; RV32D-ILP32F-NEXT:    addi a1, a0, 7
573; RV32D-ILP32F-NEXT:    andi a1, a1, -8
574; RV32D-ILP32F-NEXT:    fld fa5, 0(a1)
575; RV32D-ILP32F-NEXT:    addi a0, a0, 15
576; RV32D-ILP32F-NEXT:    sw a0, 12(sp)
577; RV32D-ILP32F-NEXT:    fsd fa5, 0(sp)
578; RV32D-ILP32F-NEXT:    lw a0, 0(sp)
579; RV32D-ILP32F-NEXT:    lw a1, 4(sp)
580; RV32D-ILP32F-NEXT:    addi sp, sp, 48
581; RV32D-ILP32F-NEXT:    ret
582;
583; RV32D-ILP32D-LABEL: va2:
584; RV32D-ILP32D:       # %bb.0:
585; RV32D-ILP32D-NEXT:    addi sp, sp, -48
586; RV32D-ILP32D-NEXT:    sw a1, 20(sp)
587; RV32D-ILP32D-NEXT:    sw a2, 24(sp)
588; RV32D-ILP32D-NEXT:    sw a3, 28(sp)
589; RV32D-ILP32D-NEXT:    sw a4, 32(sp)
590; RV32D-ILP32D-NEXT:    addi a0, sp, 20
591; RV32D-ILP32D-NEXT:    sw a0, 12(sp)
592; RV32D-ILP32D-NEXT:    lw a0, 12(sp)
593; RV32D-ILP32D-NEXT:    sw a5, 36(sp)
594; RV32D-ILP32D-NEXT:    sw a6, 40(sp)
595; RV32D-ILP32D-NEXT:    sw a7, 44(sp)
596; RV32D-ILP32D-NEXT:    addi a1, a0, 7
597; RV32D-ILP32D-NEXT:    andi a1, a1, -8
598; RV32D-ILP32D-NEXT:    fld fa5, 0(a1)
599; RV32D-ILP32D-NEXT:    addi a0, a0, 15
600; RV32D-ILP32D-NEXT:    sw a0, 12(sp)
601; RV32D-ILP32D-NEXT:    fsd fa5, 0(sp)
602; RV32D-ILP32D-NEXT:    lw a0, 0(sp)
603; RV32D-ILP32D-NEXT:    lw a1, 4(sp)
604; RV32D-ILP32D-NEXT:    addi sp, sp, 48
605; RV32D-ILP32D-NEXT:    ret
606;
607; RV64-LABEL: va2:
608; RV64:       # %bb.0:
609; RV64-NEXT:    addi sp, sp, -80
610; RV64-NEXT:    sd a1, 24(sp)
611; RV64-NEXT:    sd a2, 32(sp)
612; RV64-NEXT:    sd a3, 40(sp)
613; RV64-NEXT:    sd a4, 48(sp)
614; RV64-NEXT:    addi a0, sp, 24
615; RV64-NEXT:    sd a0, 8(sp)
616; RV64-NEXT:    ld a0, 8(sp)
617; RV64-NEXT:    sd a5, 56(sp)
618; RV64-NEXT:    sd a6, 64(sp)
619; RV64-NEXT:    sd a7, 72(sp)
620; RV64-NEXT:    addi a1, a0, 7
621; RV64-NEXT:    addi a0, a0, 15
622; RV64-NEXT:    andi a1, a1, -8
623; RV64-NEXT:    sd a0, 8(sp)
624; RV64-NEXT:    ld a0, 0(a1)
625; RV64-NEXT:    addi sp, sp, 80
626; RV64-NEXT:    ret
627;
628; RV32-WITHFP-LABEL: va2:
629; RV32-WITHFP:       # %bb.0:
630; RV32-WITHFP-NEXT:    addi sp, sp, -48
631; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
632; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
633; RV32-WITHFP-NEXT:    addi s0, sp, 16
634; RV32-WITHFP-NEXT:    sw a1, 4(s0)
635; RV32-WITHFP-NEXT:    sw a2, 8(s0)
636; RV32-WITHFP-NEXT:    sw a3, 12(s0)
637; RV32-WITHFP-NEXT:    sw a4, 16(s0)
638; RV32-WITHFP-NEXT:    addi a0, s0, 4
639; RV32-WITHFP-NEXT:    sw a0, -12(s0)
640; RV32-WITHFP-NEXT:    lw a0, -12(s0)
641; RV32-WITHFP-NEXT:    sw a5, 20(s0)
642; RV32-WITHFP-NEXT:    sw a6, 24(s0)
643; RV32-WITHFP-NEXT:    sw a7, 28(s0)
644; RV32-WITHFP-NEXT:    addi a1, a0, 7
645; RV32-WITHFP-NEXT:    addi a0, a0, 15
646; RV32-WITHFP-NEXT:    andi a1, a1, -8
647; RV32-WITHFP-NEXT:    sw a0, -12(s0)
648; RV32-WITHFP-NEXT:    lw a0, 0(a1)
649; RV32-WITHFP-NEXT:    lw a1, 4(a1)
650; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
651; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
652; RV32-WITHFP-NEXT:    addi sp, sp, 48
653; RV32-WITHFP-NEXT:    ret
654;
655; RV64-WITHFP-LABEL: va2:
656; RV64-WITHFP:       # %bb.0:
657; RV64-WITHFP-NEXT:    addi sp, sp, -96
658; RV64-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
659; RV64-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
660; RV64-WITHFP-NEXT:    addi s0, sp, 32
661; RV64-WITHFP-NEXT:    sd a1, 8(s0)
662; RV64-WITHFP-NEXT:    sd a2, 16(s0)
663; RV64-WITHFP-NEXT:    sd a3, 24(s0)
664; RV64-WITHFP-NEXT:    sd a4, 32(s0)
665; RV64-WITHFP-NEXT:    addi a0, s0, 8
666; RV64-WITHFP-NEXT:    sd a0, -24(s0)
667; RV64-WITHFP-NEXT:    ld a0, -24(s0)
668; RV64-WITHFP-NEXT:    sd a5, 40(s0)
669; RV64-WITHFP-NEXT:    sd a6, 48(s0)
670; RV64-WITHFP-NEXT:    sd a7, 56(s0)
671; RV64-WITHFP-NEXT:    addi a1, a0, 7
672; RV64-WITHFP-NEXT:    addi a0, a0, 15
673; RV64-WITHFP-NEXT:    andi a1, a1, -8
674; RV64-WITHFP-NEXT:    sd a0, -24(s0)
675; RV64-WITHFP-NEXT:    ld a0, 0(a1)
676; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
677; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
678; RV64-WITHFP-NEXT:    addi sp, sp, 96
679; RV64-WITHFP-NEXT:    ret
680  %va = alloca ptr ; va_list slot
681  call void @llvm.va_start(ptr %va)
682  %argp.cur = load ptr, ptr %va
683  %ptrint = ptrtoint ptr %argp.cur to iXLen
684  %1 = add iXLen %ptrint, 7 ; raw +7 (pre-mask) value
685  %2 = and iXLen %1, -8 ; 8-byte aligned address of the double
686  %argp.cur.aligned = inttoptr iXLen %1 to ptr ; note: built from %1, not %2 - hence the addi +15 in the checks
687  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
688  store ptr %argp.next, ptr %va
689  %3 = inttoptr iXLen %2 to ptr
690  %4 = load double, ptr %3, align 8
691  %5 = bitcast double %4 to i64 ; return the raw bits
692  call void @llvm.va_end(ptr %va)
693  ret i64 %5
694}
695
696; This test is slightly different than the SelectionDAG counterpart because
697; narrowScalar and widenScalar for G_VAARG on types other than sXLen
698; are not implemented yet.
; va2_va_arg: va_arg variant of va2 but fetching only an xlen-sized
; value (see the note above about G_VAARG narrow/widen not being
; implemented for non-sXLen types), so only xlen alignment is applied.
699define iXLen @va2_va_arg(ptr %fmt, ...) nounwind {
700; RV32-LABEL: va2_va_arg:
701; RV32:       # %bb.0:
702; RV32-NEXT:    addi sp, sp, -48
703; RV32-NEXT:    sw a1, 20(sp)
704; RV32-NEXT:    sw a2, 24(sp)
705; RV32-NEXT:    sw a3, 28(sp)
706; RV32-NEXT:    sw a4, 32(sp)
707; RV32-NEXT:    sw a5, 36(sp)
708; RV32-NEXT:    sw a6, 40(sp)
709; RV32-NEXT:    sw a7, 44(sp)
710; RV32-NEXT:    addi a0, sp, 20
711; RV32-NEXT:    sw a0, 12(sp)
712; RV32-NEXT:    lw a0, 12(sp)
713; RV32-NEXT:    addi a0, a0, 3
714; RV32-NEXT:    andi a0, a0, -4
715; RV32-NEXT:    addi a1, a0, 4
716; RV32-NEXT:    sw a1, 12(sp)
717; RV32-NEXT:    lw a0, 0(a0)
718; RV32-NEXT:    addi sp, sp, 48
719; RV32-NEXT:    ret
720;
721; RV64-LABEL: va2_va_arg:
722; RV64:       # %bb.0:
723; RV64-NEXT:    addi sp, sp, -80
724; RV64-NEXT:    sd a1, 24(sp)
725; RV64-NEXT:    sd a2, 32(sp)
726; RV64-NEXT:    sd a3, 40(sp)
727; RV64-NEXT:    sd a4, 48(sp)
728; RV64-NEXT:    sd a5, 56(sp)
729; RV64-NEXT:    sd a6, 64(sp)
730; RV64-NEXT:    sd a7, 72(sp)
731; RV64-NEXT:    addi a0, sp, 24
732; RV64-NEXT:    sd a0, 8(sp)
733; RV64-NEXT:    ld a0, 8(sp)
734; RV64-NEXT:    addi a0, a0, 7
735; RV64-NEXT:    andi a0, a0, -8
736; RV64-NEXT:    addi a1, a0, 8
737; RV64-NEXT:    sd a1, 8(sp)
738; RV64-NEXT:    ld a0, 0(a0)
739; RV64-NEXT:    addi sp, sp, 80
740; RV64-NEXT:    ret
741;
742; RV32-WITHFP-LABEL: va2_va_arg:
743; RV32-WITHFP:       # %bb.0:
744; RV32-WITHFP-NEXT:    addi sp, sp, -48
745; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
746; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
747; RV32-WITHFP-NEXT:    addi s0, sp, 16
748; RV32-WITHFP-NEXT:    sw a1, 4(s0)
749; RV32-WITHFP-NEXT:    sw a2, 8(s0)
750; RV32-WITHFP-NEXT:    sw a3, 12(s0)
751; RV32-WITHFP-NEXT:    sw a4, 16(s0)
752; RV32-WITHFP-NEXT:    sw a5, 20(s0)
753; RV32-WITHFP-NEXT:    sw a6, 24(s0)
754; RV32-WITHFP-NEXT:    sw a7, 28(s0)
755; RV32-WITHFP-NEXT:    addi a0, s0, 4
756; RV32-WITHFP-NEXT:    sw a0, -12(s0)
757; RV32-WITHFP-NEXT:    lw a0, -12(s0)
758; RV32-WITHFP-NEXT:    addi a0, a0, 3
759; RV32-WITHFP-NEXT:    andi a0, a0, -4
760; RV32-WITHFP-NEXT:    addi a1, a0, 4
761; RV32-WITHFP-NEXT:    sw a1, -12(s0)
762; RV32-WITHFP-NEXT:    lw a0, 0(a0)
763; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
764; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
765; RV32-WITHFP-NEXT:    addi sp, sp, 48
766; RV32-WITHFP-NEXT:    ret
767;
768; RV64-WITHFP-LABEL: va2_va_arg:
769; RV64-WITHFP:       # %bb.0:
770; RV64-WITHFP-NEXT:    addi sp, sp, -96
771; RV64-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
772; RV64-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
773; RV64-WITHFP-NEXT:    addi s0, sp, 32
774; RV64-WITHFP-NEXT:    sd a1, 8(s0)
775; RV64-WITHFP-NEXT:    sd a2, 16(s0)
776; RV64-WITHFP-NEXT:    sd a3, 24(s0)
777; RV64-WITHFP-NEXT:    sd a4, 32(s0)
778; RV64-WITHFP-NEXT:    sd a5, 40(s0)
779; RV64-WITHFP-NEXT:    sd a6, 48(s0)
780; RV64-WITHFP-NEXT:    sd a7, 56(s0)
781; RV64-WITHFP-NEXT:    addi a0, s0, 8
782; RV64-WITHFP-NEXT:    sd a0, -24(s0)
783; RV64-WITHFP-NEXT:    ld a0, -24(s0)
784; RV64-WITHFP-NEXT:    addi a0, a0, 7
785; RV64-WITHFP-NEXT:    andi a0, a0, -8
786; RV64-WITHFP-NEXT:    addi a1, a0, 8
787; RV64-WITHFP-NEXT:    sd a1, -24(s0)
788; RV64-WITHFP-NEXT:    ld a0, 0(a0)
789; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
790; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
791; RV64-WITHFP-NEXT:    addi sp, sp, 96
792; RV64-WITHFP-NEXT:    ret
793  %va = alloca ptr ; va_list slot
794  call void @llvm.va_start(ptr %va)
795  %1 = va_arg ptr %va, iXLen ; pull one xlen-sized vararg
796  call void @llvm.va_end(ptr %va)
797  ret iXLen %1
798}
799
; va2_caller: calls va2 with a single i32 vararg (passed in a1 across
; all configurations checked here); the i64 result is ignored.
800define void @va2_caller() nounwind {
801; RV32-LABEL: va2_caller:
802; RV32:       # %bb.0:
803; RV32-NEXT:    addi sp, sp, -16
804; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
805; RV32-NEXT:    li a1, 1
806; RV32-NEXT:    call va2
807; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
808; RV32-NEXT:    addi sp, sp, 16
809; RV32-NEXT:    ret
810;
811; RV64-LABEL: va2_caller:
812; RV64:       # %bb.0:
813; RV64-NEXT:    addi sp, sp, -16
814; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
815; RV64-NEXT:    li a1, 1
816; RV64-NEXT:    call va2
817; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
818; RV64-NEXT:    addi sp, sp, 16
819; RV64-NEXT:    ret
820;
821; RV32-WITHFP-LABEL: va2_caller:
822; RV32-WITHFP:       # %bb.0:
823; RV32-WITHFP-NEXT:    addi sp, sp, -16
824; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
825; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
826; RV32-WITHFP-NEXT:    addi s0, sp, 16
827; RV32-WITHFP-NEXT:    li a1, 1
828; RV32-WITHFP-NEXT:    call va2
829; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
830; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
831; RV32-WITHFP-NEXT:    addi sp, sp, 16
832; RV32-WITHFP-NEXT:    ret
833;
834; RV64-WITHFP-LABEL: va2_caller:
835; RV64-WITHFP:       # %bb.0:
836; RV64-WITHFP-NEXT:    addi sp, sp, -16
837; RV64-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
838; RV64-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
839; RV64-WITHFP-NEXT:    addi s0, sp, 16
840; RV64-WITHFP-NEXT:    li a1, 1
841; RV64-WITHFP-NEXT:    call va2
842; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
843; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
844; RV64-WITHFP-NEXT:    addi sp, sp, 16
845; RV64-WITHFP-NEXT:    ret
846 %1 = call i64 (ptr, ...) @va2(ptr undef, i32 1) ; result intentionally unused
847 ret void
848}
849
; On RV32, ensure a named 2*xlen argument is passed in a1 and a2, while the
; vararg double is passed in a4 and a5 (rather than a3 and a4).
852
; The IR body manually walks the va_list: it rounds the current pointer up to
; 8-byte alignment (add 7, and -8), bumps it past the slot (add 15 total), and
; loads a double from the aligned slot, which is then bitcast and added to the
; named i64 %b (carried across a1/a2 with add/sltu on RV32).
define i64 @va3(i32 %a, i64 %b, ...) nounwind {
; ILP32-LABEL: va3:
; ILP32:       # %bb.0:
; ILP32-NEXT:    addi sp, sp, -32
; ILP32-NEXT:    addi a0, sp, 12
; ILP32-NEXT:    sw a0, 4(sp)
; ILP32-NEXT:    lw a0, 4(sp)
; ILP32-NEXT:    sw a3, 12(sp)
; ILP32-NEXT:    sw a4, 16(sp)
; ILP32-NEXT:    sw a5, 20(sp)
; ILP32-NEXT:    sw a6, 24(sp)
; ILP32-NEXT:    sw a7, 28(sp)
; ILP32-NEXT:    addi a3, a0, 7
; ILP32-NEXT:    addi a0, a0, 15
; ILP32-NEXT:    andi a3, a3, -8
; ILP32-NEXT:    sw a0, 4(sp)
; ILP32-NEXT:    lw a4, 0(a3)
; ILP32-NEXT:    lw a3, 4(a3)
; ILP32-NEXT:    add a0, a1, a4
; ILP32-NEXT:    sltu a1, a0, a4
; ILP32-NEXT:    add a2, a2, a3
; ILP32-NEXT:    add a1, a2, a1
; ILP32-NEXT:    addi sp, sp, 32
; ILP32-NEXT:    ret
;
; RV32D-ILP32-LABEL: va3:
; RV32D-ILP32:       # %bb.0:
; RV32D-ILP32-NEXT:    addi sp, sp, -48
; RV32D-ILP32-NEXT:    addi a0, sp, 28
; RV32D-ILP32-NEXT:    sw a0, 20(sp)
; RV32D-ILP32-NEXT:    lw a0, 20(sp)
; RV32D-ILP32-NEXT:    sw a3, 28(sp)
; RV32D-ILP32-NEXT:    sw a4, 32(sp)
; RV32D-ILP32-NEXT:    sw a5, 36(sp)
; RV32D-ILP32-NEXT:    sw a6, 40(sp)
; RV32D-ILP32-NEXT:    sw a7, 44(sp)
; RV32D-ILP32-NEXT:    addi a3, a0, 7
; RV32D-ILP32-NEXT:    andi a3, a3, -8
; RV32D-ILP32-NEXT:    fld fa5, 0(a3)
; RV32D-ILP32-NEXT:    addi a0, a0, 15
; RV32D-ILP32-NEXT:    sw a0, 20(sp)
; RV32D-ILP32-NEXT:    fsd fa5, 8(sp)
; RV32D-ILP32-NEXT:    lw a3, 8(sp)
; RV32D-ILP32-NEXT:    lw a4, 12(sp)
; RV32D-ILP32-NEXT:    add a0, a1, a3
; RV32D-ILP32-NEXT:    sltu a1, a0, a3
; RV32D-ILP32-NEXT:    add a2, a2, a4
; RV32D-ILP32-NEXT:    add a1, a2, a1
; RV32D-ILP32-NEXT:    addi sp, sp, 48
; RV32D-ILP32-NEXT:    ret
;
; RV32D-ILP32F-LABEL: va3:
; RV32D-ILP32F:       # %bb.0:
; RV32D-ILP32F-NEXT:    addi sp, sp, -48
; RV32D-ILP32F-NEXT:    addi a0, sp, 28
; RV32D-ILP32F-NEXT:    sw a0, 20(sp)
; RV32D-ILP32F-NEXT:    lw a0, 20(sp)
; RV32D-ILP32F-NEXT:    sw a3, 28(sp)
; RV32D-ILP32F-NEXT:    sw a4, 32(sp)
; RV32D-ILP32F-NEXT:    sw a5, 36(sp)
; RV32D-ILP32F-NEXT:    sw a6, 40(sp)
; RV32D-ILP32F-NEXT:    sw a7, 44(sp)
; RV32D-ILP32F-NEXT:    addi a3, a0, 7
; RV32D-ILP32F-NEXT:    andi a3, a3, -8
; RV32D-ILP32F-NEXT:    fld fa5, 0(a3)
; RV32D-ILP32F-NEXT:    addi a0, a0, 15
; RV32D-ILP32F-NEXT:    sw a0, 20(sp)
; RV32D-ILP32F-NEXT:    fsd fa5, 8(sp)
; RV32D-ILP32F-NEXT:    lw a3, 8(sp)
; RV32D-ILP32F-NEXT:    lw a4, 12(sp)
; RV32D-ILP32F-NEXT:    add a0, a1, a3
; RV32D-ILP32F-NEXT:    sltu a1, a0, a3
; RV32D-ILP32F-NEXT:    add a2, a2, a4
; RV32D-ILP32F-NEXT:    add a1, a2, a1
; RV32D-ILP32F-NEXT:    addi sp, sp, 48
; RV32D-ILP32F-NEXT:    ret
;
; RV32D-ILP32D-LABEL: va3:
; RV32D-ILP32D:       # %bb.0:
; RV32D-ILP32D-NEXT:    addi sp, sp, -48
; RV32D-ILP32D-NEXT:    addi a0, sp, 28
; RV32D-ILP32D-NEXT:    sw a0, 20(sp)
; RV32D-ILP32D-NEXT:    lw a0, 20(sp)
; RV32D-ILP32D-NEXT:    sw a3, 28(sp)
; RV32D-ILP32D-NEXT:    sw a4, 32(sp)
; RV32D-ILP32D-NEXT:    sw a5, 36(sp)
; RV32D-ILP32D-NEXT:    sw a6, 40(sp)
; RV32D-ILP32D-NEXT:    sw a7, 44(sp)
; RV32D-ILP32D-NEXT:    addi a3, a0, 7
; RV32D-ILP32D-NEXT:    andi a3, a3, -8
; RV32D-ILP32D-NEXT:    fld fa5, 0(a3)
; RV32D-ILP32D-NEXT:    addi a0, a0, 15
; RV32D-ILP32D-NEXT:    sw a0, 20(sp)
; RV32D-ILP32D-NEXT:    fsd fa5, 8(sp)
; RV32D-ILP32D-NEXT:    lw a3, 8(sp)
; RV32D-ILP32D-NEXT:    lw a4, 12(sp)
; RV32D-ILP32D-NEXT:    add a0, a1, a3
; RV32D-ILP32D-NEXT:    sltu a1, a0, a3
; RV32D-ILP32D-NEXT:    add a2, a2, a4
; RV32D-ILP32D-NEXT:    add a1, a2, a1
; RV32D-ILP32D-NEXT:    addi sp, sp, 48
; RV32D-ILP32D-NEXT:    ret
;
; RV64-LABEL: va3:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -64
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    sd a2, 16(sp)
; RV64-NEXT:    sd a3, 24(sp)
; RV64-NEXT:    sd a4, 32(sp)
; RV64-NEXT:    sd a5, 40(sp)
; RV64-NEXT:    sd a6, 48(sp)
; RV64-NEXT:    sd a7, 56(sp)
; RV64-NEXT:    addi a2, a0, 7
; RV64-NEXT:    addi a0, a0, 15
; RV64-NEXT:    andi a2, a2, -8
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 0(a2)
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    addi sp, sp, 64
; RV64-NEXT:    ret
;
; RV32-WITHFP-LABEL: va3:
; RV32-WITHFP:       # %bb.0:
; RV32-WITHFP-NEXT:    addi sp, sp, -48
; RV32-WITHFP-NEXT:    sw ra, 20(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    sw s0, 16(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    addi s0, sp, 24
; RV32-WITHFP-NEXT:    addi a0, s0, 4
; RV32-WITHFP-NEXT:    sw a0, -12(s0)
; RV32-WITHFP-NEXT:    lw a0, -12(s0)
; RV32-WITHFP-NEXT:    sw a3, 4(s0)
; RV32-WITHFP-NEXT:    sw a4, 8(s0)
; RV32-WITHFP-NEXT:    sw a5, 12(s0)
; RV32-WITHFP-NEXT:    sw a6, 16(s0)
; RV32-WITHFP-NEXT:    sw a7, 20(s0)
; RV32-WITHFP-NEXT:    addi a3, a0, 7
; RV32-WITHFP-NEXT:    addi a0, a0, 15
; RV32-WITHFP-NEXT:    andi a3, a3, -8
; RV32-WITHFP-NEXT:    sw a0, -12(s0)
; RV32-WITHFP-NEXT:    lw a4, 0(a3)
; RV32-WITHFP-NEXT:    lw a3, 4(a3)
; RV32-WITHFP-NEXT:    add a0, a1, a4
; RV32-WITHFP-NEXT:    sltu a1, a0, a4
; RV32-WITHFP-NEXT:    add a2, a2, a3
; RV32-WITHFP-NEXT:    add a1, a2, a1
; RV32-WITHFP-NEXT:    lw ra, 20(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    lw s0, 16(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    addi sp, sp, 48
; RV32-WITHFP-NEXT:    ret
;
; RV64-WITHFP-LABEL: va3:
; RV64-WITHFP:       # %bb.0:
; RV64-WITHFP-NEXT:    addi sp, sp, -80
; RV64-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    addi s0, sp, 32
; RV64-WITHFP-NEXT:    mv a0, s0
; RV64-WITHFP-NEXT:    sd a0, -24(s0)
; RV64-WITHFP-NEXT:    ld a0, -24(s0)
; RV64-WITHFP-NEXT:    sd a2, 0(s0)
; RV64-WITHFP-NEXT:    sd a3, 8(s0)
; RV64-WITHFP-NEXT:    sd a4, 16(s0)
; RV64-WITHFP-NEXT:    sd a5, 24(s0)
; RV64-WITHFP-NEXT:    sd a6, 32(s0)
; RV64-WITHFP-NEXT:    sd a7, 40(s0)
; RV64-WITHFP-NEXT:    addi a2, a0, 7
; RV64-WITHFP-NEXT:    addi a0, a0, 15
; RV64-WITHFP-NEXT:    andi a2, a2, -8
; RV64-WITHFP-NEXT:    sd a0, -24(s0)
; RV64-WITHFP-NEXT:    ld a0, 0(a2)
; RV64-WITHFP-NEXT:    add a0, a1, a0
; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    addi sp, sp, 80
; RV64-WITHFP-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %argp.cur = load ptr, ptr %va
  %ptrint = ptrtoint ptr %argp.cur to iXLen
  %1 = add iXLen %ptrint, 7
  %2 = and iXLen %1, -8
  %argp.cur.aligned = inttoptr iXLen %1 to ptr
  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
  store ptr %argp.next, ptr %va
  %3 = inttoptr iXLen %2 to ptr
  %4 = load double, ptr %3, align 8
  call void @llvm.va_end(ptr %va)
  %5 = bitcast double %4 to i64
  %6 = add i64 %b, %5
  ret i64 %6
}
1047
; This test is slightly different from the SelectionDAG counterpart because
; narrowScalar and widenScalar for G_VAARG on types outside of [s32, sXLen]
; are not implemented yet.
; Same scenario as va3 but using the va_arg instruction on an iXLen value, so
; the generated alignment is XLEN-sized (add 3/and -4 on RV32, add 7/and -8 on
; RV64) rather than the manual 8-byte rounding above.
define iXLen @va3_va_arg(iXLen %a, iXLen %b, ...) nounwind {
; RV32-LABEL: va3_va_arg:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -32
; RV32-NEXT:    sw a2, 8(sp)
; RV32-NEXT:    sw a3, 12(sp)
; RV32-NEXT:    sw a4, 16(sp)
; RV32-NEXT:    sw a5, 20(sp)
; RV32-NEXT:    sw a6, 24(sp)
; RV32-NEXT:    sw a7, 28(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    sw a0, 4(sp)
; RV32-NEXT:    lw a0, 4(sp)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    andi a0, a0, -4
; RV32-NEXT:    addi a2, a0, 4
; RV32-NEXT:    sw a2, 4(sp)
; RV32-NEXT:    lw a0, 0(a0)
; RV32-NEXT:    add a0, a1, a0
; RV32-NEXT:    addi sp, sp, 32
; RV32-NEXT:    ret
;
; RV64-LABEL: va3_va_arg:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -64
; RV64-NEXT:    sd a2, 16(sp)
; RV64-NEXT:    sd a3, 24(sp)
; RV64-NEXT:    sd a4, 32(sp)
; RV64-NEXT:    sd a5, 40(sp)
; RV64-NEXT:    sd a6, 48(sp)
; RV64-NEXT:    sd a7, 56(sp)
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    addi a0, a0, 7
; RV64-NEXT:    andi a0, a0, -8
; RV64-NEXT:    addi a2, a0, 8
; RV64-NEXT:    sd a2, 8(sp)
; RV64-NEXT:    ld a0, 0(a0)
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    addi sp, sp, 64
; RV64-NEXT:    ret
;
; RV32-WITHFP-LABEL: va3_va_arg:
; RV32-WITHFP:       # %bb.0:
; RV32-WITHFP-NEXT:    addi sp, sp, -48
; RV32-WITHFP-NEXT:    sw ra, 20(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    sw s0, 16(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    addi s0, sp, 24
; RV32-WITHFP-NEXT:    sw a2, 0(s0)
; RV32-WITHFP-NEXT:    sw a3, 4(s0)
; RV32-WITHFP-NEXT:    sw a4, 8(s0)
; RV32-WITHFP-NEXT:    sw a5, 12(s0)
; RV32-WITHFP-NEXT:    sw a6, 16(s0)
; RV32-WITHFP-NEXT:    sw a7, 20(s0)
; RV32-WITHFP-NEXT:    mv a0, s0
; RV32-WITHFP-NEXT:    sw a0, -12(s0)
; RV32-WITHFP-NEXT:    lw a0, -12(s0)
; RV32-WITHFP-NEXT:    addi a0, a0, 3
; RV32-WITHFP-NEXT:    andi a0, a0, -4
; RV32-WITHFP-NEXT:    addi a2, a0, 4
; RV32-WITHFP-NEXT:    sw a2, -12(s0)
; RV32-WITHFP-NEXT:    lw a0, 0(a0)
; RV32-WITHFP-NEXT:    add a0, a1, a0
; RV32-WITHFP-NEXT:    lw ra, 20(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    lw s0, 16(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    addi sp, sp, 48
; RV32-WITHFP-NEXT:    ret
;
; RV64-WITHFP-LABEL: va3_va_arg:
; RV64-WITHFP:       # %bb.0:
; RV64-WITHFP-NEXT:    addi sp, sp, -80
; RV64-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    addi s0, sp, 32
; RV64-WITHFP-NEXT:    sd a2, 0(s0)
; RV64-WITHFP-NEXT:    sd a3, 8(s0)
; RV64-WITHFP-NEXT:    sd a4, 16(s0)
; RV64-WITHFP-NEXT:    sd a5, 24(s0)
; RV64-WITHFP-NEXT:    sd a6, 32(s0)
; RV64-WITHFP-NEXT:    sd a7, 40(s0)
; RV64-WITHFP-NEXT:    mv a0, s0
; RV64-WITHFP-NEXT:    sd a0, -24(s0)
; RV64-WITHFP-NEXT:    ld a0, -24(s0)
; RV64-WITHFP-NEXT:    addi a0, a0, 7
; RV64-WITHFP-NEXT:    andi a0, a0, -8
; RV64-WITHFP-NEXT:    addi a2, a0, 8
; RV64-WITHFP-NEXT:    sd a2, -24(s0)
; RV64-WITHFP-NEXT:    ld a0, 0(a0)
; RV64-WITHFP-NEXT:    add a0, a1, a0
; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    addi sp, sp, 80
; RV64-WITHFP-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, iXLen
  call void @llvm.va_end(ptr %va)
  %3 = add iXLen %b, %1
  ret iXLen %3
}
1152
; Caller side of va3: named i32 2 goes in a0, named i64 1111 in a1 (a1/a2 pair
; on RV32), and the variadic i32 20000 is materialized with lui 5 + addi -480
; (5 << 12 - 480 = 20000) into the next argument register.
define void @va3_caller() nounwind {
; RV32-LABEL: va3_caller:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    lui a0, 5
; RV32-NEXT:    addi a3, a0, -480
; RV32-NEXT:    li a0, 2
; RV32-NEXT:    li a1, 1111
; RV32-NEXT:    li a2, 0
; RV32-NEXT:    call va3
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: va3_caller:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    lui a1, 5
; RV64-NEXT:    li a0, 2
; RV64-NEXT:    addiw a2, a1, -480
; RV64-NEXT:    li a1, 1111
; RV64-NEXT:    call va3
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
;
; RV32-WITHFP-LABEL: va3_caller:
; RV32-WITHFP:       # %bb.0:
; RV32-WITHFP-NEXT:    addi sp, sp, -16
; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    addi s0, sp, 16
; RV32-WITHFP-NEXT:    lui a0, 5
; RV32-WITHFP-NEXT:    addi a3, a0, -480
; RV32-WITHFP-NEXT:    li a0, 2
; RV32-WITHFP-NEXT:    li a1, 1111
; RV32-WITHFP-NEXT:    li a2, 0
; RV32-WITHFP-NEXT:    call va3
; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    addi sp, sp, 16
; RV32-WITHFP-NEXT:    ret
;
; RV64-WITHFP-LABEL: va3_caller:
; RV64-WITHFP:       # %bb.0:
; RV64-WITHFP-NEXT:    addi sp, sp, -16
; RV64-WITHFP-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    addi s0, sp, 16
; RV64-WITHFP-NEXT:    lui a1, 5
; RV64-WITHFP-NEXT:    li a0, 2
; RV64-WITHFP-NEXT:    addiw a2, a1, -480
; RV64-WITHFP-NEXT:    li a1, 1111
; RV64-WITHFP-NEXT:    call va3
; RV64-WITHFP-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    addi sp, sp, 16
; RV64-WITHFP-NEXT:    ret
 %1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, i32 20000)
 ret void
}
1216
1217declare void @llvm.va_copy(ptr, ptr)
1218
; Exercises va_copy: reads one vararg from %vargs, copies the list to %wargs,
; passes the copied pointer to @notdead, then pulls three more varargs from
; the original list and returns the sum of all four.
define iXLen @va4_va_copy(i32 %argno, ...) nounwind {
; RV32-LABEL: va4_va_copy:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -64
; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw a1, 36(sp)
; RV32-NEXT:    sw a2, 40(sp)
; RV32-NEXT:    sw a3, 44(sp)
; RV32-NEXT:    sw a4, 48(sp)
; RV32-NEXT:    sw a5, 52(sp)
; RV32-NEXT:    sw a6, 56(sp)
; RV32-NEXT:    sw a7, 60(sp)
; RV32-NEXT:    addi a0, sp, 36
; RV32-NEXT:    sw a0, 16(sp)
; RV32-NEXT:    lw a0, 16(sp)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    li s0, -4
; RV32-NEXT:    and a0, a0, s0
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, 16(sp)
; RV32-NEXT:    lw a1, 16(sp)
; RV32-NEXT:    lw s1, 0(a0)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    lw a0, 12(sp)
; RV32-NEXT:    call notdead
; RV32-NEXT:    lw a0, 16(sp)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    and a0, a0, s0
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, 16(sp)
; RV32-NEXT:    lw a1, 16(sp)
; RV32-NEXT:    lw a0, 0(a0)
; RV32-NEXT:    addi a1, a1, 3
; RV32-NEXT:    and a1, a1, s0
; RV32-NEXT:    addi a2, a1, 4
; RV32-NEXT:    sw a2, 16(sp)
; RV32-NEXT:    lw a2, 16(sp)
; RV32-NEXT:    lw a1, 0(a1)
; RV32-NEXT:    addi a2, a2, 3
; RV32-NEXT:    andi a2, a2, -4
; RV32-NEXT:    addi a3, a2, 4
; RV32-NEXT:    sw a3, 16(sp)
; RV32-NEXT:    lw a2, 0(a2)
; RV32-NEXT:    add a0, a0, s1
; RV32-NEXT:    add a1, a1, a2
; RV32-NEXT:    add a0, a0, a1
; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 64
; RV32-NEXT:    ret
;
; RV64-LABEL: va4_va_copy:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -112
; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd a1, 56(sp)
; RV64-NEXT:    sd a2, 64(sp)
; RV64-NEXT:    sd a3, 72(sp)
; RV64-NEXT:    sd a4, 80(sp)
; RV64-NEXT:    sd a5, 88(sp)
; RV64-NEXT:    sd a6, 96(sp)
; RV64-NEXT:    sd a7, 104(sp)
; RV64-NEXT:    addi a0, sp, 56
; RV64-NEXT:    sd a0, 16(sp)
; RV64-NEXT:    ld a0, 16(sp)
; RV64-NEXT:    addi a0, a0, 7
; RV64-NEXT:    li s0, -8
; RV64-NEXT:    and a0, a0, s0
; RV64-NEXT:    addi a1, a0, 8
; RV64-NEXT:    sd a1, 16(sp)
; RV64-NEXT:    ld a1, 16(sp)
; RV64-NEXT:    ld s1, 0(a0)
; RV64-NEXT:    sd a1, 8(sp)
; RV64-NEXT:    lw a0, 12(sp)
; RV64-NEXT:    lwu a1, 8(sp)
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    or a0, a0, a1
; RV64-NEXT:    call notdead
; RV64-NEXT:    ld a0, 16(sp)
; RV64-NEXT:    addi a0, a0, 7
; RV64-NEXT:    and a0, a0, s0
; RV64-NEXT:    addi a1, a0, 8
; RV64-NEXT:    sd a1, 16(sp)
; RV64-NEXT:    ld a1, 16(sp)
; RV64-NEXT:    ld a0, 0(a0)
; RV64-NEXT:    addi a1, a1, 7
; RV64-NEXT:    and a1, a1, s0
; RV64-NEXT:    addi a2, a1, 8
; RV64-NEXT:    sd a2, 16(sp)
; RV64-NEXT:    ld a2, 16(sp)
; RV64-NEXT:    ld a1, 0(a1)
; RV64-NEXT:    addi a2, a2, 7
; RV64-NEXT:    andi a2, a2, -8
; RV64-NEXT:    addi a3, a2, 8
; RV64-NEXT:    sd a3, 16(sp)
; RV64-NEXT:    ld a2, 0(a2)
; RV64-NEXT:    add a0, a0, s1
; RV64-NEXT:    add a1, a1, a2
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 112
; RV64-NEXT:    ret
;
; RV32-WITHFP-LABEL: va4_va_copy:
; RV32-WITHFP:       # %bb.0:
; RV32-WITHFP-NEXT:    addi sp, sp, -64
; RV32-WITHFP-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    addi s0, sp, 32
; RV32-WITHFP-NEXT:    sw a1, 4(s0)
; RV32-WITHFP-NEXT:    sw a2, 8(s0)
; RV32-WITHFP-NEXT:    sw a3, 12(s0)
; RV32-WITHFP-NEXT:    sw a4, 16(s0)
; RV32-WITHFP-NEXT:    sw a5, 20(s0)
; RV32-WITHFP-NEXT:    sw a6, 24(s0)
; RV32-WITHFP-NEXT:    sw a7, 28(s0)
; RV32-WITHFP-NEXT:    addi a0, s0, 4
; RV32-WITHFP-NEXT:    sw a0, -20(s0)
; RV32-WITHFP-NEXT:    lw a0, -20(s0)
; RV32-WITHFP-NEXT:    addi a0, a0, 3
; RV32-WITHFP-NEXT:    li s1, -4
; RV32-WITHFP-NEXT:    and a0, a0, s1
; RV32-WITHFP-NEXT:    addi a1, a0, 4
; RV32-WITHFP-NEXT:    sw a1, -20(s0)
; RV32-WITHFP-NEXT:    lw a1, -20(s0)
; RV32-WITHFP-NEXT:    lw s2, 0(a0)
; RV32-WITHFP-NEXT:    sw a1, -24(s0)
; RV32-WITHFP-NEXT:    lw a0, -24(s0)
; RV32-WITHFP-NEXT:    call notdead
; RV32-WITHFP-NEXT:    lw a0, -20(s0)
; RV32-WITHFP-NEXT:    addi a0, a0, 3
; RV32-WITHFP-NEXT:    and a0, a0, s1
; RV32-WITHFP-NEXT:    addi a1, a0, 4
; RV32-WITHFP-NEXT:    sw a1, -20(s0)
; RV32-WITHFP-NEXT:    lw a1, -20(s0)
; RV32-WITHFP-NEXT:    lw a0, 0(a0)
; RV32-WITHFP-NEXT:    addi a1, a1, 3
; RV32-WITHFP-NEXT:    and a1, a1, s1
; RV32-WITHFP-NEXT:    addi a2, a1, 4
; RV32-WITHFP-NEXT:    sw a2, -20(s0)
; RV32-WITHFP-NEXT:    lw a2, -20(s0)
; RV32-WITHFP-NEXT:    lw a1, 0(a1)
; RV32-WITHFP-NEXT:    addi a2, a2, 3
; RV32-WITHFP-NEXT:    andi a2, a2, -4
; RV32-WITHFP-NEXT:    addi a3, a2, 4
; RV32-WITHFP-NEXT:    sw a3, -20(s0)
; RV32-WITHFP-NEXT:    lw a2, 0(a2)
; RV32-WITHFP-NEXT:    add a0, a0, s2
; RV32-WITHFP-NEXT:    add a1, a1, a2
; RV32-WITHFP-NEXT:    add a0, a0, a1
; RV32-WITHFP-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    addi sp, sp, 64
; RV32-WITHFP-NEXT:    ret
;
; RV64-WITHFP-LABEL: va4_va_copy:
; RV64-WITHFP:       # %bb.0:
; RV64-WITHFP-NEXT:    addi sp, sp, -112
; RV64-WITHFP-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    addi s0, sp, 48
; RV64-WITHFP-NEXT:    sd a1, 8(s0)
; RV64-WITHFP-NEXT:    sd a2, 16(s0)
; RV64-WITHFP-NEXT:    sd a3, 24(s0)
; RV64-WITHFP-NEXT:    sd a4, 32(s0)
; RV64-WITHFP-NEXT:    sd a5, 40(s0)
; RV64-WITHFP-NEXT:    sd a6, 48(s0)
; RV64-WITHFP-NEXT:    sd a7, 56(s0)
; RV64-WITHFP-NEXT:    addi a0, s0, 8
; RV64-WITHFP-NEXT:    sd a0, -40(s0)
; RV64-WITHFP-NEXT:    ld a0, -40(s0)
; RV64-WITHFP-NEXT:    addi a0, a0, 7
; RV64-WITHFP-NEXT:    li s1, -8
; RV64-WITHFP-NEXT:    and a0, a0, s1
; RV64-WITHFP-NEXT:    addi a1, a0, 8
; RV64-WITHFP-NEXT:    sd a1, -40(s0)
; RV64-WITHFP-NEXT:    ld a1, -40(s0)
; RV64-WITHFP-NEXT:    ld s2, 0(a0)
; RV64-WITHFP-NEXT:    sd a1, -48(s0)
; RV64-WITHFP-NEXT:    lw a0, -44(s0)
; RV64-WITHFP-NEXT:    lwu a1, -48(s0)
; RV64-WITHFP-NEXT:    slli a0, a0, 32
; RV64-WITHFP-NEXT:    or a0, a0, a1
; RV64-WITHFP-NEXT:    call notdead
; RV64-WITHFP-NEXT:    ld a0, -40(s0)
; RV64-WITHFP-NEXT:    addi a0, a0, 7
; RV64-WITHFP-NEXT:    and a0, a0, s1
; RV64-WITHFP-NEXT:    addi a1, a0, 8
; RV64-WITHFP-NEXT:    sd a1, -40(s0)
; RV64-WITHFP-NEXT:    ld a1, -40(s0)
; RV64-WITHFP-NEXT:    ld a0, 0(a0)
; RV64-WITHFP-NEXT:    addi a1, a1, 7
; RV64-WITHFP-NEXT:    and a1, a1, s1
; RV64-WITHFP-NEXT:    addi a2, a1, 8
; RV64-WITHFP-NEXT:    sd a2, -40(s0)
; RV64-WITHFP-NEXT:    ld a2, -40(s0)
; RV64-WITHFP-NEXT:    ld a1, 0(a1)
; RV64-WITHFP-NEXT:    addi a2, a2, 7
; RV64-WITHFP-NEXT:    andi a2, a2, -8
; RV64-WITHFP-NEXT:    addi a3, a2, 8
; RV64-WITHFP-NEXT:    sd a3, -40(s0)
; RV64-WITHFP-NEXT:    ld a2, 0(a2)
; RV64-WITHFP-NEXT:    add a0, a0, s2
; RV64-WITHFP-NEXT:    add a1, a1, a2
; RV64-WITHFP-NEXT:    add a0, a0, a1
; RV64-WITHFP-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    addi sp, sp, 112
; RV64-WITHFP-NEXT:    ret
  %vargs = alloca ptr
  %wargs = alloca ptr
  call void @llvm.va_start(ptr %vargs)
  %1 = va_arg ptr %vargs, iXLen
  call void @llvm.va_copy(ptr %wargs, ptr %vargs)
  %2 = load ptr, ptr %wargs, align 4
  call void @notdead(ptr %2)
  %3 = va_arg ptr %vargs, iXLen
  %4 = va_arg ptr %vargs, iXLen
  %5 = va_arg ptr %vargs, iXLen
  call void @llvm.va_end(ptr %vargs)
  call void @llvm.va_end(ptr %wargs)
  %add1 = add iXLen %3, %1
  %add2 = add iXLen %add1, %4
  %add3 = add iXLen %add2, %5
  ret iXLen %add3
}
1460
; The va5_aligned_stack_callee and caller functions are omitted from this file
; since they were not included in the IR lowering test when vararg calls were
; initially added.
1464
1465; A function with no fixed arguments is not valid C, but can be
1466; specified in LLVM IR. We must ensure the vararg save area is
1467; still set up correctly.
1468
; With no named parameters, all eight integer argument registers (a0-a7) must
; still be spilled to the vararg save area before the first va_arg read.
define iXLen @va6_no_fixed_args(...) nounwind {
; RV32-LABEL: va6_no_fixed_args:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    sw a0, 16(sp)
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    sw a2, 24(sp)
; RV32-NEXT:    sw a3, 28(sp)
; RV32-NEXT:    sw a4, 32(sp)
; RV32-NEXT:    sw a5, 36(sp)
; RV32-NEXT:    sw a6, 40(sp)
; RV32-NEXT:    sw a7, 44(sp)
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    lw a0, 12(sp)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    andi a0, a0, -4
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    lw a0, 0(a0)
; RV32-NEXT:    addi sp, sp, 48
; RV32-NEXT:    ret
;
; RV64-LABEL: va6_no_fixed_args:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    sd a0, 16(sp)
; RV64-NEXT:    sd a1, 24(sp)
; RV64-NEXT:    sd a2, 32(sp)
; RV64-NEXT:    sd a3, 40(sp)
; RV64-NEXT:    sd a4, 48(sp)
; RV64-NEXT:    sd a5, 56(sp)
; RV64-NEXT:    sd a6, 64(sp)
; RV64-NEXT:    sd a7, 72(sp)
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    addi a0, a0, 7
; RV64-NEXT:    andi a0, a0, -8
; RV64-NEXT:    addi a1, a0, 8
; RV64-NEXT:    sd a1, 8(sp)
; RV64-NEXT:    ld a0, 0(a0)
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    ret
;
; RV32-WITHFP-LABEL: va6_no_fixed_args:
; RV32-WITHFP:       # %bb.0:
; RV32-WITHFP-NEXT:    addi sp, sp, -48
; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32-WITHFP-NEXT:    addi s0, sp, 16
; RV32-WITHFP-NEXT:    sw a0, 0(s0)
; RV32-WITHFP-NEXT:    sw a1, 4(s0)
; RV32-WITHFP-NEXT:    sw a2, 8(s0)
; RV32-WITHFP-NEXT:    sw a3, 12(s0)
; RV32-WITHFP-NEXT:    sw a4, 16(s0)
; RV32-WITHFP-NEXT:    sw a5, 20(s0)
; RV32-WITHFP-NEXT:    sw a6, 24(s0)
; RV32-WITHFP-NEXT:    sw a7, 28(s0)
; RV32-WITHFP-NEXT:    mv a0, s0
; RV32-WITHFP-NEXT:    sw a0, -12(s0)
; RV32-WITHFP-NEXT:    lw a0, -12(s0)
; RV32-WITHFP-NEXT:    addi a0, a0, 3
; RV32-WITHFP-NEXT:    andi a0, a0, -4
; RV32-WITHFP-NEXT:    addi a1, a0, 4
; RV32-WITHFP-NEXT:    sw a1, -12(s0)
; RV32-WITHFP-NEXT:    lw a0, 0(a0)
; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT:    addi sp, sp, 48
; RV32-WITHFP-NEXT:    ret
;
; RV64-WITHFP-LABEL: va6_no_fixed_args:
; RV64-WITHFP:       # %bb.0:
; RV64-WITHFP-NEXT:    addi sp, sp, -96
; RV64-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64-WITHFP-NEXT:    addi s0, sp, 32
; RV64-WITHFP-NEXT:    sd a0, 0(s0)
; RV64-WITHFP-NEXT:    sd a1, 8(s0)
; RV64-WITHFP-NEXT:    sd a2, 16(s0)
; RV64-WITHFP-NEXT:    sd a3, 24(s0)
; RV64-WITHFP-NEXT:    sd a4, 32(s0)
; RV64-WITHFP-NEXT:    sd a5, 40(s0)
; RV64-WITHFP-NEXT:    sd a6, 48(s0)
; RV64-WITHFP-NEXT:    sd a7, 56(s0)
; RV64-WITHFP-NEXT:    mv a0, s0
; RV64-WITHFP-NEXT:    sd a0, -24(s0)
; RV64-WITHFP-NEXT:    ld a0, -24(s0)
; RV64-WITHFP-NEXT:    addi a0, a0, 7
; RV64-WITHFP-NEXT:    andi a0, a0, -8
; RV64-WITHFP-NEXT:    addi a1, a0, 8
; RV64-WITHFP-NEXT:    sd a1, -24(s0)
; RV64-WITHFP-NEXT:    ld a0, 0(a0)
; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT:    addi sp, sp, 96
; RV64-WITHFP-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, iXLen
  call void @llvm.va_end(ptr %va)
  ret iXLen %1
}
1573
1574; TODO: improve constant materialization of stack addresses
1575
1576define i32 @va_large_stack(ptr %fmt, ...) {
1577; RV32-LABEL: va_large_stack:
1578; RV32:       # %bb.0:
1579; RV32-NEXT:    lui a0, 24414
1580; RV32-NEXT:    addi a0, a0, 304
1581; RV32-NEXT:    sub sp, sp, a0
1582; RV32-NEXT:    .cfi_def_cfa_offset 100000048
1583; RV32-NEXT:    lui a0, 24414
1584; RV32-NEXT:    add a0, sp, a0
1585; RV32-NEXT:    sw a1, 276(a0)
1586; RV32-NEXT:    lui a0, 24414
1587; RV32-NEXT:    add a0, sp, a0
1588; RV32-NEXT:    sw a2, 280(a0)
1589; RV32-NEXT:    lui a0, 24414
1590; RV32-NEXT:    add a0, sp, a0
1591; RV32-NEXT:    sw a3, 284(a0)
1592; RV32-NEXT:    lui a0, 24414
1593; RV32-NEXT:    add a0, sp, a0
1594; RV32-NEXT:    sw a4, 288(a0)
1595; RV32-NEXT:    lui a0, 24414
1596; RV32-NEXT:    addi a0, a0, 276
1597; RV32-NEXT:    add a0, sp, a0
1598; RV32-NEXT:    sw a0, 12(sp)
1599; RV32-NEXT:    lw a0, 12(sp)
1600; RV32-NEXT:    lui a1, 24414
1601; RV32-NEXT:    add a1, sp, a1
1602; RV32-NEXT:    sw a5, 292(a1)
1603; RV32-NEXT:    lui a1, 24414
1604; RV32-NEXT:    add a1, sp, a1
1605; RV32-NEXT:    sw a6, 296(a1)
1606; RV32-NEXT:    lui a1, 24414
1607; RV32-NEXT:    add a1, sp, a1
1608; RV32-NEXT:    sw a7, 300(a1)
1609; RV32-NEXT:    addi a1, a0, 4
1610; RV32-NEXT:    sw a1, 12(sp)
1611; RV32-NEXT:    lw a0, 0(a0)
1612; RV32-NEXT:    lui a1, 24414
1613; RV32-NEXT:    addi a1, a1, 304
1614; RV32-NEXT:    add sp, sp, a1
1615; RV32-NEXT:    .cfi_def_cfa_offset 0
1616; RV32-NEXT:    ret
1617;
1618; RV64-LABEL: va_large_stack:
1619; RV64:       # %bb.0:
1620; RV64-NEXT:    lui a0, 24414
1621; RV64-NEXT:    addiw a0, a0, 336
1622; RV64-NEXT:    sub sp, sp, a0
1623; RV64-NEXT:    .cfi_def_cfa_offset 100000080
1624; RV64-NEXT:    lui a0, 24414
1625; RV64-NEXT:    add a0, sp, a0
1626; RV64-NEXT:    sd a1, 280(a0)
1627; RV64-NEXT:    lui a0, 24414
1628; RV64-NEXT:    add a0, sp, a0
1629; RV64-NEXT:    sd a2, 288(a0)
1630; RV64-NEXT:    lui a0, 24414
1631; RV64-NEXT:    add a0, sp, a0
1632; RV64-NEXT:    sd a3, 296(a0)
1633; RV64-NEXT:    lui a0, 24414
1634; RV64-NEXT:    add a0, sp, a0
1635; RV64-NEXT:    sd a4, 304(a0)
1636; RV64-NEXT:    addi a0, sp, 8
1637; RV64-NEXT:    lui a1, 24414
1638; RV64-NEXT:    addiw a1, a1, 280
1639; RV64-NEXT:    add a1, sp, a1
1640; RV64-NEXT:    sd a1, 8(sp)
1641; RV64-NEXT:    lw a0, 4(a0)
1642; RV64-NEXT:    lwu a1, 8(sp)
1643; RV64-NEXT:    lui a2, 24414
1644; RV64-NEXT:    add a2, sp, a2
1645; RV64-NEXT:    sd a5, 312(a2)
1646; RV64-NEXT:    lui a2, 24414
1647; RV64-NEXT:    add a2, sp, a2
1648; RV64-NEXT:    sd a6, 320(a2)
1649; RV64-NEXT:    lui a2, 24414
1650; RV64-NEXT:    add a2, sp, a2
1651; RV64-NEXT:    sd a7, 328(a2)
1652; RV64-NEXT:    slli a0, a0, 32
1653; RV64-NEXT:    or a0, a0, a1
1654; RV64-NEXT:    addi a1, a0, 4
1655; RV64-NEXT:    srli a2, a1, 32
1656; RV64-NEXT:    sw a1, 8(sp)
1657; RV64-NEXT:    sw a2, 12(sp)
1658; RV64-NEXT:    lw a0, 0(a0)
1659; RV64-NEXT:    lui a1, 24414
1660; RV64-NEXT:    addiw a1, a1, 336
1661; RV64-NEXT:    add sp, sp, a1
1662; RV64-NEXT:    .cfi_def_cfa_offset 0
1663; RV64-NEXT:    ret
1664;
1665; RV32-WITHFP-LABEL: va_large_stack:
1666; RV32-WITHFP:       # %bb.0:
1667; RV32-WITHFP-NEXT:    addi sp, sp, -2032
1668; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
1669; RV32-WITHFP-NEXT:    sw ra, 1996(sp) # 4-byte Folded Spill
1670; RV32-WITHFP-NEXT:    sw s0, 1992(sp) # 4-byte Folded Spill
1671; RV32-WITHFP-NEXT:    .cfi_offset ra, -36
1672; RV32-WITHFP-NEXT:    .cfi_offset s0, -40
1673; RV32-WITHFP-NEXT:    addi s0, sp, 2000
1674; RV32-WITHFP-NEXT:    .cfi_def_cfa s0, 32
1675; RV32-WITHFP-NEXT:    lui a0, 24414
1676; RV32-WITHFP-NEXT:    addi a0, a0, -1728
1677; RV32-WITHFP-NEXT:    sub sp, sp, a0
1678; RV32-WITHFP-NEXT:    lui a0, 24414
1679; RV32-WITHFP-NEXT:    addi a0, a0, 272
1680; RV32-WITHFP-NEXT:    sub a0, s0, a0
1681; RV32-WITHFP-NEXT:    sw a1, 4(s0)
1682; RV32-WITHFP-NEXT:    sw a2, 8(s0)
1683; RV32-WITHFP-NEXT:    sw a3, 12(s0)
1684; RV32-WITHFP-NEXT:    sw a4, 16(s0)
1685; RV32-WITHFP-NEXT:    addi a1, s0, 4
1686; RV32-WITHFP-NEXT:    sw a1, 0(a0)
1687; RV32-WITHFP-NEXT:    lw a1, 0(a0)
1688; RV32-WITHFP-NEXT:    sw a5, 20(s0)
1689; RV32-WITHFP-NEXT:    sw a6, 24(s0)
1690; RV32-WITHFP-NEXT:    sw a7, 28(s0)
1691; RV32-WITHFP-NEXT:    addi a2, a1, 4
1692; RV32-WITHFP-NEXT:    sw a2, 0(a0)
1693; RV32-WITHFP-NEXT:    lw a0, 0(a1)
1694; RV32-WITHFP-NEXT:    lui a1, 24414
1695; RV32-WITHFP-NEXT:    addi a1, a1, -1728
1696; RV32-WITHFP-NEXT:    add sp, sp, a1
1697; RV32-WITHFP-NEXT:    .cfi_def_cfa sp, 2032
1698; RV32-WITHFP-NEXT:    lw ra, 1996(sp) # 4-byte Folded Reload
1699; RV32-WITHFP-NEXT:    lw s0, 1992(sp) # 4-byte Folded Reload
1700; RV32-WITHFP-NEXT:    .cfi_restore ra
1701; RV32-WITHFP-NEXT:    .cfi_restore s0
1702; RV32-WITHFP-NEXT:    addi sp, sp, 2032
1703; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
1704; RV32-WITHFP-NEXT:    ret
1705;
1706; RV64-WITHFP-LABEL: va_large_stack:
1707; RV64-WITHFP:       # %bb.0:
1708; RV64-WITHFP-NEXT:    addi sp, sp, -2032
1709; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 2032
1710; RV64-WITHFP-NEXT:    sd ra, 1960(sp) # 8-byte Folded Spill
1711; RV64-WITHFP-NEXT:    sd s0, 1952(sp) # 8-byte Folded Spill
1712; RV64-WITHFP-NEXT:    .cfi_offset ra, -72
1713; RV64-WITHFP-NEXT:    .cfi_offset s0, -80
1714; RV64-WITHFP-NEXT:    addi s0, sp, 1968
1715; RV64-WITHFP-NEXT:    .cfi_def_cfa s0, 64
1716; RV64-WITHFP-NEXT:    lui a0, 24414
1717; RV64-WITHFP-NEXT:    addiw a0, a0, -1680
1718; RV64-WITHFP-NEXT:    sub sp, sp, a0
1719; RV64-WITHFP-NEXT:    lui a0, 24414
1720; RV64-WITHFP-NEXT:    addiw a0, a0, 288
1721; RV64-WITHFP-NEXT:    sub a0, s0, a0
1722; RV64-WITHFP-NEXT:    sd a1, 8(s0)
1723; RV64-WITHFP-NEXT:    sd a2, 16(s0)
1724; RV64-WITHFP-NEXT:    sd a3, 24(s0)
1725; RV64-WITHFP-NEXT:    sd a4, 32(s0)
1726; RV64-WITHFP-NEXT:    addi a1, s0, 8
1727; RV64-WITHFP-NEXT:    sd a1, 0(a0)
1728; RV64-WITHFP-NEXT:    lwu a1, 0(a0)
1729; RV64-WITHFP-NEXT:    lw a2, 4(a0)
1730; RV64-WITHFP-NEXT:    sd a5, 40(s0)
1731; RV64-WITHFP-NEXT:    sd a6, 48(s0)
1732; RV64-WITHFP-NEXT:    sd a7, 56(s0)
1733; RV64-WITHFP-NEXT:    slli a2, a2, 32
1734; RV64-WITHFP-NEXT:    or a1, a2, a1
1735; RV64-WITHFP-NEXT:    addi a2, a1, 4
1736; RV64-WITHFP-NEXT:    srli a3, a2, 32
1737; RV64-WITHFP-NEXT:    sw a2, 0(a0)
1738; RV64-WITHFP-NEXT:    sw a3, 4(a0)
1739; RV64-WITHFP-NEXT:    lw a0, 0(a1)
1740; RV64-WITHFP-NEXT:    lui a1, 24414
1741; RV64-WITHFP-NEXT:    addiw a1, a1, -1680
1742; RV64-WITHFP-NEXT:    add sp, sp, a1
1743; RV64-WITHFP-NEXT:    .cfi_def_cfa sp, 2032
1744; RV64-WITHFP-NEXT:    ld ra, 1960(sp) # 8-byte Folded Reload
1745; RV64-WITHFP-NEXT:    ld s0, 1952(sp) # 8-byte Folded Reload
1746; RV64-WITHFP-NEXT:    .cfi_restore ra
1747; RV64-WITHFP-NEXT:    .cfi_restore s0
1748; RV64-WITHFP-NEXT:    addi sp, sp, 2032
1749; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
1750; RV64-WITHFP-NEXT:    ret
1751  %large = alloca [ 100000000 x i8 ]
1752  %va = alloca ptr
1753  call void @llvm.va_start(ptr %va)
1754  %argp.cur = load ptr, ptr %va, align 4
1755  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
1756  store ptr %argp.next, ptr %va, align 4
1757  %1 = load i32, ptr %argp.cur, align 4
1758  call void @llvm.va_end(ptr %va)
1759  ret i32 %1
1760}
1761
; Check GlobalISel lowering of llvm.va_copy. On RISC-V a va_list is a single
; pointer, so the copy is lowered to a plain load from %args followed by a
; store to %args_cp. The subsequent va_arg rounds the copied pointer up to
; the XLEN slot size (addi 3 / andi -4 on RV32, addi 7 / andi -8 on RV64),
; stores the pointer advanced by one slot back into the va_list, and loads
; the argument value. This function is not itself variadic — it only consumes
; a va_list received through %arg_start — so the generated code sets up no
; vararg register save area.
1762define iXLen @va_vprintf(ptr %fmt, ptr %arg_start) {
1763; RV32-LABEL: va_vprintf:
1764; RV32:       # %bb.0:
1765; RV32-NEXT:    addi sp, sp, -16
1766; RV32-NEXT:    .cfi_def_cfa_offset 16
1767; RV32-NEXT:    sw a1, 12(sp)
1768; RV32-NEXT:    lw a0, 12(sp)
1769; RV32-NEXT:    sw a0, 8(sp)
1770; RV32-NEXT:    lw a0, 8(sp)
1771; RV32-NEXT:    addi a0, a0, 3
1772; RV32-NEXT:    andi a0, a0, -4
1773; RV32-NEXT:    addi a1, a0, 4
1774; RV32-NEXT:    sw a1, 8(sp)
1775; RV32-NEXT:    lw a0, 0(a0)
1776; RV32-NEXT:    addi sp, sp, 16
1777; RV32-NEXT:    .cfi_def_cfa_offset 0
1778; RV32-NEXT:    ret
1779;
1780; RV64-LABEL: va_vprintf:
1781; RV64:       # %bb.0:
1782; RV64-NEXT:    addi sp, sp, -16
1783; RV64-NEXT:    .cfi_def_cfa_offset 16
1784; RV64-NEXT:    sd a1, 8(sp)
1785; RV64-NEXT:    ld a0, 8(sp)
1786; RV64-NEXT:    sd a0, 0(sp)
1787; RV64-NEXT:    ld a0, 0(sp)
1788; RV64-NEXT:    addi a0, a0, 7
1789; RV64-NEXT:    andi a0, a0, -8
1790; RV64-NEXT:    addi a1, a0, 8
1791; RV64-NEXT:    sd a1, 0(sp)
1792; RV64-NEXT:    ld a0, 0(a0)
1793; RV64-NEXT:    addi sp, sp, 16
1794; RV64-NEXT:    .cfi_def_cfa_offset 0
1795; RV64-NEXT:    ret
1796;
1797; RV32-WITHFP-LABEL: va_vprintf:
1798; RV32-WITHFP:       # %bb.0:
1799; RV32-WITHFP-NEXT:    addi sp, sp, -16
1800; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 16
1801; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1802; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
1803; RV32-WITHFP-NEXT:    .cfi_offset ra, -4
1804; RV32-WITHFP-NEXT:    .cfi_offset s0, -8
1805; RV32-WITHFP-NEXT:    addi s0, sp, 16
1806; RV32-WITHFP-NEXT:    .cfi_def_cfa s0, 0
1807; RV32-WITHFP-NEXT:    sw a1, -12(s0)
1808; RV32-WITHFP-NEXT:    lw a0, -12(s0)
1809; RV32-WITHFP-NEXT:    sw a0, -16(s0)
1810; RV32-WITHFP-NEXT:    lw a0, -16(s0)
1811; RV32-WITHFP-NEXT:    addi a0, a0, 3
1812; RV32-WITHFP-NEXT:    andi a0, a0, -4
1813; RV32-WITHFP-NEXT:    addi a1, a0, 4
1814; RV32-WITHFP-NEXT:    sw a1, -16(s0)
1815; RV32-WITHFP-NEXT:    lw a0, 0(a0)
1816; RV32-WITHFP-NEXT:    .cfi_def_cfa sp, 16
1817; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1818; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
1819; RV32-WITHFP-NEXT:    .cfi_restore ra
1820; RV32-WITHFP-NEXT:    .cfi_restore s0
1821; RV32-WITHFP-NEXT:    addi sp, sp, 16
1822; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
1823; RV32-WITHFP-NEXT:    ret
1824;
1825; RV64-WITHFP-LABEL: va_vprintf:
1826; RV64-WITHFP:       # %bb.0:
1827; RV64-WITHFP-NEXT:    addi sp, sp, -32
1828; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 32
1829; RV64-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
1830; RV64-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
1831; RV64-WITHFP-NEXT:    .cfi_offset ra, -8
1832; RV64-WITHFP-NEXT:    .cfi_offset s0, -16
1833; RV64-WITHFP-NEXT:    addi s0, sp, 32
1834; RV64-WITHFP-NEXT:    .cfi_def_cfa s0, 0
1835; RV64-WITHFP-NEXT:    sd a1, -24(s0)
1836; RV64-WITHFP-NEXT:    ld a0, -24(s0)
1837; RV64-WITHFP-NEXT:    sd a0, -32(s0)
1838; RV64-WITHFP-NEXT:    ld a0, -32(s0)
1839; RV64-WITHFP-NEXT:    addi a0, a0, 7
1840; RV64-WITHFP-NEXT:    andi a0, a0, -8
1841; RV64-WITHFP-NEXT:    addi a1, a0, 8
1842; RV64-WITHFP-NEXT:    sd a1, -32(s0)
1843; RV64-WITHFP-NEXT:    ld a0, 0(a0)
1844; RV64-WITHFP-NEXT:    .cfi_def_cfa sp, 32
1845; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
1846; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
1847; RV64-WITHFP-NEXT:    .cfi_restore ra
1848; RV64-WITHFP-NEXT:    .cfi_restore s0
1849; RV64-WITHFP-NEXT:    addi sp, sp, 32
1850; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
1851; RV64-WITHFP-NEXT:    ret
  ; %args holds the caller-supplied argument pointer; %args_cp receives the
  ; va_copy'd duplicate that the va_arg below consumes.
1852  %args = alloca ptr
1853  %args_cp = alloca ptr
1854  store ptr %arg_start, ptr %args
1855  call void @llvm.va_copy(ptr %args_cp, ptr %args)
  ; Read one iXLen from the copied list (iXLen is sed-substituted to i32/i64
  ; by the RUN lines, so the slot size matches XLEN on each target).
1856  %width = va_arg ptr %args_cp, iXLen
1857  call void @llvm.va_end(ptr %args_cp)
1858  ret iXLen %width
1859}
1860
; Check a variadic caller that forwards its va_list to a fixed-arg callee.
; The generated code spills the variadic GPR arguments a1-a7 into the vararg
; save area, materializes a pointer to the first spilled slot for va_start,
; reloads that pointer, and passes it to va_vprintf as an ordinary argument.
; In the WITHFP runs the spill slots are addressed relative to s0 and the
; CFA is switched to s0 while the frame pointer is live.
1861define i32 @va_printf(ptr %fmt, ...) {
1862; RV32-LABEL: va_printf:
1863; RV32:       # %bb.0:
1864; RV32-NEXT:    addi sp, sp, -48
1865; RV32-NEXT:    .cfi_def_cfa_offset 48
1866; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1867; RV32-NEXT:    .cfi_offset ra, -36
1868; RV32-NEXT:    sw a1, 20(sp)
1869; RV32-NEXT:    sw a2, 24(sp)
1870; RV32-NEXT:    sw a3, 28(sp)
1871; RV32-NEXT:    sw a4, 32(sp)
1872; RV32-NEXT:    addi a1, sp, 20
1873; RV32-NEXT:    sw a1, 8(sp)
1874; RV32-NEXT:    lw a1, 8(sp)
1875; RV32-NEXT:    sw a5, 36(sp)
1876; RV32-NEXT:    sw a6, 40(sp)
1877; RV32-NEXT:    sw a7, 44(sp)
1878; RV32-NEXT:    call va_vprintf
1879; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1880; RV32-NEXT:    .cfi_restore ra
1881; RV32-NEXT:    addi sp, sp, 48
1882; RV32-NEXT:    .cfi_def_cfa_offset 0
1883; RV32-NEXT:    ret
1884;
1885; RV64-LABEL: va_printf:
1886; RV64:       # %bb.0:
1887; RV64-NEXT:    addi sp, sp, -80
1888; RV64-NEXT:    .cfi_def_cfa_offset 80
1889; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1890; RV64-NEXT:    .cfi_offset ra, -72
1891; RV64-NEXT:    sd a1, 24(sp)
1892; RV64-NEXT:    sd a2, 32(sp)
1893; RV64-NEXT:    sd a3, 40(sp)
1894; RV64-NEXT:    sd a4, 48(sp)
1895; RV64-NEXT:    addi a1, sp, 24
1896; RV64-NEXT:    sd a1, 0(sp)
1897; RV64-NEXT:    ld a1, 0(sp)
1898; RV64-NEXT:    sd a5, 56(sp)
1899; RV64-NEXT:    sd a6, 64(sp)
1900; RV64-NEXT:    sd a7, 72(sp)
1901; RV64-NEXT:    call va_vprintf
1902; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1903; RV64-NEXT:    .cfi_restore ra
1904; RV64-NEXT:    addi sp, sp, 80
1905; RV64-NEXT:    .cfi_def_cfa_offset 0
1906; RV64-NEXT:    ret
1907;
1908; RV32-WITHFP-LABEL: va_printf:
1909; RV32-WITHFP:       # %bb.0:
1910; RV32-WITHFP-NEXT:    addi sp, sp, -48
1911; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 48
1912; RV32-WITHFP-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1913; RV32-WITHFP-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
1914; RV32-WITHFP-NEXT:    .cfi_offset ra, -36
1915; RV32-WITHFP-NEXT:    .cfi_offset s0, -40
1916; RV32-WITHFP-NEXT:    addi s0, sp, 16
1917; RV32-WITHFP-NEXT:    .cfi_def_cfa s0, 32
1918; RV32-WITHFP-NEXT:    sw a1, 4(s0)
1919; RV32-WITHFP-NEXT:    sw a2, 8(s0)
1920; RV32-WITHFP-NEXT:    sw a3, 12(s0)
1921; RV32-WITHFP-NEXT:    sw a4, 16(s0)
1922; RV32-WITHFP-NEXT:    addi a1, s0, 4
1923; RV32-WITHFP-NEXT:    sw a1, -12(s0)
1924; RV32-WITHFP-NEXT:    lw a1, -12(s0)
1925; RV32-WITHFP-NEXT:    sw a5, 20(s0)
1926; RV32-WITHFP-NEXT:    sw a6, 24(s0)
1927; RV32-WITHFP-NEXT:    sw a7, 28(s0)
1928; RV32-WITHFP-NEXT:    call va_vprintf
1929; RV32-WITHFP-NEXT:    .cfi_def_cfa sp, 48
1930; RV32-WITHFP-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1931; RV32-WITHFP-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
1932; RV32-WITHFP-NEXT:    .cfi_restore ra
1933; RV32-WITHFP-NEXT:    .cfi_restore s0
1934; RV32-WITHFP-NEXT:    addi sp, sp, 48
1935; RV32-WITHFP-NEXT:    .cfi_def_cfa_offset 0
1936; RV32-WITHFP-NEXT:    ret
1937;
1938; RV64-WITHFP-LABEL: va_printf:
1939; RV64-WITHFP:       # %bb.0:
1940; RV64-WITHFP-NEXT:    addi sp, sp, -96
1941; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 96
1942; RV64-WITHFP-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
1943; RV64-WITHFP-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
1944; RV64-WITHFP-NEXT:    .cfi_offset ra, -72
1945; RV64-WITHFP-NEXT:    .cfi_offset s0, -80
1946; RV64-WITHFP-NEXT:    addi s0, sp, 32
1947; RV64-WITHFP-NEXT:    .cfi_def_cfa s0, 64
1948; RV64-WITHFP-NEXT:    sd a1, 8(s0)
1949; RV64-WITHFP-NEXT:    sd a2, 16(s0)
1950; RV64-WITHFP-NEXT:    sd a3, 24(s0)
1951; RV64-WITHFP-NEXT:    sd a4, 32(s0)
1952; RV64-WITHFP-NEXT:    addi a1, s0, 8
1953; RV64-WITHFP-NEXT:    sd a1, -24(s0)
1954; RV64-WITHFP-NEXT:    ld a1, -24(s0)
1955; RV64-WITHFP-NEXT:    sd a5, 40(s0)
1956; RV64-WITHFP-NEXT:    sd a6, 48(s0)
1957; RV64-WITHFP-NEXT:    sd a7, 56(s0)
1958; RV64-WITHFP-NEXT:    call va_vprintf
1959; RV64-WITHFP-NEXT:    .cfi_def_cfa sp, 96
1960; RV64-WITHFP-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
1961; RV64-WITHFP-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
1962; RV64-WITHFP-NEXT:    .cfi_restore ra
1963; RV64-WITHFP-NEXT:    .cfi_restore s0
1964; RV64-WITHFP-NEXT:    addi sp, sp, 96
1965; RV64-WITHFP-NEXT:    .cfi_def_cfa_offset 0
1966; RV64-WITHFP-NEXT:    ret
  ; va_start %args, load out the raw argument pointer, and hand it to the
  ; fixed-signature helper above (mirrors the printf -> vprintf idiom).
1967  %args = alloca ptr
1968  call void @llvm.va_start(ptr %args)
1969  %arg_start = load ptr, ptr %args
1970  %ret_val = call i32 @va_vprintf(ptr %fmt, ptr %arg_start)
1971  call void @llvm.va_end(ptr %args)
1972  ret i32 %ret_val
1973}
1974