; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=amdgcn-- -S -passes='lto<O2>' < %s | FileCheck %s
target triple = "amdgcn-amd-amdhsa"

; REQUIRES: amdgpu-registered-target

; We use the ExpandVariadics pass to lower variadic functions so they can be
; inlined.

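; After expansion the variadic callee takes an explicit pointer to a buffer
; that each call site allocates and fills with its variadic arguments. As a
; rough sketch (hand-written; the clone's name and the exact packing are
; illustrative, not the pass's verbatim output), the call in @foo becomes
; something like:
;
;   %buffer = alloca { i32, i32, i32 }, align 4, addrspace(5)
;   ; ...store 1, 2 and 3 into the three fields of %buffer...
;   %call = call i32 @vararg.sketch(i32 poison, ptr addrspace(5) %buffer)

; @foo passes poison for the fixed parameter (which @vararg never reads) and
; 1, 2 and 3 as variadic arguments; the CHECK lines verify that the call is
; inlined and folded to a constant after lowering.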
define i32 @foo() {
; CHECK-LABEL: define i32 @foo(
; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    ret i32 6
;
entry:
  %call = tail call i32 (i32, ...) @vararg(i32 poison, i32 noundef 1, i32 noundef 2, i32 noundef 3)
  ret i32 %call
}

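; @vararg steps a va_list pointer through its arguments by hand (the
; already-lowered form of three va_arg(list, i32) reads): it loads i32s at
; byte offsets 0, 4 and 8 from the start of the list and sums them.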
define internal i32 @vararg(i32 %first, ...) {
entry:
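  ; The va_list slot lives in the AMDGPU private (stack) address space 5 and
  ; is cast to a flat pointer for the va_start/va_end intrinsics.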
  %vlist.alloca = alloca ptr, align 8, addrspace(5)
  %vlist = addrspacecast ptr addrspace(5) %vlist.alloca to ptr
  call void @llvm.va_start.p0(ptr %vlist)
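  ; va_start leaves %vlist pointing at the first variadic argument; the code
  ; below is the pre-lowered equivalent of three va_arg steps, each advancing
  ; the saved pointer by 4 bytes before the next read.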
  %vlist.promoted = load ptr, ptr %vlist, align 8
  %argp.a = getelementptr inbounds i8, ptr %vlist.promoted, i64 4
  store ptr %argp.a, ptr %vlist, align 8
  %a = load i32, ptr %vlist.promoted, align 4
  %argp.b = getelementptr inbounds i8, ptr %vlist.promoted, i64 8
  store ptr %argp.b, ptr %vlist, align 8
  %b = load i32, ptr %argp.a, align 4
  %sum = add nsw i32 %b, %a
  %argp.c = getelementptr inbounds i8, ptr %vlist.promoted, i64 12
  store ptr %argp.c, ptr %vlist, align 8
  %c = load i32, ptr %argp.b, align 4
  %ret = add nsw i32 %c, %sum
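  ; For @foo's call the loads yield 1, 2 and 3, so %ret folds to (1 + 2) + 3
  ; = 6, matching the constant in the CHECK lines above.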
  call void @llvm.va_end.p0(ptr %vlist)
  ret i32 %ret
}

declare void @llvm.va_start.p0(ptr)

declare void @llvm.va_end.p0(ptr)