; xref: /llvm-project/llvm/test/Transforms/IROutliner/illegal-vaarg.ll (revision ab7dba233a058cc8310ef829929238b5d8440b30)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=verify,iroutliner -ir-outlining-no-cost -no-ir-sim-intrinsics < %s | FileCheck %s

; This test ensures that we do not outline vararg instructions or intrinsics, as
; they may cause inconsistencies when outlining.

; Declarations of the variadic-argument intrinsics exercised below; the
; IROutliner must treat calls to these (and va_arg itself) as illegal to outline.
declare void @llvm.va_start(ptr)
declare void @llvm.va_copy(ptr, ptr)
declare void @llvm.va_end(ptr)
10
define i32 @func1(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-LABEL: @func1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
; CHECK-NEXT:    [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT:    [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT:    call void @outlined_ir_func_0(i32 [[A:%.*]], ptr [[A_ADDR]], double [[B:%.*]], ptr [[B_ADDR]])
; CHECK-NEXT:    call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT:    [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT:    call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
; CHECK-NEXT:    call void @llvm.va_end.p0(ptr [[AP]])
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT:    call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT:    [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT:    ret i32 [[TMP_RELOAD]]
;
entry:
  %a.addr = alloca i32, align 4
  %b.addr = alloca double, align 8
  %ap = alloca ptr, align 4
  %c = alloca i32, align 4
  store i32 %a, ptr %a.addr, align 4
  store double %b, ptr %b.addr, align 8
  call void @llvm.va_start(ptr %ap)
  %0 = va_arg ptr %ap, i32
  call void @llvm.va_copy(ptr %v, ptr %ap)
  call void @llvm.va_end(ptr %ap)
  store i32 %0, ptr %c, align 4
  %tmp = load i32, ptr %c, align 4
  ret i32 %tmp
}

define i32 @func2(i32 %a, double %b, ptr %v, ...) nounwind {
; CHECK-LABEL: @func2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
; CHECK-NEXT:    [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT:    [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT:    call void @outlined_ir_func_0(i32 [[A:%.*]], ptr [[A_ADDR]], double [[B:%.*]], ptr [[B_ADDR]])
; CHECK-NEXT:    call void @llvm.va_start.p0(ptr [[AP]])
; CHECK-NEXT:    [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT:    call void @llvm.va_copy.p0(ptr [[V:%.*]], ptr [[AP]])
; CHECK-NEXT:    call void @llvm.va_end.p0(ptr [[AP]])
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT:    call void @outlined_ir_func_1(i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT:    [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT:    ret i32 [[TMP_RELOAD]]
;
entry:
  %a.addr = alloca i32, align 4
  %b.addr = alloca double, align 8
  %ap = alloca ptr, align 4
  %c = alloca i32, align 4
  store i32 %a, ptr %a.addr, align 4
  store double %b, ptr %b.addr, align 8
  call void @llvm.va_start(ptr %ap)
  %0 = va_arg ptr %ap, i32
  call void @llvm.va_copy(ptr %v, ptr %ap)
  call void @llvm.va_end(ptr %ap)
  store i32 %0, ptr %c, align 4
  %tmp = load i32, ptr %c, align 4
  ret i32 %tmp
}
