; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s

target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le--linux"

define i32 @foo(i32 %guard, ...) {
  %vl = alloca ptr, align 8
  call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
  call void @llvm.va_start(ptr %vl)
  call void @llvm.va_end(ptr %vl)
  call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
  ret i32 0
}

; First, check allocation of the save area.

; CHECK-LABEL: @foo
; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
; CHECK: [[C:%.*]] = alloca {{.*}} [[A]]

; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[A]], i1 false)

; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[A]], i64 800)
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)

declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
declare void @llvm.va_start(ptr) #2
declare void @llvm.va_end(ptr) #2
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1

define i32 @bar() {
  %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
  ret i32 %1
}

; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
; array.
; CHECK-LABEL: @bar
; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8
; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls

; Check vector argument.
define i32 @bar2() {
  %1 = call i32 (i32, ...) @foo(i32 0, <2 x i64> <i64 1, i64 2>)
  ret i32 %1
}

; The vector is at offset 16 of parameter save area, but __msan_va_arg_tls
; corresponds to offset 8+ of parameter save area - so the offset from
; __msan_va_arg_tls is actually misaligned.
; CHECK-LABEL: @bar2
; CHECK: store <2 x i64> zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
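; A rough worked example of that misalignment (informational comment, derived
; from the checks above, not itself verified by FileCheck): %guard fills save
; area bytes 0-7, the <2 x i64> is aligned up to bytes 16-31, and because
; __msan_va_arg_tls mirrors the save area from offset 8 onwards, the vector's
; shadow lands at __msan_va_arg_tls + 8 (an 8-byte, not 16-byte, aligned slot)
; and the recorded overflow size is 8 + 16 = 24 bytes.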

; Check i64 array.
define i32 @bar4() {
  %1 = call i32 (i32, ...) @foo(i32 0, [2 x i64] [i64 1, i64 2])
  ret i32 %1
}

; CHECK-LABEL: @bar4
; CHECK: store [2 x i64] zeroinitializer, ptr @__msan_va_arg_tls, align 8
; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls

; Check i128 array.
define i32 @bar5() {
  %1 = call i32 (i32, ...) @foo(i32 0, [2 x i128] [i128 1, i128 2])
  ret i32 %1
}

; CHECK-LABEL: @bar5
; CHECK: store [2 x i128] zeroinitializer, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls

; Check 8-aligned byval.
define i32 @bar6(ptr %arg) {
  %1 = call i32 (i32, ...) @foo(i32 0, ptr byval([2 x i64]) align 8 %arg)
  ret i32 %1
}

; CHECK-LABEL: @bar6
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 @__msan_va_arg_tls, ptr align 8 {{.*}}, i64 16, i1 false)
; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls

; Check 16-aligned byval.
define i32 @bar7(ptr %arg) {
  %1 = call i32 (i32, ...) @foo(i32 0, ptr byval([4 x i64]) align 16 %arg)
  ret i32 %1
}

; CHECK-LABEL: @bar7
; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), ptr align 8 {{.*}}, i64 32, i1 false)
; CHECK: store {{.*}} 40, {{.*}} @__msan_va_arg_overflow_size_tls

; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
; passed to a variadic function.
define dso_local i64 @many_args() {
entry:
  %ret = call i64 (i64, ...) @sum(i64 120,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
    i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1
  )
  ret i64 %ret
}

; If the size of __msan_va_arg_tls changes, the second argument of `add` must also be changed.
; CHECK-LABEL: @many_args
; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
declare i64 @sum(i64 %n, ...)
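
; Note on the 792 above (informational comment, derived from the checks, not
; itself verified by FileCheck): the 120 i64 varargs would need 120 * 8 = 960
; bytes of shadow, but __msan_va_arg_tls is only 800 bytes (matching the umin
; against 800 checked in @foo), so the last slot that still fits starts at
; offset 800 - 8 = 792, which is why offset 792 may appear in the output but
; offset 800 must not.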