; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
; RUN: opt < %s -passes=inline -S | FileCheck %s
; RUN: opt < %s -passes='cgscc(inline)' -S | FileCheck %s

; The verifier does catch problems with inlining of byval arguments that have a
; different address space than the alloca. But running instcombine after
; inlining used to trigger asserts unless we disallow such inlining.
; RUN: opt < %s -passes=inline,instcombine -disable-output 2>/dev/null

target datalayout = "p:32:32-p1:64:64-p2:16:16-n16:32:64"

; Inlining a byval struct should cause an explicit copy into an alloca.

%struct.ss = type { i32, i64 }
@.str = internal constant [10 x i8] c"%d, %lld\0A\00"  ; <ptr> [#uses=1]

define internal void @f(ptr byval(%struct.ss) %b) nounwind {
entry:
  %tmp = getelementptr %struct.ss, ptr %b, i32 0, i32 0  ; <ptr> [#uses=2]
  %tmp1 = load i32, ptr %tmp, align 4  ; <i32> [#uses=1]
  %tmp2 = add i32 %tmp1, 1  ; <i32> [#uses=1]
  store i32 %tmp2, ptr %tmp, align 4
  ret void
}

declare i32 @printf(ptr, ...) nounwind

define i32 @test1() nounwind {
; CHECK-LABEL: define i32 @test1(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[S1:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
; CHECK-NEXT:    [[S:%.*]] = alloca [[STRUCT_SS]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 0
; CHECK-NEXT:    store i32 1, ptr [[TMP1]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 1
; CHECK-NEXT:    store i64 2, ptr [[TMP4]], align 4
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 12, ptr [[S1]])
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1]], ptr align 1 [[S]], i64 12, i1 false)
; CHECK-NEXT:    [[TMP1_I:%.*]] = load i32, ptr [[S1]], align 4
; CHECK-NEXT:    [[TMP2_I:%.*]] = add i32 [[TMP1_I]], 1
; CHECK-NEXT:    store i32 [[TMP2_I]], ptr [[S1]], align 4
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 12, ptr [[S1]])
; CHECK-NEXT:    ret i32 0
;
entry:
  %S = alloca %struct.ss  ; <ptr> [#uses=4]
  %tmp1 = getelementptr %struct.ss, ptr %S, i32 0, i32 0  ; <ptr> [#uses=1]
  store i32 1, ptr %tmp1, align 8
  %tmp4 = getelementptr %struct.ss, ptr %S, i32 0, i32 1  ; <ptr> [#uses=1]
  store i64 2, ptr %tmp4, align 4
  call void @f(ptr byval(%struct.ss) %S) nounwind
  ret i32 0
}

; Inlining a byval struct should NOT cause an explicit copy
; into an alloca if the function is readonly.

define internal i32 @f2(ptr byval(%struct.ss) %b) nounwind readonly {
entry:
  %tmp = getelementptr %struct.ss, ptr %b, i32 0, i32 0  ; <ptr> [#uses=2]
  %tmp1 = load i32, ptr %tmp, align 4  ; <i32> [#uses=1]
  %tmp2 = add i32 %tmp1, 1  ; <i32> [#uses=1]
  ret i32 %tmp2
}

define i32 @test2() nounwind {
; CHECK-LABEL: define i32 @test2(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[S:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 0
; CHECK-NEXT:    store i32 1, ptr [[TMP1]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 1
; CHECK-NEXT:    store i64 2, ptr [[TMP4]], align 4
; CHECK-NEXT:    [[TMP1_I:%.*]] = load i32, ptr [[S]], align 4
; CHECK-NEXT:    [[TMP2_I:%.*]] = add i32 [[TMP1_I]], 1
; CHECK-NEXT:    ret i32 [[TMP2_I]]
;
entry:
  %S = alloca %struct.ss  ; <ptr> [#uses=4]
  %tmp1 = getelementptr %struct.ss, ptr %S, i32 0, i32 0  ; <ptr> [#uses=1]
  store i32 1, ptr %tmp1, align 8
  %tmp4 = getelementptr %struct.ss, ptr %S, i32 0, i32 1  ; <ptr> [#uses=1]
  store i64 2, ptr %tmp4, align 4
  %X = call i32 @f2(ptr byval(%struct.ss) %S) nounwind
  ret i32 %X
}


; Inlining a byval with an explicit alignment needs to use *at least* that
; alignment on the generated alloca.
; PR8769
declare void @g3(ptr %p)

define internal void @f3(ptr byval(%struct.ss) align 64 %b) nounwind {
  call void @g3(ptr %b)  ;; Could make alignment assumptions!
  ret void
}

define void @test3() nounwind {
; CHECK-LABEL: define void @test3(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[S1:%.*]] = alloca [[STRUCT_SS:%.*]], align 64
; CHECK-NEXT:    [[S:%.*]] = alloca [[STRUCT_SS]], align 1
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 12, ptr [[S1]])
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1]], ptr align 1 [[S]], i64 12, i1 false)
; CHECK-NEXT:    call void @g3(ptr align 64 [[S1]]) #[[ATTR0]]
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 12, ptr [[S1]])
; CHECK-NEXT:    ret void
;
entry:
  %S = alloca %struct.ss, align 1  ;; May not be aligned.
  call void @f3(ptr byval(%struct.ss) align 64 %S) nounwind
  ret void
}


; Inlining a byval struct should NOT cause an explicit copy
; into an alloca if the function is readonly, but should increase an alloca's
; alignment to satisfy an explicit alignment request.

define internal i32 @f4(ptr byval(%struct.ss) align 64 %b) nounwind readonly {
  call void @g3(ptr %b)
  ret i32 4
}

define i32 @test4() nounwind {
; CHECK-LABEL: define i32 @test4(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[S:%.*]] = alloca [[STRUCT_SS:%.*]], align 64
; CHECK-NEXT:    call void @g3(ptr align 64 [[S]]) #[[ATTR0]]
; CHECK-NEXT:    ret i32 4
;
entry:
  %S = alloca %struct.ss, align 2  ; <ptr> [#uses=4]
  %X = call i32 @f4(ptr byval(%struct.ss) align 64 %S) nounwind
  ret i32 %X
}

%struct.S0 = type { i32 }

@b = global %struct.S0 { i32 1 }, align 4
@a = common global i32 0, align 4

define internal void @f5(ptr byval(%struct.S0) nocapture readonly align 4 %p) {
entry:
  store i32 0, ptr @b, align 4
  %0 = load i32, ptr %p, align 4
  store i32 %0, ptr @a, align 4
  ret void
}

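; @f5 is not readonly (it writes to @b), and @b is also what @test5 passes as
; the byval argument, so the inliner must still materialize an explicit copy:
; the inlined load of %p has to see the value @b held before the store of 0.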
define i32 @test5() {
; CHECK-LABEL: define i32 @test5() {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_S0:%.*]], align 8
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 4, ptr [[B]])
; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[B]], ptr align 1 @b, i64 4, i1 false)
; CHECK-NEXT:    store i32 0, ptr @b, align 4
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[B]], align 4
; CHECK-NEXT:    store i32 [[TMP0]], ptr @a, align 4
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 4, ptr [[B]])
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @a, align 4
; CHECK-NEXT:    ret i32 [[TMP1]]
;
entry:
  tail call void @f5(ptr byval(%struct.S0) align 4 @b)
  %0 = load i32, ptr @a, align 4
  ret i32 %0
}

; Inlining a byval struct that is in a different address space than the alloca
; address space is currently disallowed. That would require adjustments inside
; the inlined function, since the address space attribute of the inlined
; argument changes.

%struct.S1 = type { i32 }

@d = addrspace(1) global %struct.S1 { i32 1 }, align 4
@c = common addrspace(1) global i32 0, align 4

define internal void @f5_as1(ptr addrspace(1) byval(%struct.S1) nocapture readonly align 4 %p) {
; CHECK-LABEL: define internal void @f5_as1(
; CHECK-SAME: ptr addrspace(1) readonly byval([[STRUCT_S1:%.*]]) align 4 captures(none) [[P:%.*]]) {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    store i32 0, ptr addrspace(1) @d, align 4
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspace(1) [[P]], align 4
; CHECK-NEXT:    store i32 [[TMP0]], ptr addrspace(1) @c, align 4
; CHECK-NEXT:    ret void
;
entry:
  store i32 0, ptr addrspace(1) @d, align 4
  %0 = load i32, ptr addrspace(1) %p, align 4
  store i32 %0, ptr addrspace(1) @c, align 4
  ret void
}

define i32 @test5_as1() {
; CHECK-LABEL: define i32 @test5_as1() {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    tail call void @f5_as1(ptr addrspace(1) byval([[STRUCT_S1:%.*]]) align 4 @d)
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspace(1) @c, align 4
; CHECK-NEXT:    ret i32 [[TMP0]]
;
entry:
  tail call void @f5_as1(ptr addrspace(1) byval(%struct.S1) align 4 @d)
  %0 = load i32, ptr addrspace(1) @c, align 4
  ret i32 %0
}