; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes='sroa<preserve-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-PRESERVE-CFG
; RUN: opt < %s -passes='sroa<modify-cfg>' -S | FileCheck %s --check-prefixes=CHECK,CHECK-MODIFY-CFG

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"

define i8 @test1() {
; We fully promote these to the i24 load or store size, resulting in just masks
; and other operations that instcombine will fold, but no alloca. Note this is
; the same as test12 in basictest.ll, but here we assert big-endian byte
; ordering.
;
; CHECK-LABEL: @test1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i8 0 to i24
; CHECK-NEXT:    [[A_SROA_3_0_INSERT_MASK:%.*]] = and i24 undef, -256
; CHECK-NEXT:    [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i24 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_EXT]]
; CHECK-NEXT:    [[A_SROA_2_0_INSERT_EXT:%.*]] = zext i8 0 to i24
; CHECK-NEXT:    [[A_SROA_2_0_INSERT_SHIFT:%.*]] = shl i24 [[A_SROA_2_0_INSERT_EXT]], 8
; CHECK-NEXT:    [[A_SROA_2_0_INSERT_MASK:%.*]] = and i24 [[A_SROA_3_0_INSERT_INSERT]], -65281
; CHECK-NEXT:    [[A_SROA_2_0_INSERT_INSERT:%.*]] = or i24 [[A_SROA_2_0_INSERT_MASK]], [[A_SROA_2_0_INSERT_SHIFT]]
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i8 0 to i24
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_SHIFT:%.*]] = shl i24 [[A_SROA_0_0_INSERT_EXT]], 16
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_MASK:%.*]] = and i24 [[A_SROA_2_0_INSERT_INSERT]], 65535
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i24 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_SHIFT]]
; CHECK-NEXT:    [[B_SROA_0_0_EXTRACT_SHIFT:%.*]] = lshr i24 [[A_SROA_0_0_INSERT_INSERT]], 16
; CHECK-NEXT:    [[B_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i24 [[B_SROA_0_0_EXTRACT_SHIFT]] to i8
; CHECK-NEXT:    [[B_SROA_2_0_EXTRACT_SHIFT:%.*]] = lshr i24 [[A_SROA_0_0_INSERT_INSERT]], 8
; CHECK-NEXT:    [[B_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i24 [[B_SROA_2_0_EXTRACT_SHIFT]] to i8
; CHECK-NEXT:    [[B_SROA_3_0_EXTRACT_TRUNC:%.*]] = trunc i24 [[A_SROA_0_0_INSERT_INSERT]] to i8
; CHECK-NEXT:    [[BSUM0:%.*]] = add i8 [[B_SROA_0_0_EXTRACT_TRUNC]], [[B_SROA_2_0_EXTRACT_TRUNC]]
; CHECK-NEXT:    [[BSUM1:%.*]] = add i8 [[BSUM0]], [[B_SROA_3_0_EXTRACT_TRUNC]]
; CHECK-NEXT:    ret i8 [[BSUM1]]
;
entry:
  %a = alloca [3 x i8]
  %b = alloca [3 x i8]

  store i8 0, ptr %a
  %a1ptr = getelementptr [3 x i8], ptr %a, i64 0, i32 1
  store i8 0, ptr %a1ptr
  %a2ptr = getelementptr [3 x i8], ptr %a, i64 0, i32 2
  store i8 0, ptr %a2ptr
  %ai = load i24, ptr %a

  store i24 %ai, ptr %b
  %b0 = load i8, ptr %b
  %b1ptr = getelementptr [3 x i8], ptr %b, i64 0, i32 1
  %b1 = load i8, ptr %b1ptr
  %b2ptr = getelementptr [3 x i8], ptr %b, i64 0, i32 2
  %b2 = load i8, ptr %b2ptr

  %bsum0 = add i8 %b0, %b1
  %bsum1 = add i8 %bsum0, %b2
  ret i8 %bsum1
}

define i64 @test2() {
; Test for various mixed sizes of integer loads and stores all getting
; promoted.
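; On this big-endian target the byte at offset 0 carries the most significant
; bits of the promoted integer, so, as the CHECK lines below show, the i16
; stored at offset 0 ends up shifted left by 40 bits before being merged into
; the rewritten i56 value.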
;
; CHECK-LABEL: @test2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_2_SROA_4_0_INSERT_EXT:%.*]] = zext i8 1 to i40
; CHECK-NEXT:    [[A_SROA_2_SROA_4_0_INSERT_MASK:%.*]] = and i40 undef, -256
; CHECK-NEXT:    [[A_SROA_2_SROA_4_0_INSERT_INSERT:%.*]] = or i40 [[A_SROA_2_SROA_4_0_INSERT_MASK]], [[A_SROA_2_SROA_4_0_INSERT_EXT]]
; CHECK-NEXT:    [[A_SROA_2_SROA_3_0_INSERT_EXT:%.*]] = zext i24 0 to i40
; CHECK-NEXT:    [[A_SROA_2_SROA_3_0_INSERT_SHIFT:%.*]] = shl i40 [[A_SROA_2_SROA_3_0_INSERT_EXT]], 8
; CHECK-NEXT:    [[A_SROA_2_SROA_3_0_INSERT_MASK:%.*]] = and i40 [[A_SROA_2_SROA_4_0_INSERT_INSERT]], -4294967041
; CHECK-NEXT:    [[A_SROA_2_SROA_3_0_INSERT_INSERT:%.*]] = or i40 [[A_SROA_2_SROA_3_0_INSERT_MASK]], [[A_SROA_2_SROA_3_0_INSERT_SHIFT]]
; CHECK-NEXT:    [[A_SROA_2_SROA_0_0_INSERT_EXT:%.*]] = zext i8 0 to i40
; CHECK-NEXT:    [[A_SROA_2_SROA_0_0_INSERT_SHIFT:%.*]] = shl i40 [[A_SROA_2_SROA_0_0_INSERT_EXT]], 32
; CHECK-NEXT:    [[A_SROA_2_SROA_0_0_INSERT_MASK:%.*]] = and i40 [[A_SROA_2_SROA_3_0_INSERT_INSERT]], 4294967295
; CHECK-NEXT:    [[A_SROA_2_SROA_0_0_INSERT_INSERT:%.*]] = or i40 [[A_SROA_2_SROA_0_0_INSERT_MASK]], [[A_SROA_2_SROA_0_0_INSERT_SHIFT]]
; CHECK-NEXT:    [[A_SROA_2_0_INSERT_EXT:%.*]] = zext i40 [[A_SROA_2_SROA_0_0_INSERT_INSERT]] to i56
; CHECK-NEXT:    [[A_SROA_2_0_INSERT_MASK:%.*]] = and i56 undef, -1099511627776
; CHECK-NEXT:    [[A_SROA_2_0_INSERT_INSERT:%.*]] = or i56 [[A_SROA_2_0_INSERT_MASK]], [[A_SROA_2_0_INSERT_EXT]]
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i16 1 to i56
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_SHIFT:%.*]] = shl i56 [[A_SROA_0_0_INSERT_EXT]], 40
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_MASK:%.*]] = and i56 [[A_SROA_2_0_INSERT_INSERT]], 1099511627775
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i56 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_SHIFT]]
; CHECK-NEXT:    [[RET:%.*]] = zext i56 [[A_SROA_0_0_INSERT_INSERT]] to i64
; CHECK-NEXT:    ret i64 [[RET]]
;
entry:
  %a = alloca [7 x i8]

  %a1ptr = getelementptr [7 x i8], ptr %a, i64 0, i32 1
  %a2ptr = getelementptr [7 x i8], ptr %a, i64 0, i32 2
  %a3ptr = getelementptr [7 x i8], ptr %a, i64 0, i32 3

  store i16 1, ptr %a
; Here, i16 1 is for %a[0] to %a[1].

  store i8 1, ptr %a2ptr

  store i24 1, ptr %a3ptr

  store i40 1, ptr %a2ptr
; The alloca is split into multiple slices. For the i40 store above:
; here, i8 1 is for %a[6];
; here, i24 0 is for %a[3] to %a[5];
; here, i8 0 is for %a[2].

  %ai = load i56, ptr %a
  %ret = zext i56 %ai to i64
  ret i64 %ret
}

define i64 @PR14132(i1 %flag) {
; Here we form a PHI-node by promoting the pointer alloca first, and then in
; order to promote the other two allocas, we speculate the load of the
; now-phi-node-pointer. In doing so we end up loading a 64-bit value from an i8
; alloca. While this is a bit dubious, we used to assert when trying to
; rewrite it. The trick is that the code using the value may carefully take
; steps to only use the not-undef bits, and so we need to at least loosely
; support this. This test is particularly interesting because how we handle
; a load of an i64 from an i8 alloca depends on endianness.
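; On this big-endian layout the single defined byte from %b supplies the most
; significant byte of the speculated i64 load, which is why the CHECK lines
; below show it being shifted left by 56 bits.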
; CHECK-LABEL: @PR14132(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[FLAG:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
; CHECK:       if.then:
; CHECK-NEXT:    [[B_0_LOAD_EXT:%.*]] = zext i8 1 to i64
; CHECK-NEXT:    [[B_0_ENDIAN_SHIFT:%.*]] = shl i64 [[B_0_LOAD_EXT]], 56
; CHECK-NEXT:    br label [[IF_END]]
; CHECK:       if.end:
; CHECK-NEXT:    [[PTR_0_SROA_SPECULATED:%.*]] = phi i64 [ [[B_0_ENDIAN_SHIFT]], [[IF_THEN]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    ret i64 [[PTR_0_SROA_SPECULATED]]
;
entry:
  %a = alloca i64, align 8
  %b = alloca i8, align 8
  %ptr = alloca ptr, align 8

  store i64 0, ptr %a
  store i8 1, ptr %b
  store ptr %a, ptr %ptr
  br i1 %flag, label %if.then, label %if.end

if.then:
  store ptr %b, ptr %ptr
  br label %if.end

if.end:
  %tmp = load ptr, ptr %ptr
  %result = load i64, ptr %tmp

  ret i64 %result
}

declare void @f(i64 %x, i32 %y)

define void @test3() {
; This is a test that specifically exercises the big-endian lowering because it
; ends up splitting a 64-bit integer into two smaller integers and has a number
; of tricky aspects (the i24 type) that make that hard. Historically, SROA
; would miscompile this by dropping either the most significant or the least
; significant byte due to shrinking the [4,8) slice to an i24, or by failing to
; move the bytes around correctly.
;
; The magical number 34494054408 is used because it has bits set in various
; bytes so that it is clear if those bytes fail to be propagated.
;
; If you're debugging this, rather than using the direct magical numbers, run
; the IR through '-passes=sroa,instcombine'. With instcombine these will be
; constant folded, and if the i64 doesn't round-trip correctly, you've found
; a bug!
;
; CHECK-LABEL: @test3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i32 134316040 to i64
; CHECK-NEXT:    [[A_SROA_3_0_INSERT_MASK:%.*]] = and i64 undef, -4294967296
; CHECK-NEXT:    [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i64 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_EXT]]
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i32 8 to i64
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_SHIFT:%.*]] = shl i64 [[A_SROA_0_0_INSERT_EXT]], 32
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_MASK:%.*]] = and i64 [[A_SROA_3_0_INSERT_INSERT]], 4294967295
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_SHIFT]]
; CHECK-NEXT:    call void @f(i64 [[A_SROA_0_0_INSERT_INSERT]], i32 8)
; CHECK-NEXT:    ret void
;
entry:
  %a = alloca { i32, i24 }, align 4

  store i64 34494054408, ptr %a
  %tmp1 = load i64, ptr %a, align 4
  %tmp3 = load i32, ptr %a, align 4

  call void @f(i64 %tmp1, i32 %tmp3)
  ret void
}

define void @test4() {
; Much like @test3, this is specifically testing big-endian management of data.
; Also similarly, it uses constants with particular bits set to help track
; whether values are corrupted, and can be easily evaluated by running through
; -passes=instcombine to see that the i64 round-trips.
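; For reference, 34494054408 is 0x808018008: split at bit 32, its upper half is
; 8 and its lower half is 134316040 (0x08018008). Those are the two i32 values
; materialized directly in the CHECK lines for @test3 and recomputed via
; lshr/trunc in the CHECK lines below.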
;
; CHECK-LABEL: @test4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_SROA_0_0_EXTRACT_SHIFT:%.*]] = lshr i64 34494054408, 32
; CHECK-NEXT:    [[A_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[A_SROA_0_0_EXTRACT_SHIFT]] to i32
; CHECK-NEXT:    [[A_SROA_3_0_EXTRACT_TRUNC:%.*]] = trunc i64 34494054408 to i32
; CHECK-NEXT:    [[A_SROA_3_0_INSERT_EXT:%.*]] = zext i32 [[A_SROA_3_0_EXTRACT_TRUNC]] to i64
; CHECK-NEXT:    [[A_SROA_3_0_INSERT_MASK:%.*]] = and i64 undef, -4294967296
; CHECK-NEXT:    [[A_SROA_3_0_INSERT_INSERT:%.*]] = or i64 [[A_SROA_3_0_INSERT_MASK]], [[A_SROA_3_0_INSERT_EXT]]
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i32 [[A_SROA_0_0_EXTRACT_TRUNC]] to i64
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_SHIFT:%.*]] = shl i64 [[A_SROA_0_0_INSERT_EXT]], 32
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_MASK:%.*]] = and i64 [[A_SROA_3_0_INSERT_INSERT]], 4294967295
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_INSERT:%.*]] = or i64 [[A_SROA_0_0_INSERT_MASK]], [[A_SROA_0_0_INSERT_SHIFT]]
; CHECK-NEXT:    call void @f(i64 [[A_SROA_0_0_INSERT_INSERT]], i32 [[A_SROA_0_0_EXTRACT_TRUNC]])
; CHECK-NEXT:    ret void
;
entry:
  %a = alloca { i32, i24 }, align 4
  %a2 = alloca i64, align 4

  store i64 34494054408, ptr %a2
  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a, ptr align 4 %a2, i64 8, i1 false)

  %tmp3 = load i64, ptr %a, align 4
  %tmp5 = load i32, ptr %a, align 4

  call void @f(i64 %tmp3, i32 %tmp5)
  ret void
}

declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-MODIFY-CFG: {{.*}}
; CHECK-PRESERVE-CFG: {{.*}}