; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mcpu=swift -mtriple=thumbv7-apple-ios -passes=slp-vectorizer < %s | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"

%class.Complex = type { double, double }

; Code like this is the result of SROA. Make sure we don't vectorize this
; because the scalar version of the shl/or are handled by the
; backend and disappear, the vectorized code stays.

define void @SROAed(ptr noalias nocapture sret(%class.Complex) %agg.result, [4 x i32] %a.coerce, [4 x i32] %b.coerce) {
; CHECK-LABEL: @SROAed(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE:%.*]], 0
; CHECK-NEXT:    [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_0_EXTRACT]] to i64
; CHECK-NEXT:    [[A_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE]], 1
; CHECK-NEXT:    [[A_SROA_0_4_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_1_EXTRACT]] to i64
; CHECK-NEXT:    [[A_SROA_0_4_INSERT_SHIFT:%.*]] = shl nuw i64 [[A_SROA_0_4_INSERT_EXT]], 32
; CHECK-NEXT:    [[A_SROA_0_4_INSERT_INSERT:%.*]] = or i64 [[A_SROA_0_4_INSERT_SHIFT]], [[A_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i64 [[A_SROA_0_4_INSERT_INSERT]] to double
; CHECK-NEXT:    [[A_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE]], 2
; CHECK-NEXT:    [[A_SROA_3_8_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_2_EXTRACT]] to i64
; CHECK-NEXT:    [[A_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE]], 3
; CHECK-NEXT:    [[A_SROA_3_12_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_3_EXTRACT]] to i64
; CHECK-NEXT:    [[A_SROA_3_12_INSERT_SHIFT:%.*]] = shl nuw i64 [[A_SROA_3_12_INSERT_EXT]], 32
; CHECK-NEXT:    [[A_SROA_3_12_INSERT_INSERT:%.*]] = or i64 [[A_SROA_3_12_INSERT_SHIFT]], [[A_SROA_3_8_INSERT_EXT]]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[A_SROA_3_12_INSERT_INSERT]] to double
; CHECK-NEXT:    [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE:%.*]], 0
; CHECK-NEXT:    [[B_SROA_0_0_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_0_EXTRACT]] to i64
; CHECK-NEXT:    [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE]], 1
; CHECK-NEXT:    [[B_SROA_0_4_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_1_EXTRACT]] to i64
; CHECK-NEXT:    [[B_SROA_0_4_INSERT_SHIFT:%.*]] = shl nuw i64 [[B_SROA_0_4_INSERT_EXT]], 32
; CHECK-NEXT:    [[B_SROA_0_4_INSERT_INSERT:%.*]] = or i64 [[B_SROA_0_4_INSERT_SHIFT]], [[B_SROA_0_0_INSERT_EXT]]
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i64 [[B_SROA_0_4_INSERT_INSERT]] to double
; CHECK-NEXT:    [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE]], 2
; CHECK-NEXT:    [[B_SROA_3_8_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_2_EXTRACT]] to i64
; CHECK-NEXT:    [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE]], 3
; CHECK-NEXT:    [[B_SROA_3_12_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_3_EXTRACT]] to i64
; CHECK-NEXT:    [[B_SROA_3_12_INSERT_SHIFT:%.*]] = shl nuw i64 [[B_SROA_3_12_INSERT_EXT]], 32
; CHECK-NEXT:    [[B_SROA_3_12_INSERT_INSERT:%.*]] = or i64 [[B_SROA_3_12_INSERT_SHIFT]], [[B_SROA_3_8_INSERT_EXT]]
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i64 [[B_SROA_3_12_INSERT_INSERT]] to double
; CHECK-NEXT:    [[ADD:%.*]] = fadd double [[TMP0]], [[TMP2]]
; CHECK-NEXT:    [[ADD3:%.*]] = fadd double [[TMP1]], [[TMP3]]
; CHECK-NEXT:    store double [[ADD]], ptr [[AGG_RESULT:%.*]], align 4
; CHECK-NEXT:    [[IM_I_I:%.*]] = getelementptr inbounds [[CLASS_COMPLEX:%.*]], ptr [[AGG_RESULT]], i32 0, i32 1
; CHECK-NEXT:    store double [[ADD3]], ptr [[IM_I_I]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %a.coerce.fca.0.extract = extractvalue [4 x i32] %a.coerce, 0
  %a.sroa.0.0.insert.ext = zext i32 %a.coerce.fca.0.extract to i64
  %a.coerce.fca.1.extract = extractvalue [4 x i32] %a.coerce, 1
  %a.sroa.0.4.insert.ext = zext i32 %a.coerce.fca.1.extract to i64
  %a.sroa.0.4.insert.shift = shl nuw i64 %a.sroa.0.4.insert.ext, 32
  %a.sroa.0.4.insert.insert = or i64 %a.sroa.0.4.insert.shift, %a.sroa.0.0.insert.ext
  %0 = bitcast i64 %a.sroa.0.4.insert.insert to double
  %a.coerce.fca.2.extract = extractvalue [4 x i32] %a.coerce, 2
  %a.sroa.3.8.insert.ext = zext i32 %a.coerce.fca.2.extract to i64
  %a.coerce.fca.3.extract = extractvalue [4 x i32] %a.coerce, 3
  %a.sroa.3.12.insert.ext = zext i32 %a.coerce.fca.3.extract to i64
  %a.sroa.3.12.insert.shift = shl nuw i64 %a.sroa.3.12.insert.ext, 32
  %a.sroa.3.12.insert.insert = or i64 %a.sroa.3.12.insert.shift, %a.sroa.3.8.insert.ext
  %1 = bitcast i64 %a.sroa.3.12.insert.insert to double
  %b.coerce.fca.0.extract = extractvalue [4 x i32] %b.coerce, 0
  %b.sroa.0.0.insert.ext = zext i32 %b.coerce.fca.0.extract to i64
  %b.coerce.fca.1.extract = extractvalue [4 x i32] %b.coerce, 1
  %b.sroa.0.4.insert.ext = zext i32 %b.coerce.fca.1.extract to i64
  %b.sroa.0.4.insert.shift = shl nuw i64 %b.sroa.0.4.insert.ext, 32
  %b.sroa.0.4.insert.insert = or i64 %b.sroa.0.4.insert.shift, %b.sroa.0.0.insert.ext
  %2 = bitcast i64 %b.sroa.0.4.insert.insert to double
  %b.coerce.fca.2.extract = extractvalue [4 x i32] %b.coerce, 2
  %b.sroa.3.8.insert.ext = zext i32 %b.coerce.fca.2.extract to i64
  %b.coerce.fca.3.extract = extractvalue [4 x i32] %b.coerce, 3
  %b.sroa.3.12.insert.ext = zext i32 %b.coerce.fca.3.extract to i64
  %b.sroa.3.12.insert.shift = shl nuw i64 %b.sroa.3.12.insert.ext, 32
  %b.sroa.3.12.insert.insert = or i64 %b.sroa.3.12.insert.shift, %b.sroa.3.8.insert.ext
  %3 = bitcast i64 %b.sroa.3.12.insert.insert to double
  %add = fadd double %0, %2
  %add3 = fadd double %1, %3
  store double %add, ptr %agg.result, align 4
  %im.i.i = getelementptr inbounds %class.Complex, ptr %agg.result, i32 0, i32 1
  store double %add3, ptr %im.i.i, align 4
  ret void
}