; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux -mcpu=corei7-avx -slp-threshold=-999 < %s | FileCheck %s

; S[0] = %v1 + %v2
; S[1] = %v2 + %v1
; S[2] = %v2 + %v1
; S[3] = %v1 + %v2
;
; We broadcast %v1 and %v2
;

define void @bcast_vals(ptr %A, ptr %B, ptr %S) {
; CHECK-LABEL: @bcast_vals(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A0:%.*]] = load i64, ptr [[A:%.*]], align 8
; CHECK-NEXT: [[B0:%.*]] = load i64, ptr [[B:%.*]], align 8
; CHECK-NEXT: [[V1:%.*]] = sub i64 [[A0]], 1
; CHECK-NEXT: [[V2:%.*]] = sub i64 [[B0]], 1
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i64> poison, i64 [[V1]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[TMP0]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i64> poison, i64 [[V2]], i32 0
; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i64> [[TMP2]], <4 x i64> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = add <4 x i64> [[TMP1]], [[TMP3]]
; CHECK-NEXT: store <4 x i64> [[TMP4]], ptr [[S:%.*]], align 8
; CHECK-NEXT: ret void
;
entry:
  %A0 = load i64, ptr %A, align 8
  %B0 = load i64, ptr %B, align 8

  %v1 = sub i64 %A0, 1
  %v2 = sub i64 %B0, 1

  ; The four adds use %v1/%v2 in alternating operand orders; the vectorizer
  ; must reorder commuted operands so each operand can become one broadcast.
  %Add0 = add i64 %v1, %v2
  %Add1 = add i64 %v2, %v1
  %Add2 = add i64 %v2, %v1
  %Add3 = add i64 %v1, %v2

  %idxS1 = getelementptr inbounds i64, ptr %S, i64 1
  %idxS2 = getelementptr inbounds i64, ptr %S, i64 2
  %idxS3 = getelementptr inbounds i64, ptr %S, i64 3

  store i64 %Add0, ptr %S, align 8
  store i64 %Add1, ptr %idxS1, align 8
  store i64 %Add2, ptr %idxS2, align 8
  store i64 %Add3, ptr %idxS3, align 8
  ret void
}

; S[0] = %v1 + %v2
; S[1] = %v3 + %v1
; S[2] = %v5 + %v1
; S[3] = %v1 + %v4
;
; We broadcast %v1.

;
define void @bcast_vals2(ptr %A, ptr %B, ptr %C, ptr %D, ptr %E, ptr %S) {
; CHECK-LABEL: @bcast_vals2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A0:%.*]] = load i16, ptr [[A:%.*]], align 8
; CHECK-NEXT: [[B0:%.*]] = load i16, ptr [[B:%.*]], align 8
; CHECK-NEXT: [[C0:%.*]] = load i16, ptr [[C:%.*]], align 8
; CHECK-NEXT: [[D0:%.*]] = load i16, ptr [[D:%.*]], align 8
; CHECK-NEXT: [[E0:%.*]] = load i16, ptr [[E:%.*]], align 8
; CHECK-NEXT: [[V1:%.*]] = sext i16 [[A0]] to i32
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i16> poison, i16 [[B0]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i16> [[TMP0]], i16 [[C0]], i32 1
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i16> [[TMP1]], i16 [[E0]], i32 2
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i16> [[TMP2]], i16 [[D0]], i32 3
; CHECK-NEXT: [[TMP4:%.*]] = sext <4 x i16> [[TMP3]] to <4 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> poison, i32 [[V1]], i32 0
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = add <4 x i32> [[TMP6]], [[TMP4]]
; CHECK-NEXT: store <4 x i32> [[TMP7]], ptr [[S:%.*]], align 8
; CHECK-NEXT: ret void
;
entry:
  %A0 = load i16, ptr %A, align 8
  %B0 = load i16, ptr %B, align 8
  %C0 = load i16, ptr %C, align 8
  %D0 = load i16, ptr %D, align 8
  %E0 = load i16, ptr %E, align 8

  %v1 = sext i16 %A0 to i32
  %v2 = sext i16 %B0 to i32
  %v3 = sext i16 %C0 to i32
  %v4 = sext i16 %D0 to i32
  %v5 = sext i16 %E0 to i32

  ; %v1 appears in every lane (sometimes left, sometimes right operand), so
  ; it is broadcast; %v2/%v3/%v5/%v4 form the gathered second operand vector.
  %Add0 = add i32 %v1, %v2
  %Add1 = add i32 %v3, %v1
  %Add2 = add i32 %v5, %v1
  %Add3 = add i32 %v1, %v4

  %idxS1 = getelementptr inbounds i32, ptr %S, i64 1
  %idxS2 = getelementptr inbounds i32, ptr %S, i64 2
  %idxS3 = getelementptr inbounds i32, ptr %S, i64 3

  store i32 %Add0, ptr %S, align 8
  store i32 %Add1, ptr %idxS1, align 8
  store i32 %Add2, ptr %idxS2, align 8
  store i32 %Add3, ptr %idxS3, align 8
  ret void
}