; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX

; All four lanes divide by the same power-of-two constant, so both the SSE and
; the AVX runs are expected to vectorize the four scalar sdivs into a single
; <4 x i32> sdiv by splat (i32 2).
define void @powof2div_uniform(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ptr noalias nocapture readonly %c){
; CHECK-LABEL: @powof2div_uniform(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr [[C:%.*]], align 4
; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT:    [[TMP5:%.*]] = sdiv <4 x i32> [[TMP4]], splat (i32 2)
; CHECK-NEXT:    store <4 x i32> [[TMP5]], ptr [[A:%.*]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = load i32, ptr %b, align 4
  %1 = load i32, ptr %c, align 4
  %add = add nsw i32 %1, %0
  %div = sdiv i32 %add, 2
  store i32 %div, ptr %a, align 4
  %arrayidx3 = getelementptr inbounds i32, ptr %b, i64 1
  %2 = load i32, ptr %arrayidx3, align 4
  %arrayidx4 = getelementptr inbounds i32, ptr %c, i64 1
  %3 = load i32, ptr %arrayidx4, align 4
  %add5 = add nsw i32 %3, %2
  %div6 = sdiv i32 %add5, 2
  %arrayidx7 = getelementptr inbounds i32, ptr %a, i64 1
  store i32 %div6, ptr %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i32, ptr %b, i64 2
  %4 = load i32, ptr %arrayidx8, align 4
  %arrayidx9 = getelementptr inbounds i32, ptr %c, i64 2
  %5 = load i32, ptr %arrayidx9, align 4
  %add10 = add nsw i32 %5, %4
  %div11 = sdiv i32 %add10, 2
  %arrayidx12 = getelementptr inbounds i32, ptr %a, i64 2
  store i32 %div11, ptr %arrayidx12, align 4
  %arrayidx13 = getelementptr inbounds i32, ptr %b, i64 3
  %6 = load i32, ptr %arrayidx13, align 4
  %arrayidx14 = getelementptr inbounds i32, ptr %c, i64 3
  %7 = load i32, ptr %arrayidx14, align 4
  %add15 = add nsw i32 %7, %6
  %div16 = sdiv i32 %add15, 2
  %arrayidx17 = getelementptr inbounds i32, ptr %a, i64 3
  store i32 %div16, ptr %arrayidx17, align 4
  ret void
}

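; The divisors below are still powers of two but differ per lane (2, 4, 8, 16):
; the SSE run is expected to keep the scalar code, while the AVX/AVX2 runs are
; expected to vectorize the sdiv with the constant divisor <2, 4, 8, 16>.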
define void @powof2div_nonuniform(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ptr noalias nocapture readonly %c){
; SSE-LABEL: @powof2div_nonuniform(
; SSE-NEXT:  entry:
; SSE-NEXT:    [[TMP0:%.*]] = load i32, ptr [[B:%.*]], align 4
; SSE-NEXT:    [[TMP1:%.*]] = load i32, ptr [[C:%.*]], align 4
; SSE-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]]
; SSE-NEXT:    [[DIV:%.*]] = sdiv i32 [[ADD]], 2
; SSE-NEXT:    store i32 [[DIV]], ptr [[A:%.*]], align 4
; SSE-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 1
; SSE-NEXT:    [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
; SSE-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 1
; SSE-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4
; SSE-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
; SSE-NEXT:    [[DIV6:%.*]] = sdiv i32 [[ADD5]], 4
; SSE-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 1
; SSE-NEXT:    store i32 [[DIV6]], ptr [[ARRAYIDX7]], align 4
; SSE-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 2
; SSE-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX8]], align 4
; SSE-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 2
; SSE-NEXT:    [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX9]], align 4
; SSE-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
; SSE-NEXT:    [[DIV11:%.*]] = sdiv i32 [[ADD10]], 8
; SSE-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 2
; SSE-NEXT:    store i32 [[DIV11]], ptr [[ARRAYIDX12]], align 4
; SSE-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 3
; SSE-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX13]], align 4
; SSE-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 3
; SSE-NEXT:    [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX14]], align 4
; SSE-NEXT:    [[ADD15:%.*]] = add nsw i32 [[TMP7]], [[TMP6]]
; SSE-NEXT:    [[DIV16:%.*]] = sdiv i32 [[ADD15]], 16
; SSE-NEXT:    [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 3
; SSE-NEXT:    store i32 [[DIV16]], ptr [[ARRAYIDX17]], align 4
; SSE-NEXT:    ret void
;
; AVX-LABEL: @powof2div_nonuniform(
; AVX-NEXT:  entry:
; AVX-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
; AVX-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr [[C:%.*]], align 4
; AVX-NEXT:    [[TMP4:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP1]]
; AVX-NEXT:    [[TMP5:%.*]] = sdiv <4 x i32> [[TMP4]], <i32 2, i32 4, i32 8, i32 16>
; AVX-NEXT:    store <4 x i32> [[TMP5]], ptr [[A:%.*]], align 4
; AVX-NEXT:    ret void
;
entry:
  %0 = load i32, ptr %b, align 4
  %1 = load i32, ptr %c, align 4
  %add = add nsw i32 %1, %0
  %div = sdiv i32 %add, 2
  store i32 %div, ptr %a, align 4
  %arrayidx3 = getelementptr inbounds i32, ptr %b, i64 1
  %2 = load i32, ptr %arrayidx3, align 4
  %arrayidx4 = getelementptr inbounds i32, ptr %c, i64 1
  %3 = load i32, ptr %arrayidx4, align 4
  %add5 = add nsw i32 %3, %2
  %div6 = sdiv i32 %add5, 4
  %arrayidx7 = getelementptr inbounds i32, ptr %a, i64 1
  store i32 %div6, ptr %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i32, ptr %b, i64 2
  %4 = load i32, ptr %arrayidx8, align 4
  %arrayidx9 = getelementptr inbounds i32, ptr %c, i64 2
  %5 = load i32, ptr %arrayidx9, align 4
  %add10 = add nsw i32 %5, %4
  %div11 = sdiv i32 %add10, 8
  %arrayidx12 = getelementptr inbounds i32, ptr %a, i64 2
  store i32 %div11, ptr %arrayidx12, align 4
  %arrayidx13 = getelementptr inbounds i32, ptr %b, i64 3
  %6 = load i32, ptr %arrayidx13, align 4
  %arrayidx14 = getelementptr inbounds i32, ptr %c, i64 3
  %7 = load i32, ptr %arrayidx14, align 4
  %add15 = add nsw i32 %7, %6
  %div16 = sdiv i32 %add15, 16
  %arrayidx17 = getelementptr inbounds i32, ptr %a, i64 3
  store i32 %div16, ptr %arrayidx17, align 4
  ret void
}