; RUN: opt %loadNPMPolly '-passes=polly-import-jscop,polly-opt-isl' \
; RUN: -polly-import-jscop-postfix=transformed \
; RUN: -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
; RUN: -polly-target-2nd-cache-level-associativity=8 \
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
; RUN: -debug \
; RUN: -polly-tc-opt=true -disable-output < %s 2>&1 \
; RUN: | FileCheck %s
; REQUIRES: asserts
;
; Check that the pattern matching detects the matrix multiplication pattern
; when scalar memory accesses have been replaced by accesses to newly
; created arrays.
;
; CHECK: The tensor contraction pattern was detected
; CHECK: The matrix multiplication pattern was detected
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

define void @kernel_gemm(i32 %ni, i32 %nj, i32 %nk, double %A, ptr %B, ptr %C) {
entry:
  br label %entry.split

entry.split:                                      ; preds = %entry
  br label %for.cond1.preheader

for.cond1.preheader:                              ; preds = %for.inc16, %entry.split
  %indvars.iv35 = phi i64 [ 0, %entry.split ], [ %indvars.iv.next36, %for.inc16 ]
  br label %for.cond4.preheader

for.cond4.preheader:                              ; preds = %for.inc13, %for.cond1.preheader
  %indvars.iv32 = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next33, %for.inc13 ]
  br label %for.body6

for.body6:                                        ; preds = %for.body6, %for.cond4.preheader
  %indvars.iv = phi i64 [ 0, %for.cond4.preheader ], [ %indvars.iv.next, %for.body6 ]
  %arrayidx8 = getelementptr inbounds [1024 x double], ptr %B, i64 %indvars.iv, i64 %indvars.iv32
  %tmp = load double, ptr %arrayidx8, align 8
  %mul = fmul double %tmp, %A
  %arrayidx12 = getelementptr inbounds [1024 x double], ptr %C, i64 %indvars.iv35, i64 %indvars.iv32
  %tmp1 = load double, ptr %arrayidx12, align 8
  %add = fadd double %tmp1, %mul
  store double %add, ptr %arrayidx12, align 8
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp ne i64 %indvars.iv.next, 1024
  br i1 %exitcond, label %for.body6, label %for.inc13

for.inc13:                                        ; preds = %for.body6
  %indvars.iv.next33 = add nuw nsw i64 %indvars.iv32, 1
  %exitcond34 = icmp ne i64 %indvars.iv.next33, 1024
  br i1 %exitcond34, label %for.cond4.preheader, label %for.inc16

for.inc16:                                        ; preds = %for.inc13
  %indvars.iv.next36 = add nuw nsw i64 %indvars.iv35, 1
  %exitcond37 = icmp ne i64 %indvars.iv.next36, 1024
  br i1 %exitcond37, label %for.cond1.preheader, label %for.end18

for.end18:                                        ; preds = %for.inc16
  ret void
}
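;
; For reference, a C-level sketch of the loop nest above, reconstructed from
; the IR: the names i, j, k and the 1024 bounds are read off the induction
; variables, exit conditions, and GEPs, and are illustrative only. Note that
; %A is a scalar double operand here, not a matrix, which is why the kernel
; only becomes a full matrix multiplication once the imported JSCoP maps the
; scalar access to a newly created array.
;
;   for (long i = 0; i < 1024; i++)
;     for (long j = 0; j < 1024; j++)
;       for (long k = 0; k < 1024; k++)
;         C[i][j] += B[k][j] * A;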