; RUN: opt %loadNPMPolly -polly-pattern-matching-based-opts=true \
; RUN: -polly-target-throughput-vector-fma=1 \
; RUN: -polly-target-latency-vector-fma=8 \
; RUN: -polly-target-1st-cache-level-associativity=8 \
; RUN: -polly-target-2nd-cache-level-associativity=8 \
; RUN: -polly-target-1st-cache-level-size=32768 \
; RUN: -polly-target-vector-register-bitwidth=256 \
; RUN: -polly-target-2nd-cache-level-size=262144 \
; RUN: -passes=polly-opt-isl -disable-output < %s
;
; RUN: opt %loadNPMPolly '-passes=print<polly-dependences>' -disable-output < %s | FileCheck %s --check-prefix=DEPENDENCES
;
; /* C := A * B + C */
; /* Elements of the matrices A, B, C have the char type. */
; /* The type size of elements of the matrix multiplication operands is used
;    to determine the parameters of the code produced by the optimization
;    of the matrix multiplication (e.g. bounds of the loops of the loop
;    nest, the innermost loop body). This test checks the form of
;    the generated loop nest. See getMicroKernelParams and
;    getMacroKernelParams from lib/Transform/ScheduleOptimizer.cpp
;    for details.
;
;    This test also checks that we can detect matrix multiplication
;    in case there are reduction dependencies and there are no RAW
;    dependencies.
; */
; for (i = 0; i < _PB_NI; i++)
;   for (j = 0; j < _PB_NJ; j++)
;     for (k = 0; k < _PB_NK; ++k)
;       C[i][j] += A[i][k] * B[k][j];
;
; DEPENDENCES: RAW dependences:
; DEPENDENCES-NEXT: { }
; DEPENDENCES-NEXT: WAR dependences:
; DEPENDENCES-NEXT: { }
; DEPENDENCES-NEXT: WAW dependences:
; DEPENDENCES-NEXT: { }
; DEPENDENCES-NEXT: Reduction dependences:
; DEPENDENCES-NEXT: { Stmt_for_body6[i0, i1, i2] -> Stmt_for_body6[i0, i1, 1 + i2] : 0 <= i0 <= 1023 and 0 <= i1 <= 1023 and 0 <= i2 <= 1022 }
; DEPENDENCES-NEXT: Transitive closure of reduction dependences:
; DEPENDENCES-NEXT: { Stmt_for_body6[i0, i1, i2] -> Stmt_for_body6[i0, i1, o2] : 0 <= i0 <= 1023 and 0 <= i1 <= 1023 and ((i2 >= 0 and i2 < o2 <= 1023) or (i2 <= 1023 and 0 <= o2 < i2)) }
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

; i8 (char-element) GEMM over fixed 1024x1024 matrices:
;   %indvars.iv45 = i (outer), %indvars.iv42 = j (middle), %indvars.iv = k (inner).
; The innermost statement (Stmt_for_body6 in Polly's model) is
; C[i][j] += A[i][k] * B[k][j]; the load/store of C[i][j] inside the k-loop
; forms the reduction dependence chain checked by the DEPENDENCES prefix above.
define internal void @kernel_gemm(i32 %ni, i32 %nj, i32 %nk, i8 signext %alpha, i8 signext %beta, ptr %C, ptr %A, ptr %B) {
entry:
  br label %entry.split

entry.split:                                      ; preds = %entry
  br label %for.cond1.preheader

for.cond1.preheader:                              ; preds = %for.inc23, %entry.split
  %indvars.iv45 = phi i64 [ 0, %entry.split ], [ %indvars.iv.next46, %for.inc23 ]
  br label %for.cond4.preheader

for.cond4.preheader:                              ; preds = %for.inc20, %for.cond1.preheader
  %indvars.iv42 = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next43, %for.inc20 ]
  br label %for.body6

for.body6:                                        ; preds = %for.body6, %for.cond4.preheader
  %indvars.iv = phi i64 [ 0, %for.cond4.preheader ], [ %indvars.iv.next, %for.body6 ]
  %arrayidx8 = getelementptr inbounds [1024 x i8], ptr %A, i64 %indvars.iv45, i64 %indvars.iv
  %tmp = load i8, ptr %arrayidx8, align 1
  %arrayidx12 = getelementptr inbounds [1024 x i8], ptr %B, i64 %indvars.iv, i64 %indvars.iv42
  %tmp1 = load i8, ptr %arrayidx12, align 1
  %mul = mul i8 %tmp1, %tmp
  %arrayidx17 = getelementptr inbounds [1024 x i8], ptr %C, i64 %indvars.iv45, i64 %indvars.iv42
  %tmp2 = load i8, ptr %arrayidx17, align 1
  %add = add i8 %mul, %tmp2
  store i8 %add, ptr %arrayidx17, align 1
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp ne i64 %indvars.iv.next, 1024
  br i1 %exitcond, label %for.body6, label %for.inc20

for.inc20:                                        ; preds = %for.body6
  %indvars.iv.next43 = add nuw nsw i64 %indvars.iv42, 1
  %exitcond44 = icmp ne i64 %indvars.iv.next43, 1024
  br i1 %exitcond44, label %for.cond4.preheader, label %for.inc23

for.inc23:                                        ; preds = %for.inc20
  %indvars.iv.next46 = add nuw nsw i64 %indvars.iv45, 1
  %exitcond47 = icmp ne i64 %indvars.iv.next46, 1024
  br i1 %exitcond47, label %for.cond1.preheader, label %for.end25

for.end25:                                        ; preds = %for.inc23
  ret void
}