; RUN: opt %loadNPMPolly -passes=polly-opt-isl -polly-pattern-matching-based-opts=true \
; RUN: -polly-tc-opt=true -debug -disable-output < %s 2>&1 | FileCheck %s
; REQUIRES: asserts
;
;    /* C := alpha*A*B + beta*C */
;    for (i = 0; i < _PB_NI; i++)
;      for (j = 0; j < _PB_NJ; j += 2)
;        {
;          C[i][j] *= beta;
;          for (k = 0; k < _PB_NK; ++k)
;            C[i][j] += alpha * A[i][k] * B[k][j];
;        }
;
; Check that we do not detect the matrix multiplication pattern when,
; for example, some memory accesses have stride 2 after loop interchange.
;
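; As a sketch of why (hedged; the interchanged nest below is illustrative
; and not taken from this test): once the optimizer considers an order
; with j innermost, the j += 2 step makes the C and B accesses stride 2
; rather than the unit stride the pattern match expects:
;
;    for (i = 0; i < _PB_NI; i++)
;      for (k = 0; k < _PB_NK; ++k)
;        for (j = 0; j < _PB_NJ; j += 2)   /* stride-2 accesses over j */
;          C[i][j] += alpha * A[i][k] * B[k][j];
;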
; CHECK-NOT: The matrix multiplication pattern was detected
; CHECK-NOT: The tensor contraction pattern was detected
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

define internal void @kernel_gemm(i32 %arg, i32 %arg1, i32 %arg2, double %arg3, double %arg4, ptr %arg5, ptr %arg6, ptr %arg7) #0 {
bb:
  br label %bb8

bb8:                                              ; preds = %bb29, %bb
  %tmp = phi i64 [ 0, %bb ], [ %tmp30, %bb29 ]    ; i
  br label %bb9

bb9:                                              ; preds = %bb26, %bb8
  %tmp10 = phi i64 [ 0, %bb8 ], [ %tmp27, %bb26 ] ; j (step 2)
  %tmp11 = getelementptr inbounds [1056 x double], ptr %arg5, i64 %tmp, i64 %tmp10 ; &C[i][j]
  %tmp12 = load double, ptr %tmp11, align 8
  %tmp13 = fmul double %tmp12, %arg4              ; C[i][j] * beta
  store double %tmp13, ptr %tmp11, align 8
  br label %Copy_0

Copy_0:                                           ; preds = %Copy_0, %bb9
  %tmp15 = phi i64 [ 0, %bb9 ], [ %tmp24, %Copy_0 ] ; k
  %tmp16 = getelementptr inbounds [1024 x double], ptr %arg6, i64 %tmp, i64 %tmp15 ; &A[i][k]
  %tmp17 = load double, ptr %tmp16, align 8
  %tmp18 = fmul double %tmp17, %arg3              ; alpha * A[i][k]
  %tmp19 = getelementptr inbounds [1056 x double], ptr %arg7, i64 %tmp15, i64 %tmp10 ; &B[k][j]
  %tmp20 = load double, ptr %tmp19, align 8
  %tmp21 = fmul double %tmp18, %tmp20
  %tmp22 = load double, ptr %tmp11, align 8
  %tmp23 = fadd double %tmp22, %tmp21             ; C[i][j] += alpha * A[i][k] * B[k][j]
  store double %tmp23, ptr %tmp11, align 8
  %tmp24 = add nuw nsw i64 %tmp15, 1
  %tmp25 = icmp ne i64 %tmp24, 1024
  br i1 %tmp25, label %Copy_0, label %bb26

bb26:                                             ; preds = %Copy_0
  %tmp27 = add nuw nsw i64 %tmp10, 2              ; j += 2
  %tmp28 = icmp ne i64 %tmp27, 1056
  br i1 %tmp28, label %bb9, label %bb29

bb29:                                             ; preds = %bb26
  %tmp30 = add nuw nsw i64 %tmp, 1
  %tmp31 = icmp ne i64 %tmp30, 1056
  br i1 %tmp31, label %bb8, label %bb32

bb32:                                             ; preds = %bb29
  ret void
}