; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes="lower-matrix-intrinsics<minimal>,instcombine,verify<domtree>" -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix %s -S | FileCheck %s
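; The fusion and tiling flags on the RUN line (-force-fuse-matrix,
; -fuse-matrix-tile-size=2) are passed deliberately: fusing and tiling the
; multiply would need the analyses the minimal variant does not request, so
; the lowering below must fall back to the plain, untiled code path.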
; Test for the minimal version of the matrix lowering pass, which does not
; require DominatorTree (DT) or AliasAnalysis (AA) results. Make sure no
; tiling happens, even though it was requested.
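; With tiling disabled, the 2x4 * 4x2 multiply is expected to lower to
; straight-line code: column-wise <2 x double> loads of %A, two <4 x double>
; column loads of %B, a splat + fmuladd chain per result column (contraction
; is allowed via -matrix-allow-contract), and two <2 x double> stores to %C.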

; REQUIRES: aarch64-registered-target

target datalayout = "e-m:o-i64:64-f80:128-n8:8:32:64-S128"
target triple = "aarch64-apple-ios"

define void @multiply(ptr %A, ptr %B, ptr %C) {
; CHECK-LABEL: @multiply(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 16
; CHECK-NEXT:    [[COL_LOAD1:%.*]] = load <2 x double>, ptr [[VEC_GEP]], align 8
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr i8, ptr [[A]], i64 32
; CHECK-NEXT:    [[COL_LOAD3:%.*]] = load <2 x double>, ptr [[VEC_GEP2]], align 8
; CHECK-NEXT:    [[VEC_GEP4:%.*]] = getelementptr i8, ptr [[A]], i64 48
; CHECK-NEXT:    [[COL_LOAD5:%.*]] = load <2 x double>, ptr [[VEC_GEP4]], align 8
; CHECK-NEXT:    [[COL_LOAD6:%.*]] = load <4 x double>, ptr [[B:%.*]], align 8
; CHECK-NEXT:    [[VEC_GEP7:%.*]] = getelementptr i8, ptr [[B]], i64 32
; CHECK-NEXT:    [[COL_LOAD8:%.*]] = load <4 x double>, ptr [[VEC_GEP7]], align 8
; CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x double> [[COL_LOAD6]], <4 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP0:%.*]] = fmul contract <2 x double> [[COL_LOAD]], [[SPLAT_SPLAT]]
; CHECK-NEXT:    [[SPLAT_SPLAT11:%.*]] = shufflevector <4 x double> [[COL_LOAD6]], <4 x double> poison, <2 x i32> <i32 1, i32 1>
; CHECK-NEXT:    [[TMP1:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD1]], <2 x double> [[SPLAT_SPLAT11]], <2 x double> [[TMP0]])
; CHECK-NEXT:    [[SPLAT_SPLAT14:%.*]] = shufflevector <4 x double> [[COL_LOAD6]], <4 x double> poison, <2 x i32> <i32 2, i32 2>
; CHECK-NEXT:    [[TMP2:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD3]], <2 x double> [[SPLAT_SPLAT14]], <2 x double> [[TMP1]])
; CHECK-NEXT:    [[SPLAT_SPLAT17:%.*]] = shufflevector <4 x double> [[COL_LOAD6]], <4 x double> poison, <2 x i32> <i32 3, i32 3>
; CHECK-NEXT:    [[TMP3:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD5]], <2 x double> [[SPLAT_SPLAT17]], <2 x double> [[TMP2]])
; CHECK-NEXT:    [[SPLAT_SPLAT20:%.*]] = shufflevector <4 x double> [[COL_LOAD8]], <4 x double> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP4:%.*]] = fmul contract <2 x double> [[COL_LOAD]], [[SPLAT_SPLAT20]]
; CHECK-NEXT:    [[SPLAT_SPLAT23:%.*]] = shufflevector <4 x double> [[COL_LOAD8]], <4 x double> poison, <2 x i32> <i32 1, i32 1>
; CHECK-NEXT:    [[TMP5:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD1]], <2 x double> [[SPLAT_SPLAT23]], <2 x double> [[TMP4]])
; CHECK-NEXT:    [[SPLAT_SPLAT26:%.*]] = shufflevector <4 x double> [[COL_LOAD8]], <4 x double> poison, <2 x i32> <i32 2, i32 2>
; CHECK-NEXT:    [[TMP6:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD3]], <2 x double> [[SPLAT_SPLAT26]], <2 x double> [[TMP5]])
; CHECK-NEXT:    [[SPLAT_SPLAT29:%.*]] = shufflevector <4 x double> [[COL_LOAD8]], <4 x double> poison, <2 x i32> <i32 3, i32 3>
; CHECK-NEXT:    [[TMP7:%.*]] = call contract <2 x double> @llvm.fmuladd.v2f64(<2 x double> [[COL_LOAD5]], <2 x double> [[SPLAT_SPLAT29]], <2 x double> [[TMP6]])
; CHECK-NEXT:    store <2 x double> [[TMP3]], ptr [[C:%.*]], align 8
; CHECK-NEXT:    [[VEC_GEP30:%.*]] = getelementptr i8, ptr [[C]], i64 16
; CHECK-NEXT:    store <2 x double> [[TMP7]], ptr [[VEC_GEP30]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %a = load <8 x double>, ptr %A, align 8
  %b = load <8 x double>, ptr %B, align 8

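  ; %a holds a 2 x 4 matrix and %b a 4 x 2 matrix, both flattened column-major
  ; into <8 x double>; the product %c is a 2 x 2 matrix stored as <4 x double>.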
  %c = call <4 x double> @llvm.matrix.multiply(<8 x double> %a, <8 x double> %b, i32 2, i32 4, i32 2)

  store <4 x double> %c, ptr %C, align 8
  ret void
}

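; llvm.matrix.multiply(%A, %B, i32 M, i32 N, i32 K) treats %A as an M x N and
; %B as an N x K matrix and returns the M x K product; here M = 2, N = 4,
; K = 2, matching the 2x4 * 4x2 = 2x2 shapes used above.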
declare <4 x double> @llvm.matrix.multiply(<8 x double>, <8 x double>, i32, i32, i32)