; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; REQUIRES: aarch64-registered-target
; RUN: opt -passes='lower-matrix-intrinsics' -mtriple=arm64-apple-iphoneos -S < %s | FileCheck %s

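; A 1x6 * 6x1 float multiply is recognized as a dot product and lowered to a
; single fmul followed by a fast fadd reduction, instead of a scalar
; multiply-add chain.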
define <1 x float> @dotproduct_float_v6(<6 x float> %a, <6 x float> %b) {
; CHECK-LABEL: @dotproduct_float_v6(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = fmul <6 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.vector.reduce.fadd.v6f32(float 0.000000e+00, <6 x float> [[TMP0]])
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <1 x float> poison, float [[TMP1]], i64 0
; CHECK-NEXT:    ret <1 x float> [[TMP2]]
;
entry:
  %c = tail call fast <1 x float> @llvm.matrix.multiply.v1f32.v6f32.v6f32(<6 x float> %a, <6 x float> %b, i32 1, i32 6, i32 1)
  ret <1 x float> %c
}

declare <1 x float> @llvm.matrix.multiply.v1f32.v6f32.v6f32(<6 x float>, <6 x float>, i32, i32, i32)

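; Degenerate 1x1 * 1x1 case: still lowered via fmul and a reduction over a
; single-element vector.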
define <1 x float> @dotproduct_float_v1(<1 x float> %a, <1 x float> %b) {
; CHECK-LABEL: @dotproduct_float_v1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = fmul <1 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.vector.reduce.fadd.v1f32(float 0.000000e+00, <1 x float> [[TMP0]])
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <1 x float> poison, float [[TMP1]], i64 0
; CHECK-NEXT:    ret <1 x float> [[TMP2]]
;
entry:
  %c = tail call fast <1 x float> @llvm.matrix.multiply.v1f32.v1f32.v1f32(<1 x float> %a, <1 x float> %b, i32 1, i32 1, i32 1)
  ret <1 x float> %c
}

declare <1 x float> @llvm.matrix.multiply.v1f32.v1f32.v1f32(<1 x float>, <1 x float>, i32, i32, i32)

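; Non-power-of-two vector width: a 1x3 * 3x1 multiply gets the same fmul plus
; fadd-reduction lowering.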
define <1 x float> @dotproduct_float_v3(<3 x float> %a, <3 x float> %b) {
; CHECK-LABEL: @dotproduct_float_v3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = fmul <3 x float> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.vector.reduce.fadd.v3f32(float 0.000000e+00, <3 x float> [[TMP0]])
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <1 x float> poison, float [[TMP1]], i64 0
; CHECK-NEXT:    ret <1 x float> [[TMP2]]
;
entry:
  %c = tail call fast <1 x float> @llvm.matrix.multiply.v1f32.v3f32.v3f32(<3 x float> %a, <3 x float> %b, i32 1, i32 3, i32 1)
  ret <1 x float> %c
}

declare <1 x float> @llvm.matrix.multiply.v1f32.v3f32.v3f32(<3 x float>, <3 x float>, i32, i32, i32)

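; Operands produced by llvm.matrix.column.major.load: the column-major loads
; are lowered to flat vector loads feeding the same fmul + reduction sequence.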
define <1 x float> @intrinsic_column_major_load_dot_product_float_v6(ptr %lhs_address, ptr %rhs_address) {
; CHECK-LABEL: @intrinsic_column_major_load_dot_product_float_v6(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <6 x float>, ptr [[RHS_ADDRESS:%.*]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = load <6 x float>, ptr [[LHS_ADDRESS:%.*]], align 32
; CHECK-NEXT:    [[TMP1:%.*]] = fmul <6 x float> [[TMP0]], [[COL_LOAD]]
; CHECK-NEXT:    [[TMP2:%.*]] = call fast float @llvm.vector.reduce.fadd.v6f32(float 0.000000e+00, <6 x float> [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <1 x float> poison, float [[TMP2]], i64 0
; CHECK-NEXT:    ret <1 x float> [[TMP3]]
;
entry:
  %lhs = tail call fast <6 x float> @llvm.matrix.column.major.load.v6f32.i64(ptr nonnull align 4 %lhs_address, i64 1, i1 false, i32 1, i32 6)
  %rhs = tail call fast <6 x float> @llvm.matrix.column.major.load.v6f32.i64(ptr nonnull align 4 %rhs_address, i64 6, i1 false, i32 6, i32 1)
  %result = tail call fast <1 x float> @llvm.matrix.multiply.v1f32.v6f32.v6f32(<6 x float> %lhs, <6 x float> %rhs, i32 1, i32 6, i32 1)
  ret <1 x float> %result
}

declare <6 x float> @llvm.matrix.column.major.load.v6f32.i64(ptr nonnull align 4, i64, i1, i32, i32)

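; Operands come from plain LoadInsts with an odd element count (v7); the loads
; stay as-is and feed the dot-product lowering directly.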
define <1 x float> @LoadInst_dot_product_float_v7(ptr %lhs_address, ptr %rhs_address) {
; CHECK-LABEL: @LoadInst_dot_product_float_v7(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LHS:%.*]] = load <7 x float>, ptr [[LHS_ADDRESS:%.*]], align 32
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <7 x float>, ptr [[RHS_ADDRESS:%.*]], align 32
; CHECK-NEXT:    [[TMP0:%.*]] = fmul <7 x float> [[LHS]], [[COL_LOAD]]
; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.vector.reduce.fadd.v7f32(float 0.000000e+00, <7 x float> [[TMP0]])
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <1 x float> poison, float [[TMP1]], i64 0
; CHECK-NEXT:    ret <1 x float> [[TMP2]]
;
entry:
  %lhs = load <7 x float>, ptr %lhs_address
  %rhs = load <7 x float>, ptr %rhs_address
  %c = tail call fast <1 x float> @llvm.matrix.multiply.v1f32.v7f32.v7f32(<7 x float> %lhs, <7 x float> %rhs, i32 1, i32 7, i32 1)
  ret <1 x float> %c
}

declare <1 x float> @llvm.matrix.multiply.v1f32.v7f32.v7f32(<7 x float>, <7 x float>, i32, i32, i32)

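; Same dot-product lowering for double elements: fmul + fast fadd reduction
; on v6f64.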
define <1 x double> @dotproduct_double_v6(<6 x double> %a, <6 x double> %b) {
; CHECK-LABEL: @dotproduct_double_v6(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = fmul <6 x double> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call fast double @llvm.vector.reduce.fadd.v6f64(double 0.000000e+00, <6 x double> [[TMP0]])
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <1 x double> poison, double [[TMP1]], i64 0
; CHECK-NEXT:    ret <1 x double> [[TMP2]]
;
entry:
  %c = tail call fast <1 x double> @llvm.matrix.multiply.v1f64.v6f64.v6f64(<6 x double> %a, <6 x double> %b, i32 1, i32 6, i32 1)
  ret <1 x double> %c
}

declare <1 x double> @llvm.matrix.multiply.v1f64.v6f64.v6f64(<6 x double>, <6 x double>, i32, i32, i32)

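; Double variant of the column-major-load case above.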
define <1 x double> @intrinsic_column_major_load_dot_product_double_v6(ptr %lhs_address, ptr %rhs_address) {
; CHECK-LABEL: @intrinsic_column_major_load_dot_product_double_v6(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <6 x double>, ptr [[RHS_ADDRESS:%.*]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = load <6 x double>, ptr [[LHS_ADDRESS:%.*]], align 64
; CHECK-NEXT:    [[TMP1:%.*]] = fmul <6 x double> [[TMP0]], [[COL_LOAD]]
; CHECK-NEXT:    [[TMP2:%.*]] = call fast double @llvm.vector.reduce.fadd.v6f64(double 0.000000e+00, <6 x double> [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <1 x double> poison, double [[TMP2]], i64 0
; CHECK-NEXT:    ret <1 x double> [[TMP3]]
;
entry:
  %lhs = tail call fast <6 x double> @llvm.matrix.column.major.load.v6f64.i64(ptr nonnull align 4 %lhs_address, i64 1, i1 false, i32 1, i32 6)
  %rhs = tail call fast <6 x double> @llvm.matrix.column.major.load.v6f64.i64(ptr nonnull align 4 %rhs_address, i64 6, i1 false, i32 6, i32 1)
  %result = tail call fast <1 x double> @llvm.matrix.multiply.v1f64.v6f64.v6f64(<6 x double> %lhs, <6 x double> %rhs, i32 1, i32 6, i32 1)
  ret <1 x double> %result
}

declare <6 x double> @llvm.matrix.column.major.load.v6f64.i64(ptr nonnull align 4, i64, i1, i32, i32)

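; Double variant of the plain-load v7 case above.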
define <1 x double> @LoadInst_dot_product_double_v7(ptr %lhs_address, ptr %rhs_address) {
; CHECK-LABEL: @LoadInst_dot_product_double_v7(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LHS:%.*]] = load <7 x double>, ptr [[LHS_ADDRESS:%.*]], align 64
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <7 x double>, ptr [[RHS_ADDRESS:%.*]], align 64
; CHECK-NEXT:    [[TMP0:%.*]] = fmul <7 x double> [[LHS]], [[COL_LOAD]]
; CHECK-NEXT:    [[TMP1:%.*]] = call fast double @llvm.vector.reduce.fadd.v7f64(double 0.000000e+00, <7 x double> [[TMP0]])
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <1 x double> poison, double [[TMP1]], i64 0
; CHECK-NEXT:    ret <1 x double> [[TMP2]]
;
entry:
  %lhs = load <7 x double>, ptr %lhs_address
  %rhs = load <7 x double>, ptr %rhs_address
  %c = tail call fast <1 x double> @llvm.matrix.multiply.v1f64.v7f64.v7f64(<7 x double> %lhs, <7 x double> %rhs, i32 1, i32 7, i32 1)
  ret <1 x double> %c
}

declare <1 x double> @llvm.matrix.multiply.v1f64.v7f64.v7f64(<7 x double>, <7 x double>, i32, i32, i32)