; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; REQUIRES: aarch64-registered-target
; RUN: opt -passes='lower-matrix-intrinsics' -mtriple=arm64-apple-iphoneos -S < %s | FileCheck %s

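; A 1x8 * 8x1 i32 multiply is lowered to a single vector multiply followed by
; llvm.vector.reduce.add.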
define <1 x i32> @dotproduct_i32_v8(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: @dotproduct_i32_v8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = mul <8 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP0]])
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <1 x i32> poison, i32 [[TMP1]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP2]]
;
entry:
  %c = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %a, <8 x i32> %b, i32 1, i32 8, i32 1)
  ret <1 x i32> %c
}

declare <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32>, <8 x i32>, i32, i32, i32)

declare void @use(<1 x i32>)

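; The same lowering is used when the result also has another instruction as a user.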
define <1 x i32> @dotproduct_i32_v8_result_used_by_inst(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: @dotproduct_i32_v8_result_used_by_inst(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = mul <8 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP0]])
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <1 x i32> poison, i32 [[TMP1]], i64 0
; CHECK-NEXT:    call void @use(<1 x i32> [[TMP2]])
; CHECK-NEXT:    ret <1 x i32> [[TMP2]]
;
entry:
  %c = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %a, <8 x i32> %b, i32 1, i32 8, i32 1)
  call void @use(<1 x i32> %c)
  ret <1 x i32> %c
}


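; A constant vector operand is handled like any other operand.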
define <1 x i32> @dotproduct_i32_v8_constvector(<8 x i32> %a) {
; CHECK-LABEL: @dotproduct_i32_v8_constvector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = mul <8 x i32> [[A:%.*]], <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP0]])
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <1 x i32> poison, i32 [[TMP1]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP2]]
;
entry:
  %c = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %a, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>, i32 1, i32 8, i32 1)
  ret <1 x i32> %c
}

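; Adds and subs feeding the dot product are lowered together with it: the
; binary operator is applied to the split operands and the multiply is still
; turned into mul + llvm.vector.reduce.add.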
define <1 x i32> @add_feeding_dotproduct_i32_v8_1(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c) {
; CHECK-LABEL: @add_feeding_dotproduct_i32_v8_1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <8 x i32> [[A:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <8 x i32> [[B:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP0:%.*]] = add <8 x i32> [[SPLIT]], [[SPLIT1]]
; CHECK-NEXT:    [[TMP1:%.*]] = mul <8 x i32> [[TMP0]], [[C:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <1 x i32> poison, i32 [[TMP2]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP3]]
;
entry:
  %add = add <8 x i32> %a, %b
  %res = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %add, <8 x i32> %c, i32 1, i32 8, i32 1)
  ret <1 x i32> %res
}

define <1 x i32> @add_feeding_dotproduct_i32_v8_2(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c) {
; CHECK-LABEL: @add_feeding_dotproduct_i32_v8_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <8 x i32> [[A:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <8 x i32> [[B:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP0:%.*]] = add <8 x i32> [[SPLIT]], [[SPLIT1]]
; CHECK-NEXT:    [[TMP1:%.*]] = mul <8 x i32> [[C:%.*]], [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <1 x i32> poison, i32 [[TMP2]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP3]]
;
entry:
  %add = add <8 x i32> %a, %b
  %res = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %c, <8 x i32> %add, i32 1, i32 8, i32 1)
  ret <1 x i32> %res
}

define <1 x i32> @sub_feeding_dotproduct_i32_v8_1(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c) {
; CHECK-LABEL: @sub_feeding_dotproduct_i32_v8_1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <8 x i32> [[A:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <8 x i32> [[B:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP0:%.*]] = sub <8 x i32> [[SPLIT]], [[SPLIT1]]
; CHECK-NEXT:    [[TMP1:%.*]] = mul <8 x i32> [[TMP0]], [[C:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <1 x i32> poison, i32 [[TMP2]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP3]]
;
entry:
  %sub = sub <8 x i32> %a, %b
  %res = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %sub, <8 x i32> %c, i32 1, i32 8, i32 1)
  ret <1 x i32> %res
}

define <1 x i32> @sub_feeding_dotproduct_i32_v8_2(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c) {
; CHECK-LABEL: @sub_feeding_dotproduct_i32_v8_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <8 x i32> [[A:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <8 x i32> [[B:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP0:%.*]] = sub <8 x i32> [[SPLIT]], [[SPLIT1]]
; CHECK-NEXT:    [[TMP1:%.*]] = mul <8 x i32> [[C:%.*]], [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <1 x i32> poison, i32 [[TMP2]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP3]]
;
entry:
  %sub = sub <8 x i32> %a, %b
  %res = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %c, <8 x i32> %sub, i32 1, i32 8, i32 1)
  ret <1 x i32> %res
}

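; Chains of adds feeding the dot product are handled the same way.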
define <1 x i32> @add_chain_feeding_dotproduct_i32_v8_1(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; CHECK-LABEL: @add_chain_feeding_dotproduct_i32_v8_1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <8 x i32> [[A:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <8 x i32> [[B:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP0:%.*]] = add <8 x i32> [[SPLIT]], [[SPLIT1]]
; CHECK-NEXT:    [[SPLIT2:%.*]] = shufflevector <8 x i32> [[C:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP1:%.*]] = add <8 x i32> [[TMP0]], [[SPLIT2]]
; CHECK-NEXT:    [[TMP2:%.*]] = mul <8 x i32> [[TMP1]], [[D:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <1 x i32> poison, i32 [[TMP3]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP4]]
;
entry:
  %add.1 = add <8 x i32> %a, %b
  %add.2 = add <8 x i32> %add.1, %c
  %res = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %add.2, <8 x i32> %d, i32 1, i32 8, i32 1)
  ret <1 x i32> %res
}

define <1 x i32> @add_chain_feeding_dotproduct_i32_v8_2(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; CHECK-LABEL: @add_chain_feeding_dotproduct_i32_v8_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <8 x i32> [[A:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <8 x i32> [[B:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP0:%.*]] = add <8 x i32> [[SPLIT]], [[SPLIT1]]
; CHECK-NEXT:    [[SPLIT2:%.*]] = shufflevector <8 x i32> [[C:%.*]], <8 x i32> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[TMP1:%.*]] = add <8 x i32> [[TMP0]], [[SPLIT2]]
; CHECK-NEXT:    [[TMP2:%.*]] = mul <8 x i32> [[D:%.*]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <1 x i32> poison, i32 [[TMP3]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP4]]
;
entry:
  %add.1 = add <8 x i32> %a, %b
  %add.2 = add <8 x i32> %add.1, %c
  %res = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %d, <8 x i32> %add.2, i32 1, i32 8, i32 1)
  ret <1 x i32> %res
}

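; For i64 the special dot-product lowering is not used; the generic
; row-by-column matrix multiply lowering is emitted instead.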
define <1 x i64> @dotproduct_i64_v8(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: @dotproduct_i64_v8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <8 x i64> [[A:%.*]], <8 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <8 x i64> [[A]], <8 x i64> poison, <1 x i32> <i32 1>
; CHECK-NEXT:    [[SPLIT2:%.*]] = shufflevector <8 x i64> [[A]], <8 x i64> poison, <1 x i32> <i32 2>
; CHECK-NEXT:    [[SPLIT3:%.*]] = shufflevector <8 x i64> [[A]], <8 x i64> poison, <1 x i32> <i32 3>
; CHECK-NEXT:    [[SPLIT4:%.*]] = shufflevector <8 x i64> [[A]], <8 x i64> poison, <1 x i32> <i32 4>
; CHECK-NEXT:    [[SPLIT5:%.*]] = shufflevector <8 x i64> [[A]], <8 x i64> poison, <1 x i32> <i32 5>
; CHECK-NEXT:    [[SPLIT6:%.*]] = shufflevector <8 x i64> [[A]], <8 x i64> poison, <1 x i32> <i32 6>
; CHECK-NEXT:    [[SPLIT7:%.*]] = shufflevector <8 x i64> [[A]], <8 x i64> poison, <1 x i32> <i32 7>
; CHECK-NEXT:    [[SPLIT8:%.*]] = shufflevector <8 x i64> [[B:%.*]], <8 x i64> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT:    [[BLOCK:%.*]] = shufflevector <1 x i64> [[SPLIT]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <8 x i64> [[SPLIT8]], i64 0
; CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x i64> poison, i64 [[TMP0]], i64 0
; CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <1 x i64> [[SPLAT_SPLATINSERT]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP1:%.*]] = mul <1 x i64> [[BLOCK]], [[SPLAT_SPLAT]]
; CHECK-NEXT:    [[BLOCK9:%.*]] = shufflevector <1 x i64> [[SPLIT1]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <8 x i64> [[SPLIT8]], i64 1
; CHECK-NEXT:    [[SPLAT_SPLATINSERT10:%.*]] = insertelement <1 x i64> poison, i64 [[TMP2]], i64 0
; CHECK-NEXT:    [[SPLAT_SPLAT11:%.*]] = shufflevector <1 x i64> [[SPLAT_SPLATINSERT10]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP3:%.*]] = mul <1 x i64> [[BLOCK9]], [[SPLAT_SPLAT11]]
; CHECK-NEXT:    [[TMP4:%.*]] = add <1 x i64> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[BLOCK12:%.*]] = shufflevector <1 x i64> [[SPLIT2]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <8 x i64> [[SPLIT8]], i64 2
; CHECK-NEXT:    [[SPLAT_SPLATINSERT13:%.*]] = insertelement <1 x i64> poison, i64 [[TMP5]], i64 0
; CHECK-NEXT:    [[SPLAT_SPLAT14:%.*]] = shufflevector <1 x i64> [[SPLAT_SPLATINSERT13]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = mul <1 x i64> [[BLOCK12]], [[SPLAT_SPLAT14]]
; CHECK-NEXT:    [[TMP7:%.*]] = add <1 x i64> [[TMP4]], [[TMP6]]
; CHECK-NEXT:    [[BLOCK15:%.*]] = shufflevector <1 x i64> [[SPLIT3]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <8 x i64> [[SPLIT8]], i64 3
; CHECK-NEXT:    [[SPLAT_SPLATINSERT16:%.*]] = insertelement <1 x i64> poison, i64 [[TMP8]], i64 0
; CHECK-NEXT:    [[SPLAT_SPLAT17:%.*]] = shufflevector <1 x i64> [[SPLAT_SPLATINSERT16]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP9:%.*]] = mul <1 x i64> [[BLOCK15]], [[SPLAT_SPLAT17]]
; CHECK-NEXT:    [[TMP10:%.*]] = add <1 x i64> [[TMP7]], [[TMP9]]
; CHECK-NEXT:    [[BLOCK18:%.*]] = shufflevector <1 x i64> [[SPLIT4]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <8 x i64> [[SPLIT8]], i64 4
; CHECK-NEXT:    [[SPLAT_SPLATINSERT19:%.*]] = insertelement <1 x i64> poison, i64 [[TMP11]], i64 0
; CHECK-NEXT:    [[SPLAT_SPLAT20:%.*]] = shufflevector <1 x i64> [[SPLAT_SPLATINSERT19]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP12:%.*]] = mul <1 x i64> [[BLOCK18]], [[SPLAT_SPLAT20]]
; CHECK-NEXT:    [[TMP13:%.*]] = add <1 x i64> [[TMP10]], [[TMP12]]
; CHECK-NEXT:    [[BLOCK21:%.*]] = shufflevector <1 x i64> [[SPLIT5]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP14:%.*]] = extractelement <8 x i64> [[SPLIT8]], i64 5
; CHECK-NEXT:    [[SPLAT_SPLATINSERT22:%.*]] = insertelement <1 x i64> poison, i64 [[TMP14]], i64 0
; CHECK-NEXT:    [[SPLAT_SPLAT23:%.*]] = shufflevector <1 x i64> [[SPLAT_SPLATINSERT22]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP15:%.*]] = mul <1 x i64> [[BLOCK21]], [[SPLAT_SPLAT23]]
; CHECK-NEXT:    [[TMP16:%.*]] = add <1 x i64> [[TMP13]], [[TMP15]]
; CHECK-NEXT:    [[BLOCK24:%.*]] = shufflevector <1 x i64> [[SPLIT6]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP17:%.*]] = extractelement <8 x i64> [[SPLIT8]], i64 6
; CHECK-NEXT:    [[SPLAT_SPLATINSERT25:%.*]] = insertelement <1 x i64> poison, i64 [[TMP17]], i64 0
; CHECK-NEXT:    [[SPLAT_SPLAT26:%.*]] = shufflevector <1 x i64> [[SPLAT_SPLATINSERT25]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP18:%.*]] = mul <1 x i64> [[BLOCK24]], [[SPLAT_SPLAT26]]
; CHECK-NEXT:    [[TMP19:%.*]] = add <1 x i64> [[TMP16]], [[TMP18]]
; CHECK-NEXT:    [[BLOCK27:%.*]] = shufflevector <1 x i64> [[SPLIT7]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP20:%.*]] = extractelement <8 x i64> [[SPLIT8]], i64 7
; CHECK-NEXT:    [[SPLAT_SPLATINSERT28:%.*]] = insertelement <1 x i64> poison, i64 [[TMP20]], i64 0
; CHECK-NEXT:    [[SPLAT_SPLAT29:%.*]] = shufflevector <1 x i64> [[SPLAT_SPLATINSERT28]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP21:%.*]] = mul <1 x i64> [[BLOCK27]], [[SPLAT_SPLAT29]]
; CHECK-NEXT:    [[TMP22:%.*]] = add <1 x i64> [[TMP19]], [[TMP21]]
; CHECK-NEXT:    [[TMP23:%.*]] = shufflevector <1 x i64> [[TMP22]], <1 x i64> poison, <1 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP24:%.*]] = shufflevector <1 x i64> poison, <1 x i64> [[TMP23]], <1 x i32> <i32 1>
; CHECK-NEXT:    ret <1 x i64> [[TMP24]]
;
entry:
  %c = tail call <1 x i64> @llvm.matrix.multiply.v1i64.v8i64.v8i64(<8 x i64> %a, <8 x i64> %b, i32 1, i32 8, i32 1)
  ret <1 x i64> %c
}

declare <1 x i64> @llvm.matrix.multiply.v1i64.v8i64.v8i64(<8 x i64>, <8 x i64>, i32, i32, i32)

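; Column-major loads that feed the dot product and cover the whole vector are
; lowered to plain flat vector loads.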
define <1 x i32> @intrinsic_column_major_load_dot_product_i32_v8(ptr %lhs_address, ptr %rhs_address) {
; CHECK-LABEL: @intrinsic_column_major_load_dot_product_i32_v8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <8 x i32>, ptr [[RHS_ADDRESS:%.*]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i32>, ptr [[LHS_ADDRESS:%.*]], align 32
; CHECK-NEXT:    [[TMP1:%.*]] = mul <8 x i32> [[TMP0]], [[COL_LOAD]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <1 x i32> poison, i32 [[TMP2]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP3]]
;
entry:
  %lhs = tail call <8 x i32> @llvm.matrix.column.major.load.v8i32.i64(ptr nonnull align 4 %lhs_address, i64 1, i1 false, i32 1, i32 8)
  %rhs = tail call <8 x i32> @llvm.matrix.column.major.load.v8i32.i64(ptr nonnull align 4 %rhs_address, i64 8, i1 false, i32 8, i32 1)
  %result = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v8i32.v8i32(<8 x i32> %lhs, <8 x i32> %rhs, i32 1, i32 8, i32 1)
  ret <1 x i32> %result
}

declare <8 x i32> @llvm.matrix.column.major.load.v8i32.i64(ptr nonnull align 4, i64, i1, i32, i32)

; This tests a case where the stride is greater than the number of columns in the load.
; It does not load all elements in the vector, so we shouldn't use the special lowering.
define <1 x i32> @intrinsic_column_major_load_dot_product_i32_stride_too_large_1(ptr %lhs_address, ptr %rhs_address) {
; CHECK-LABEL: @intrinsic_column_major_load_dot_product_i32_stride_too_large_1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <1 x i32>, ptr [[LHS_ADDRESS:%.*]], align 4
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i32, ptr [[LHS_ADDRESS]], i64 4
; CHECK-NEXT:    [[COL_LOAD1:%.*]] = load <1 x i32>, ptr [[VEC_GEP]], align 4
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr i32, ptr [[LHS_ADDRESS]], i64 8
; CHECK-NEXT:    [[COL_LOAD3:%.*]] = load <1 x i32>, ptr [[VEC_GEP2]], align 4
; CHECK-NEXT:    [[VEC_GEP4:%.*]] = getelementptr i32, ptr [[LHS_ADDRESS]], i64 12
; CHECK-NEXT:    [[COL_LOAD5:%.*]] = load <1 x i32>, ptr [[VEC_GEP4]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <1 x i32> [[COL_LOAD]], <1 x i32> [[COL_LOAD1]], <2 x i32> <i32 0, i32 1>
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <1 x i32> [[COL_LOAD3]], <1 x i32> [[COL_LOAD5]], <2 x i32> <i32 0, i32 1>
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[COL_LOAD6:%.*]] = load <4 x i32>, ptr [[RHS_ADDRESS:%.*]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = mul <4 x i32> [[TMP2]], [[COL_LOAD6]]
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <1 x i32> poison, i32 [[TMP4]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP5]]
;
entry:
  %lhs = tail call <4 x i32> @llvm.matrix.column.major.load.v4i32.i64(ptr nonnull align 4 %lhs_address, i64 4, i1 false, i32 1, i32 4)
  %rhs = tail call <4 x i32> @llvm.matrix.column.major.load.v4i32.i64(ptr nonnull align 4 %rhs_address, i64 4, i1 false, i32 4, i32 1)
  %result = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v4i32.v4i32(<4 x i32> %lhs, <4 x i32> %rhs, i32 1, i32 4, i32 1)
  ret <1 x i32> %result
}

declare <4 x i32> @llvm.matrix.column.major.load.v4i32.i64(ptr nonnull align 4, i64, i1, i32, i32)
declare <1 x i32> @llvm.matrix.multiply.v1i32.v4i32.v4i32(<4 x i32>, <4 x i32>, i32, i32, i32)

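; Same as above, but with a stride of 5 instead of 4.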
define <1 x i32> @intrinsic_column_major_load_dot_product_i32_stride_too_large_2(ptr %lhs_address, ptr %rhs_address) {
; CHECK-LABEL: @intrinsic_column_major_load_dot_product_i32_stride_too_large_2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <1 x i32>, ptr [[LHS_ADDRESS:%.*]], align 4
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i32, ptr [[LHS_ADDRESS]], i64 5
; CHECK-NEXT:    [[COL_LOAD1:%.*]] = load <1 x i32>, ptr [[VEC_GEP]], align 4
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr i32, ptr [[LHS_ADDRESS]], i64 10
; CHECK-NEXT:    [[COL_LOAD3:%.*]] = load <1 x i32>, ptr [[VEC_GEP2]], align 4
; CHECK-NEXT:    [[VEC_GEP4:%.*]] = getelementptr i32, ptr [[LHS_ADDRESS]], i64 15
; CHECK-NEXT:    [[COL_LOAD5:%.*]] = load <1 x i32>, ptr [[VEC_GEP4]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = shufflevector <1 x i32> [[COL_LOAD]], <1 x i32> [[COL_LOAD1]], <2 x i32> <i32 0, i32 1>
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <1 x i32> [[COL_LOAD3]], <1 x i32> [[COL_LOAD5]], <2 x i32> <i32 0, i32 1>
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[COL_LOAD6:%.*]] = load <4 x i32>, ptr [[RHS_ADDRESS:%.*]], align 4
; CHECK-NEXT:    [[TMP3:%.*]] = mul <4 x i32> [[TMP2]], [[COL_LOAD6]]
; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <1 x i32> poison, i32 [[TMP4]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP5]]
;
entry:
  %lhs = tail call <4 x i32> @llvm.matrix.column.major.load.v4i32.i64(ptr nonnull align 4 %lhs_address, i64 5, i1 false, i32 1, i32 4)
  %rhs = tail call <4 x i32> @llvm.matrix.column.major.load.v4i32.i64(ptr nonnull align 4 %rhs_address, i64 4, i1 false, i32 4, i32 1)
  %result = tail call <1 x i32> @llvm.matrix.multiply.v1i32.v4i32.v4i32(<4 x i32> %lhs, <4 x i32> %rhs, i32 1, i32 4, i32 1)
  ret <1 x i32> %result
}


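; Plain vector loads feeding the dot product also use the lowering, including
; for non-power-of-two vector lengths.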
define <1 x i16> @LoadInst_dot_product_i16_v6(ptr %lhs_address, ptr %rhs_address) {
; CHECK-LABEL: @LoadInst_dot_product_i16_v6(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LHS:%.*]] = load <6 x i16>, ptr [[LHS_ADDRESS:%.*]], align 16
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <6 x i16>, ptr [[RHS_ADDRESS:%.*]], align 16
; CHECK-NEXT:    [[TMP0:%.*]] = mul <6 x i16> [[LHS]], [[COL_LOAD]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.vector.reduce.add.v6i16(<6 x i16> [[TMP0]])
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <1 x i16> poison, i16 [[TMP1]], i64 0
; CHECK-NEXT:    ret <1 x i16> [[TMP2]]
;
entry:
  %lhs = load <6 x i16>, ptr %lhs_address
  %rhs = load <6 x i16>, ptr %rhs_address
  %result = tail call <1 x i16> @llvm.matrix.multiply.v1i16.v6i16.v6i16(<6 x i16> %lhs, <6 x i16> %rhs, i32 1, i32 6, i32 1)
  ret <1 x i16> %result
}

declare <1 x i16> @llvm.matrix.multiply.v1i16.v6i16.v6i16(<6 x i16>, <6 x i16>, i32, i32, i32)

declare <4 x i32> @llvm.matrix.multiply.v4i32.v4i32.v4i32(<4 x i32>, <4 x i32>, i32 immarg, i32 immarg, i32 immarg)

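; With a runtime stride the column-major load cannot be folded into a single
; flat load, but the multiply is still lowered to mul + llvm.vector.reduce.add.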
define <1 x i32> @test_builtin_column_major_variable_stride(ptr %src, <4 x i32> %a, i64 %stride) {
; CHECK-LABEL: @test_builtin_column_major_variable_stride(
; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i32, ptr [[SRC:%.*]], i64 [[VEC_START]]
; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <1 x i32>, ptr [[VEC_GEP]], align 4
; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i64 1, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[VEC_START1]]
; CHECK-NEXT:    [[COL_LOAD3:%.*]] = load <1 x i32>, ptr [[VEC_GEP2]], align 4
; CHECK-NEXT:    [[VEC_START4:%.*]] = mul i64 2, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP5:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[VEC_START4]]
; CHECK-NEXT:    [[COL_LOAD6:%.*]] = load <1 x i32>, ptr [[VEC_GEP5]], align 4
; CHECK-NEXT:    [[VEC_START7:%.*]] = mul i64 3, [[STRIDE]]
; CHECK-NEXT:    [[VEC_GEP8:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[VEC_START7]]
; CHECK-NEXT:    [[COL_LOAD9:%.*]] = load <1 x i32>, ptr [[VEC_GEP8]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <1 x i32> [[COL_LOAD]], <1 x i32> [[COL_LOAD3]], <2 x i32> <i32 0, i32 1>
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <1 x i32> [[COL_LOAD6]], <1 x i32> [[COL_LOAD9]], <2 x i32> <i32 0, i32 1>
; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT:    [[TMP4:%.*]] = mul <4 x i32> [[TMP3]], [[A:%.*]]
; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <1 x i32> poison, i32 [[TMP5]], i64 0
; CHECK-NEXT:    ret <1 x i32> [[TMP6]]
;
  %l = call <4 x i32> @llvm.matrix.column.major.load.v4i32.i64(ptr %src, i64 %stride, i1 false, i32 1, i32 4)
  %r = call <1 x i32> @llvm.matrix.multiply.v1i32.v4i32.v4i32(<4 x i32> %l, <4 x i32> %a, i32 1, i32 4, i32 1)
  ret <1 x i32> %r
}
353