; RUN: llc -mtriple=arm-none-linux-gnu -mattr=+neon,+i8mm -float-abi=hard < %s -o -| FileCheck %s
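;
; Codegen tests for the +i8mm NEON intrinsics: the 8-bit integer matrix
; multiply intrinsics (smmla/ummla/usmmla) and the mixed-sign dot product
; intrinsic (usdot), including its lane-indexed forms.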

define <4 x i32> @smmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: smmla.v4i32.v16i8
; CHECK:        vsmmla.s8       q0, q1, q2
  %vmmla1.i = tail call <4 x i32> @llvm.arm.neon.smmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) #3
  ret <4 x i32> %vmmla1.i
}

define <4 x i32> @ummla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: ummla.v4i32.v16i8
; CHECK:        vummla.u8       q0, q1, q2
  %vmmla1.i = tail call <4 x i32> @llvm.arm.neon.ummla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) #3
  ret <4 x i32> %vmmla1.i
}

define <4 x i32> @usmmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) {
entry:
; CHECK-LABEL: usmmla.v4i32.v16i8
; CHECK:        vusmmla.s8       q0, q1, q2
  %vusmmla1.i = tail call <4 x i32> @llvm.arm.neon.usmmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b) #3
  ret <4 x i32> %vusmmla1.i
}

define <2 x i32> @usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
entry:
; CHECK-LABEL: usdot.v2i32.v8i8
; CHECK:        vusdot.s8       d0, d1, d2
  %vusdot1.i = tail call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) #3
  ret <2 x i32> %vusdot1.i
}

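; The lane-indexed tests below splat one 32-bit lane of %b (via bitcast and
; shufflevector) before the dot-product call, so the indexed-register form of
; the instruction (e.g. d2[0]) is selected.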
define <2 x i32> @usdot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
entry:
; CHECK-LABEL: usdot_lane.v2i32.v8i8
; CHECK:        vusdot.s8       d0, d1, d2[0]
  %0 = bitcast <8 x i8> %b to <2 x i32>
  %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
  %1 = bitcast <2 x i32> %shuffle to <8 x i8>
  %vusdot1.i = tail call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %1) #3
  ret <2 x i32> %vusdot1.i
}

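; vsudot is selected from the usdot intrinsic with its multiplicand operands
; swapped: the splatted lane value is passed in the unsigned (first) position.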
define <2 x i32> @sudot_lane.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b) {
entry:
; CHECK-LABEL: sudot_lane.v2i32.v8i8
; CHECK:        vsudot.u8       d0, d1, d2[0]
  %0 = bitcast <8 x i8> %b to <2 x i32>
  %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
  %1 = bitcast <2 x i32> %shuffle to <8 x i8>
  %vusdot1.i = tail call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %1, <8 x i8> %a) #3
  ret <2 x i32> %vusdot1.i
}

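; The 128-bit (q-register) variants splat the 32-bit lane to a <4 x i32>
; vector; the indexed operand of the selected instruction is still a
; d register (d4[0]).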
define <4 x i32> @usdotq_lane.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <8 x i8> %b) {
entry:
; CHECK-LABEL: usdotq_lane.v4i32.v16i8
; CHECK:        vusdot.s8       q0, q1, d4[0]
  %0 = bitcast <8 x i8> %b to <2 x i32>
  %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> zeroinitializer
  %1 = bitcast <4 x i32> %shuffle to <16 x i8>
  %vusdot1.i = tail call <4 x i32> @llvm.arm.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %1) #3
  ret <4 x i32> %vusdot1.i
}

define <4 x i32> @sudotq_lane.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <8 x i8> %b) {
entry:
; CHECK-LABEL: sudotq_lane.v4i32.v16i8
; CHECK:        vsudot.u8       q0, q1, d4[0]
  %0 = bitcast <8 x i8> %b to <2 x i32>
  %shuffle = shufflevector <2 x i32> %0, <2 x i32> undef, <4 x i32> zeroinitializer
  %1 = bitcast <4 x i32> %shuffle to <16 x i8>
  %vusdot1.i = tail call <4 x i32> @llvm.arm.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %1, <16 x i8> %a) #3
  ret <4 x i32> %vusdot1.i
}

declare <4 x i32> @llvm.arm.neon.smmla.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2
declare <4 x i32> @llvm.arm.neon.ummla.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2
declare <4 x i32> @llvm.arm.neon.usmmla.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2
declare <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>) #2
declare <4 x i32> @llvm.arm.neon.usdot.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>) #2