; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

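; MUL (predicated, merging form): active lanes of z0 receive the low half of
; the element-wise product z0 * z1; inactive lanes keep their original z0 value.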
define <vscale x 16 x i8> @mul_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: mul_i8:
; CHECK: mul z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @mul_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: mul_i16:
; CHECK: mul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @mul_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: mul_i32:
; CHECK: mul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @mul_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: mul_i64:
; CHECK: mul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

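; SMULH returns the high (most significant) half of each signed element product.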
define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smulh_i8:
; CHECK: smulh z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smulh.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smulh_i16:
; CHECK: smulh z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smulh.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smulh_i32:
; CHECK: smulh z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smulh.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smulh_i64:
; CHECK: smulh z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smulh.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

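; UMULH returns the high half of each unsigned element product.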
define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umulh_i8:
; CHECK: umulh z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umulh.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umulh_i16:
; CHECK: umulh z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umulh.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umulh_i32:
; CHECK: umulh z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umulh.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umulh_i64:
; CHECK: umulh z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umulh.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x  i8> @llvm.aarch64.sve.mul.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.mul.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.mul.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.mul.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)
declare <vscale x 16 x  i8> @llvm.aarch64.sve.smulh.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.smulh.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.smulh.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.smulh.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)
declare <vscale x 16 x  i8> @llvm.aarch64.sve.umulh.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x  i8>, <vscale x 16 x  i8>)
declare <vscale x  8 x i16> @llvm.aarch64.sve.umulh.nxv8i16(<vscale x  8 x i1>, <vscale x  8 x i16>, <vscale x  8 x i16>)
declare <vscale x  4 x i32> @llvm.aarch64.sve.umulh.nxv4i32(<vscale x  4 x i1>, <vscale x  4 x i32>, <vscale x  4 x i32>)
declare <vscale x  2 x i64> @llvm.aarch64.sve.umulh.nxv2i64(<vscale x  2 x i1>, <vscale x  2 x i64>, <vscale x  2 x i64>)