; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

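; Each test builds the canonical unsigned high-half multiply: both i32 inputs
; are zero-extended to i64, multiplied, shifted right by 32, and truncated
; back to i32. The expected lowering is a single vmulhu (or, for suitable
; constants, a plain shift) instead of a widening multiply.
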
define <vscale x 1 x i32> @vmulhu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmulhu_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vv v8, v9, v8
; CHECK-NEXT:    ret
  %vc = zext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
  %vd = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %ve = mul <vscale x 1 x i64> %vc, %vd
  %vf = lshr <vscale x 1 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 1 x i64> %vf to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vg
}

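; The scalar operand is splatted via insertelement + shufflevector; selection
; should use the vector-scalar form (vmulhu.vx) and keep the multiplier in a
; GPR instead of materializing a splat vector.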
define <vscale x 1 x i32> @vmulhu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulhu_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 1 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vb = zext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = mul <vscale x 1 x i64> %vb, %vc
  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vf
}

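; The constant -7 is 0xFFFFFFF9 as unsigned, so there is nothing to fold; it
; is materialized with li and consumed through vmulhu.vx.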
define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmulhu_vi_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = zext <vscale x 1 x i32> splat (i32 -7) to <vscale x 1 x i64>
  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = mul <vscale x 1 x i64> %vb, %vc
  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vf
}

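; Multiplying by 16 = 2^4 makes the high 32 bits equal to the input shifted
; right by 32 - 4 = 28. RV32 folds this to a single vsrl.vi; RV64 currently
; still materializes the constant and emits vmulhu.vx.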
define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
; RV32-LABEL: vmulhu_vi_nxv1i32_1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vsrl.vi v8, v8, 28
; RV32-NEXT:    ret
;
; RV64-LABEL: vmulhu_vi_nxv1i32_1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 16
; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; RV64-NEXT:    vmulhu.vx v8, v8, a0
; RV64-NEXT:    ret
  %vb = zext <vscale x 1 x i32> splat (i32 16) to <vscale x 1 x i64>
  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = mul <vscale x 1 x i64> %vb, %vc
  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vf
}

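; The same four patterns repeated at nxv2i32, where e32 gives LMUL=1 (m1).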
define <vscale x 2 x i32> @vmulhu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmulhu_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v8, v9, v8
; CHECK-NEXT:    ret
  %vc = zext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
  %vd = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %ve = mul <vscale x 2 x i64> %vc, %vd
  %vf = lshr <vscale x 2 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 2 x i64> %vf to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vg
}

define <vscale x 2 x i32> @vmulhu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulhu_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 2 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vb = zext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
  %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = mul <vscale x 2 x i64> %vb, %vc
  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vf
}

define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmulhu_vi_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = zext <vscale x 2 x i32> splat (i32 -7) to <vscale x 2 x i64>
  %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = mul <vscale x 2 x i64> %vb, %vc
  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vf
}

define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
; RV32-LABEL: vmulhu_vi_nxv2i32_1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT:    vsrl.vi v8, v8, 28
; RV32-NEXT:    ret
;
; RV64-LABEL: vmulhu_vi_nxv2i32_1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 16
; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV64-NEXT:    vmulhu.vx v8, v8, a0
; RV64-NEXT:    ret
  %vb = zext <vscale x 2 x i32> splat (i32 16) to <vscale x 2 x i64>
  %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = mul <vscale x 2 x i64> %vb, %vc
  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vf
}

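; Same patterns again at nxv4i32 (e32, m2).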
define <vscale x 4 x i32> @vmulhu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmulhu_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulhu.vv v8, v10, v8
; CHECK-NEXT:    ret
  %vc = zext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
  %vd = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %ve = mul <vscale x 4 x i64> %vc, %vd
  %vf = lshr <vscale x 4 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 4 x i64> %vf to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vg
}

define <vscale x 4 x i32> @vmulhu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulhu_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vb = zext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
  %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = mul <vscale x 4 x i64> %vb, %vc
  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vf
}

define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmulhu_vi_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = zext <vscale x 4 x i32> splat (i32 -7) to <vscale x 4 x i64>
  %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = mul <vscale x 4 x i64> %vb, %vc
  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vf
}

define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
; RV32-LABEL: vmulhu_vi_nxv4i32_1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vsrl.vi v8, v8, 28
; RV32-NEXT:    ret
;
; RV64-LABEL: vmulhu_vi_nxv4i32_1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 16
; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV64-NEXT:    vmulhu.vx v8, v8, a0
; RV64-NEXT:    ret
  %vb = zext <vscale x 4 x i32> splat (i32 16) to <vscale x 4 x i64>
  %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = mul <vscale x 4 x i64> %vb, %vc
  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vf
}

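; Same patterns again at nxv8i32 (e32, m4).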
define <vscale x 8 x i32> @vmulhu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmulhu_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulhu.vv v8, v12, v8
; CHECK-NEXT:    ret
  %vc = zext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
  %vd = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %ve = mul <vscale x 8 x i64> %vc, %vd
  %vf = lshr <vscale x 8 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 8 x i64> %vf to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vg
}

define <vscale x 8 x i32> @vmulhu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulhu_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 8 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vb = zext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = mul <vscale x 8 x i64> %vb, %vc
  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vf
}

define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmulhu_vi_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = zext <vscale x 8 x i32> splat (i32 -7) to <vscale x 8 x i64>
  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = mul <vscale x 8 x i64> %vb, %vc
  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vf
}

define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
; RV32-LABEL: vmulhu_vi_nxv8i32_1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; RV32-NEXT:    vsrl.vi v8, v8, 28
; RV32-NEXT:    ret
;
; RV64-LABEL: vmulhu_vi_nxv8i32_1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 16
; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV64-NEXT:    vmulhu.vx v8, v8, a0
; RV64-NEXT:    ret
  %vb = zext <vscale x 8 x i32> splat (i32 16) to <vscale x 8 x i64>
  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = mul <vscale x 8 x i64> %vb, %vc
  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vf
}