; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32NOM
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64NOM

; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32M
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64M
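
; The RV32NOM/RV64NOM and RV32M/RV64M prefixes are needed for vmul_xx_nxv8i64,
; where the product of the two scalar operands is formed differently depending
; on whether the M extension is available for scalar multiplication.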

define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i8> %va, %vb
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vmul_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vmul_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i8> %va, splat (i8 -7)
  ret <vscale x 1 x i8> %vc
}

define <vscale x 2 x i8> @vmul_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i8> %va, %vb
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vmul_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vmul_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i8> %va, splat (i8 -7)
  ret <vscale x 2 x i8> %vc
}

define <vscale x 4 x i8> @vmul_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i8> %va, %vb
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vmul_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vmul_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i8> %va, splat (i8 -7)
  ret <vscale x 4 x i8> %vc
}

define <vscale x 8 x i8> @vmul_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vmul_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vmul_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i8> %va, splat (i8 -7)
  ret <vscale x 8 x i8> %vc
}

define <vscale x 16 x i8> @vmul_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i8> %va, %vb
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vmul_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vmul_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i8> %va, splat (i8 -7)
  ret <vscale x 16 x i8> %vc
}

define <vscale x 32 x i8> @vmul_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 32 x i8> %va, %vb
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vmul_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %vc = mul <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vmul_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 32 x i8> %va, splat (i8 -7)
  ret <vscale x 32 x i8> %vc
}

define <vscale x 64 x i8> @vmul_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vmul_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 64 x i8> %va, %vb
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vmul_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmul_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %vc = mul <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vmul_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vmul_vi_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 64 x i8> %va, splat (i8 -7)
  ret <vscale x 64 x i8> %vc
}

define <vscale x 1 x i16> @vmul_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i16> %va, %vb
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vmul_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vmul_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i16> %va, splat (i16 -7)
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vmul_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i16> %va, %vb
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vmul_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vmul_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i16> %va, splat (i16 -7)
  ret <vscale x 2 x i16> %vc
}

define <vscale x 4 x i16> @vmul_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i16> %va, %vb
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vmul_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vmul_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i16> %va, splat (i16 -7)
  ret <vscale x 4 x i16> %vc
}

define <vscale x 8 x i16> @vmul_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vmul_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vmul_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i16> %va, splat (i16 -7)
  ret <vscale x 8 x i16> %vc
}

define <vscale x 16 x i16> @vmul_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i16> %va, %vb
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vmul_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vmul_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i16> %va, splat (i16 -7)
  ret <vscale x 16 x i16> %vc
}

define <vscale x 32 x i16> @vmul_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vmul_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 32 x i16> %va, %vb
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vmul_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmul_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %vc = mul <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vmul_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vmul_vi_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 32 x i16> %va, splat (i16 -7)
  ret <vscale x 32 x i16> %vc
}

define <vscale x 1 x i32> @vmul_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i32> %va, %vb
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vmul_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmul_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vmul_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i32> %va, splat (i32 -7)
  ret <vscale x 1 x i32> %vc
}

define <vscale x 2 x i32> @vmul_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i32> %va, %vb
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vmul_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmul_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vmul_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i32> %va, splat (i32 -7)
  ret <vscale x 2 x i32> %vc
}

define <vscale x 4 x i32> @vmul_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i32> %va, %vb
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vmul_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmul_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vmul_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i32> %va, splat (i32 -7)
  ret <vscale x 4 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i32> %va, %vb
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmul_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i32> %va, splat (i32 -7)
  ret <vscale x 8 x i32> %vc
}

define <vscale x 16 x i32> @vmul_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vmul_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i32> %va, %vb
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vmul_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmul_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %vc = mul <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vmul_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vmul_vi_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 16 x i32> %va, splat (i32 -7)
  ret <vscale x 16 x i32> %vc
}

define <vscale x 1 x i64> @vmul_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i64> %va, %vb
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; RV32-LABEL: vmul_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %vc = mul <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmul_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i64> %va, splat (i64 -7)
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmul_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv1i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i64> %va, splat (i64 2)
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmul_vi_nxv1i64_2(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv1i64_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    ret
  %vc = mul <vscale x 1 x i64> %va, splat (i64 16)
  ret <vscale x 1 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i64> %va, %vb
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; RV32-LABEL: vmul_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %vc = mul <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i64> %va, splat (i64 -7)
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv2i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i64> %va, splat (i64 2)
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmul_vi_nxv2i64_2(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv2i64_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    ret
  %vc = mul <vscale x 2 x i64> %va, splat (i64 16)
  ret <vscale x 2 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i64> %va, %vb
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; RV32-LABEL: vmul_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %vc = mul <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i64> %va, splat (i64 -7)
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv4i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i64> %va, splat (i64 2)
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmul_vi_nxv4i64_2(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv4i64_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    ret
  %vc = mul <vscale x 4 x i64> %va, splat (i64 16)
  ret <vscale x 4 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmul_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i64> %va, %vb
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-LABEL: vmul_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vc = mul <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i64> %va, splat (i64 -7)
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv8i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i64> %va, splat (i64 2)
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmul_vi_nxv8i64_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    ret
  %vc = mul <vscale x 8 x i64> %va, splat (i64 16)
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmul_xx_nxv8i64(i64 %a, i64 %b) nounwind {
; RV32NOM-LABEL: vmul_xx_nxv8i64:
; RV32NOM:       # %bb.0:
; RV32NOM-NEXT:    addi sp, sp, -16
; RV32NOM-NEXT:    sw a0, 8(sp)
; RV32NOM-NEXT:    sw a1, 12(sp)
; RV32NOM-NEXT:    addi a0, sp, 8
; RV32NOM-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32NOM-NEXT:    vlse64.v v8, (a0), zero
; RV32NOM-NEXT:    sw a2, 0(sp)
; RV32NOM-NEXT:    sw a3, 4(sp)
; RV32NOM-NEXT:    mv a0, sp
; RV32NOM-NEXT:    vlse64.v v16, (a0), zero
; RV32NOM-NEXT:    vmul.vv v8, v8, v16
; RV32NOM-NEXT:    addi sp, sp, 16
; RV32NOM-NEXT:    ret
;
; RV64NOM-LABEL: vmul_xx_nxv8i64:
; RV64NOM:       # %bb.0:
; RV64NOM-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
; RV64NOM-NEXT:    vmv.v.x v8, a0
; RV64NOM-NEXT:    vmul.vx v8, v8, a1
; RV64NOM-NEXT:    ret
;
; RV32M-LABEL: vmul_xx_nxv8i64:
; RV32M:       # %bb.0:
; RV32M-NEXT:    addi sp, sp, -16
; RV32M-NEXT:    mul a4, a0, a2
; RV32M-NEXT:    mul a3, a0, a3
; RV32M-NEXT:    mulhu a0, a0, a2
; RV32M-NEXT:    mul a1, a1, a2
; RV32M-NEXT:    add a0, a0, a3
; RV32M-NEXT:    add a0, a0, a1
; RV32M-NEXT:    sw a4, 8(sp)
; RV32M-NEXT:    sw a0, 12(sp)
; RV32M-NEXT:    addi a0, sp, 8
; RV32M-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32M-NEXT:    vlse64.v v8, (a0), zero
; RV32M-NEXT:    addi sp, sp, 16
; RV32M-NEXT:    ret
;
; RV64M-LABEL: vmul_xx_nxv8i64:
; RV64M:       # %bb.0:
; RV64M-NEXT:    mul a0, a0, a1
; RV64M-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64M-NEXT:    vmv.v.x v8, a0
; RV64M-NEXT:    ret
  %head1 = insertelement <vscale x 8 x i64> poison, i64 %a, i32 0
  %splat1 = shufflevector <vscale x 8 x i64> %head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %head2 = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat2 = shufflevector <vscale x 8 x i64> %head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = mul <vscale x 8 x i64> %splat1, %splat2
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i32> @vmul_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vmul_vv_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> splat (i32 1)
  %vc = mul <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vmul_vx_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %head2 = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> splat (i32 1)
  %vc = mul <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmul_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vmul_vi_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> splat (i32 1)
  %vc = mul <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}