; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zve64d \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64d \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmul.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1747; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32:
1748; CHECK:       # %bb.0: # %entry
1749; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
1750; CHECK-NEXT:    vmul.vx v8, v10, a0, v0.t
1751; CHECK-NEXT:    ret
1752entry:
1753  %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
1754    <vscale x 4 x i32> %0,
1755    <vscale x 4 x i32> %1,
1756    i32 %2,
1757    <vscale x 4 x i1> %3,
1758    iXLen %4, iXLen 1)
1759
1760  ret <vscale x 4 x i32> %a
1761}
1762
1763declare <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
1764  <vscale x 8 x i32>,
1765  <vscale x 8 x i32>,
1766  i32,
1767  iXLen);
1768
1769define <vscale x 8 x i32> @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1770; CHECK-LABEL: intrinsic_vmul_vx_nxv8i32_nxv8i32_i32:
1771; CHECK:       # %bb.0: # %entry
1772; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
1773; CHECK-NEXT:    vmul.vx v8, v8, a0
1774; CHECK-NEXT:    ret
1775entry:
1776  %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
1777    <vscale x 8 x i32> undef,
1778    <vscale x 8 x i32> %0,
1779    i32 %1,
1780    iXLen %2)
1781
1782  ret <vscale x 8 x i32> %a
1783}
1784
1785declare <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
1786  <vscale x 8 x i32>,
1787  <vscale x 8 x i32>,
1788  i32,
1789  <vscale x 8 x i1>,
1790  iXLen, iXLen);
1791
1792define <vscale x 8 x i32> @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1793; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32:
1794; CHECK:       # %bb.0: # %entry
1795; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
1796; CHECK-NEXT:    vmul.vx v8, v12, a0, v0.t
1797; CHECK-NEXT:    ret
1798entry:
1799  %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
1800    <vscale x 8 x i32> %0,
1801    <vscale x 8 x i32> %1,
1802    i32 %2,
1803    <vscale x 8 x i1> %3,
1804    iXLen %4, iXLen 1)
1805
1806  ret <vscale x 8 x i32> %a
1807}
1808
1809declare <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
1810  <vscale x 16 x i32>,
1811  <vscale x 16 x i32>,
1812  i32,
1813  iXLen);
1814
1815define <vscale x 16 x i32> @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
1816; CHECK-LABEL: intrinsic_vmul_vx_nxv16i32_nxv16i32_i32:
1817; CHECK:       # %bb.0: # %entry
1818; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
1819; CHECK-NEXT:    vmul.vx v8, v8, a0
1820; CHECK-NEXT:    ret
1821entry:
1822  %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
1823    <vscale x 16 x i32> undef,
1824    <vscale x 16 x i32> %0,
1825    i32 %1,
1826    iXLen %2)
1827
1828  ret <vscale x 16 x i32> %a
1829}
1830
1831declare <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
1832  <vscale x 16 x i32>,
1833  <vscale x 16 x i32>,
1834  i32,
1835  <vscale x 16 x i1>,
1836  iXLen, iXLen);
1837
1838define <vscale x 16 x i32> @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1839; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32:
1840; CHECK:       # %bb.0: # %entry
1841; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
1842; CHECK-NEXT:    vmul.vx v8, v16, a0, v0.t
1843; CHECK-NEXT:    ret
1844entry:
1845  %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
1846    <vscale x 16 x i32> %0,
1847    <vscale x 16 x i32> %1,
1848    i32 %2,
1849    <vscale x 16 x i1> %3,
1850    iXLen %4, iXLen 1)
1851
1852  ret <vscale x 16 x i32> %a
1853}
1854
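; For the i64 scalar forms below, RV64 passes the scalar in a0 and emits
; vmul.vx directly. RV32 receives the value split across a0/a1, so it stores
; the two halves to the stack, rebroadcasts them with a zero-stride vlse64.v,
; and multiplies with vmul.vv instead; hence the separate RV32/RV64 check
; prefixes.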
1855declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
1856  <vscale x 1 x i64>,
1857  <vscale x 1 x i64>,
1858  i64,
1859  iXLen);
1860
1861define <vscale x 1 x i64> @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1862; RV32-LABEL: intrinsic_vmul_vx_nxv1i64_nxv1i64_i64:
1863; RV32:       # %bb.0: # %entry
1864; RV32-NEXT:    addi sp, sp, -16
1865; RV32-NEXT:    sw a0, 8(sp)
1866; RV32-NEXT:    sw a1, 12(sp)
1867; RV32-NEXT:    addi a0, sp, 8
1868; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
1869; RV32-NEXT:    vlse64.v v9, (a0), zero
1870; RV32-NEXT:    vmul.vv v8, v8, v9
1871; RV32-NEXT:    addi sp, sp, 16
1872; RV32-NEXT:    ret
1873;
1874; RV64-LABEL: intrinsic_vmul_vx_nxv1i64_nxv1i64_i64:
1875; RV64:       # %bb.0: # %entry
1876; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1877; RV64-NEXT:    vmul.vx v8, v8, a0
1878; RV64-NEXT:    ret
1879entry:
1880  %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
1881    <vscale x 1 x i64> undef,
1882    <vscale x 1 x i64> %0,
1883    i64 %1,
1884    iXLen %2)
1885
1886  ret <vscale x 1 x i64> %a
1887}
1888
1889declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
1890  <vscale x 1 x i64>,
1891  <vscale x 1 x i64>,
1892  i64,
1893  <vscale x 1 x i1>,
1894  iXLen, iXLen);
1895
1896define <vscale x 1 x i64> @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1897; RV32-LABEL: intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64:
1898; RV32:       # %bb.0: # %entry
1899; RV32-NEXT:    addi sp, sp, -16
1900; RV32-NEXT:    sw a0, 8(sp)
1901; RV32-NEXT:    sw a1, 12(sp)
1902; RV32-NEXT:    addi a0, sp, 8
1903; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
1904; RV32-NEXT:    vlse64.v v10, (a0), zero
1905; RV32-NEXT:    vmul.vv v8, v9, v10, v0.t
1906; RV32-NEXT:    addi sp, sp, 16
1907; RV32-NEXT:    ret
1908;
1909; RV64-LABEL: intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64:
1910; RV64:       # %bb.0: # %entry
1911; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1912; RV64-NEXT:    vmul.vx v8, v9, a0, v0.t
1913; RV64-NEXT:    ret
1914entry:
1915  %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
1916    <vscale x 1 x i64> %0,
1917    <vscale x 1 x i64> %1,
1918    i64 %2,
1919    <vscale x 1 x i1> %3,
1920    iXLen %4, iXLen 1)
1921
1922  ret <vscale x 1 x i64> %a
1923}
1924
1925declare <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
1926  <vscale x 2 x i64>,
1927  <vscale x 2 x i64>,
1928  i64,
1929  iXLen);
1930
1931define <vscale x 2 x i64> @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
1932; RV32-LABEL: intrinsic_vmul_vx_nxv2i64_nxv2i64_i64:
1933; RV32:       # %bb.0: # %entry
1934; RV32-NEXT:    addi sp, sp, -16
1935; RV32-NEXT:    sw a0, 8(sp)
1936; RV32-NEXT:    sw a1, 12(sp)
1937; RV32-NEXT:    addi a0, sp, 8
1938; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
1939; RV32-NEXT:    vlse64.v v10, (a0), zero
1940; RV32-NEXT:    vmul.vv v8, v8, v10
1941; RV32-NEXT:    addi sp, sp, 16
1942; RV32-NEXT:    ret
1943;
1944; RV64-LABEL: intrinsic_vmul_vx_nxv2i64_nxv2i64_i64:
1945; RV64:       # %bb.0: # %entry
1946; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
1947; RV64-NEXT:    vmul.vx v8, v8, a0
1948; RV64-NEXT:    ret
1949entry:
1950  %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
1951    <vscale x 2 x i64> undef,
1952    <vscale x 2 x i64> %0,
1953    i64 %1,
1954    iXLen %2)
1955
1956  ret <vscale x 2 x i64> %a
1957}
1958
1959declare <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
1960  <vscale x 2 x i64>,
1961  <vscale x 2 x i64>,
1962  i64,
1963  <vscale x 2 x i1>,
1964  iXLen, iXLen);
1965
1966define <vscale x 2 x i64> @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1967; RV32-LABEL: intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64:
1968; RV32:       # %bb.0: # %entry
1969; RV32-NEXT:    addi sp, sp, -16
1970; RV32-NEXT:    sw a0, 8(sp)
1971; RV32-NEXT:    sw a1, 12(sp)
1972; RV32-NEXT:    addi a0, sp, 8
1973; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
1974; RV32-NEXT:    vlse64.v v12, (a0), zero
1975; RV32-NEXT:    vmul.vv v8, v10, v12, v0.t
1976; RV32-NEXT:    addi sp, sp, 16
1977; RV32-NEXT:    ret
1978;
1979; RV64-LABEL: intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64:
1980; RV64:       # %bb.0: # %entry
1981; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
1982; RV64-NEXT:    vmul.vx v8, v10, a0, v0.t
1983; RV64-NEXT:    ret
1984entry:
1985  %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
1986    <vscale x 2 x i64> %0,
1987    <vscale x 2 x i64> %1,
1988    i64 %2,
1989    <vscale x 2 x i1> %3,
1990    iXLen %4, iXLen 1)
1991
1992  ret <vscale x 2 x i64> %a
1993}
1994
1995declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
1996  <vscale x 4 x i64>,
1997  <vscale x 4 x i64>,
1998  i64,
1999  iXLen);
2000
2001define <vscale x 4 x i64> @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
2002; RV32-LABEL: intrinsic_vmul_vx_nxv4i64_nxv4i64_i64:
2003; RV32:       # %bb.0: # %entry
2004; RV32-NEXT:    addi sp, sp, -16
2005; RV32-NEXT:    sw a0, 8(sp)
2006; RV32-NEXT:    sw a1, 12(sp)
2007; RV32-NEXT:    addi a0, sp, 8
2008; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
2009; RV32-NEXT:    vlse64.v v12, (a0), zero
2010; RV32-NEXT:    vmul.vv v8, v8, v12
2011; RV32-NEXT:    addi sp, sp, 16
2012; RV32-NEXT:    ret
2013;
2014; RV64-LABEL: intrinsic_vmul_vx_nxv4i64_nxv4i64_i64:
2015; RV64:       # %bb.0: # %entry
2016; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
2017; RV64-NEXT:    vmul.vx v8, v8, a0
2018; RV64-NEXT:    ret
2019entry:
2020  %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
2021    <vscale x 4 x i64> undef,
2022    <vscale x 4 x i64> %0,
2023    i64 %1,
2024    iXLen %2)
2025
2026  ret <vscale x 4 x i64> %a
2027}
2028
2029declare <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
2030  <vscale x 4 x i64>,
2031  <vscale x 4 x i64>,
2032  i64,
2033  <vscale x 4 x i1>,
2034  iXLen, iXLen);
2035
2036define <vscale x 4 x i64> @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2037; RV32-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64:
2038; RV32:       # %bb.0: # %entry
2039; RV32-NEXT:    addi sp, sp, -16
2040; RV32-NEXT:    sw a0, 8(sp)
2041; RV32-NEXT:    sw a1, 12(sp)
2042; RV32-NEXT:    addi a0, sp, 8
2043; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
2044; RV32-NEXT:    vlse64.v v16, (a0), zero
2045; RV32-NEXT:    vmul.vv v8, v12, v16, v0.t
2046; RV32-NEXT:    addi sp, sp, 16
2047; RV32-NEXT:    ret
2048;
2049; RV64-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64:
2050; RV64:       # %bb.0: # %entry
2051; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
2052; RV64-NEXT:    vmul.vx v8, v12, a0, v0.t
2053; RV64-NEXT:    ret
2054entry:
2055  %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
2056    <vscale x 4 x i64> %0,
2057    <vscale x 4 x i64> %1,
2058    i64 %2,
2059    <vscale x 4 x i1> %3,
2060    iXLen %4, iXLen 1)
2061
2062  ret <vscale x 4 x i64> %a
2063}
2064
2065declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
2066  <vscale x 8 x i64>,
2067  <vscale x 8 x i64>,
2068  i64,
2069  iXLen);
2070
2071define <vscale x 8 x i64> @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
2072; RV32-LABEL: intrinsic_vmul_vx_nxv8i64_nxv8i64_i64:
2073; RV32:       # %bb.0: # %entry
2074; RV32-NEXT:    addi sp, sp, -16
2075; RV32-NEXT:    sw a0, 8(sp)
2076; RV32-NEXT:    sw a1, 12(sp)
2077; RV32-NEXT:    addi a0, sp, 8
2078; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
2079; RV32-NEXT:    vlse64.v v16, (a0), zero
2080; RV32-NEXT:    vmul.vv v8, v8, v16
2081; RV32-NEXT:    addi sp, sp, 16
2082; RV32-NEXT:    ret
2083;
2084; RV64-LABEL: intrinsic_vmul_vx_nxv8i64_nxv8i64_i64:
2085; RV64:       # %bb.0: # %entry
2086; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
2087; RV64-NEXT:    vmul.vx v8, v8, a0
2088; RV64-NEXT:    ret
2089entry:
2090  %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
2091    <vscale x 8 x i64> undef,
2092    <vscale x 8 x i64> %0,
2093    i64 %1,
2094    iXLen %2)
2095
2096  ret <vscale x 8 x i64> %a
2097}
2098
2099declare <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
2100  <vscale x 8 x i64>,
2101  <vscale x 8 x i64>,
2102  i64,
2103  <vscale x 8 x i1>,
2104  iXLen, iXLen);
2105
2106define <vscale x 8 x i64> @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2107; RV32-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64:
2108; RV32:       # %bb.0: # %entry
2109; RV32-NEXT:    addi sp, sp, -16
2110; RV32-NEXT:    sw a0, 8(sp)
2111; RV32-NEXT:    sw a1, 12(sp)
2112; RV32-NEXT:    addi a0, sp, 8
2113; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
2114; RV32-NEXT:    vlse64.v v24, (a0), zero
2115; RV32-NEXT:    vmul.vv v8, v16, v24, v0.t
2116; RV32-NEXT:    addi sp, sp, 16
2117; RV32-NEXT:    ret
2118;
2119; RV64-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64:
2120; RV64:       # %bb.0: # %entry
2121; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
2122; RV64-NEXT:    vmul.vx v8, v16, a0, v0.t
2123; RV64-NEXT:    ret
2124entry:
2125  %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
2126    <vscale x 8 x i64> %0,
2127    <vscale x 8 x i64> %1,
2128    i64 %2,
2129    <vscale x 8 x i1> %3,
2130    iXLen %4, iXLen 1)
2131
2132  ret <vscale x 8 x i64> %a
2133}
2134