; Source: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll (revision 36e4176f1d83d04cdebb4e1870561099b2478d80)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 8 x i7> @llvm.vp.mul.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)

; Non-power-of-two element type: i7 is legalized to SEW=e8 (see the vsetvli
; in the CHECK lines) for the masked vector-scalar VP multiply.
define <vscale x 8 x i7> @vmul_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.mul.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}

declare <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

; vp.mul tests for <vscale x 1 x i8> (e8, mf8): vector-vector and
; vector-scalar, masked and unmasked (all-ones mask).
define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmul_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmul_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vmul_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

; vp.mul tests for <vscale x 2 x i8> (e8, mf4): vv/vx, masked and unmasked.
define <vscale x 2 x i8> @vmul_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmul_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmul_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vmul_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

; vp.mul tests for <vscale x 4 x i8> (e8, mf2): vv/vx, masked and unmasked.
define <vscale x 4 x i8> @vmul_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmul_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmul_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vmul_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

declare <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

; vp.mul tests for <vscale x 8 x i8> (e8, m1): vv/vx, masked and unmasked.
define <vscale x 8 x i8> @vmul_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmul_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmul_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vmul_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

declare <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

; vp.mul tests for <vscale x 16 x i8> (e8, m2): vv/vx, masked and unmasked.
define <vscale x 16 x i8> @vmul_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmul_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmul_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vmul_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

; vp.mul tests for <vscale x 32 x i8> (e8, m4): vv/vx, masked and unmasked.
define <vscale x 32 x i8> @vmul_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmul_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmul_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vmul_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

declare <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

; vp.mul tests for <vscale x 64 x i8> (e8, m8): vv/vx, masked and unmasked.
define <vscale x 64 x i8> @vmul_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmul_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmul_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vmul_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

declare <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

; vp.mul tests for <vscale x 1 x i16> (e16, mf4): vv/vx, masked and unmasked.
define <vscale x 1 x i16> @vmul_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmul_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmul_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vmul_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

; vp.mul tests for <vscale x 2 x i16> (e16, mf2): vv/vx, masked and unmasked.
define <vscale x 2 x i16> @vmul_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmul_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmul_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vmul_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

; vp.mul tests for <vscale x 4 x i16> (e16, m1): vv/vx, masked and unmasked.
define <vscale x 4 x i16> @vmul_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmul_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmul_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vmul_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

declare <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

; vp.mul tests for <vscale x 8 x i16> (e16, m2): vv/vx, masked and unmasked.
define <vscale x 8 x i16> @vmul_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmul_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmul_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vmul_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

declare <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

; vp.mul tests for <vscale x 16 x i16> (e16, m4): vv/vx, masked and unmasked.
define <vscale x 16 x i16> @vmul_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vmul_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vmul_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vmul_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

573declare <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
574
575define <vscale x 32 x i16> @vmul_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
576; CHECK-LABEL: vmul_vv_nxv32i16:
577; CHECK:       # %bb.0:
578; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
579; CHECK-NEXT:    vmul.vv v8, v8, v16, v0.t
580; CHECK-NEXT:    ret
581  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
582  ret <vscale x 32 x i16> %v
583}
584
585define <vscale x 32 x i16> @vmul_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
586; CHECK-LABEL: vmul_vv_nxv32i16_unmasked:
587; CHECK:       # %bb.0:
588; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
589; CHECK-NEXT:    vmul.vv v8, v8, v16
590; CHECK-NEXT:    ret
591  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
592  ret <vscale x 32 x i16> %v
593}
594
; Splatted i16 scalar operand, masked: the insertelement+shufflevector splat matches vmul.vx instead of materializing a splat vector.
define <vscale x 32 x i16> @vmul_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}
606
; Splatted scalar plus all-true mask: folds to an unmasked vmul.vx.
define <vscale x 32 x i16> @vmul_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
618
619declare <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
620
; Masked vector-vector vp.mul on <vscale x 1 x i32> (e32/mf2): single vsetvli + predicated vmul.vv.
define <vscale x 1 x i32> @vmul_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}
630
; All-true mask folds to an unmasked vmul.vv.
define <vscale x 1 x i32> @vmul_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}
640
; Splatted i32 scalar operand, masked: matches vmul.vx with v0.t.
define <vscale x 1 x i32> @vmul_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}
652
; Splatted scalar plus all-true mask: unmasked vmul.vx.
define <vscale x 1 x i32> @vmul_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}
664
665declare <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
666
; Masked vector-vector vp.mul on <vscale x 2 x i32> (e32/m1): single vsetvli + predicated vmul.vv.
define <vscale x 2 x i32> @vmul_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
676
; All-true mask folds to an unmasked vmul.vv.
define <vscale x 2 x i32> @vmul_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
686
; Splatted i32 scalar operand, masked: matches vmul.vx with v0.t.
define <vscale x 2 x i32> @vmul_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
698
; Splatted scalar plus all-true mask: unmasked vmul.vx.
define <vscale x 2 x i32> @vmul_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
710
711declare <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
712
; Masked vector-vector vp.mul on <vscale x 4 x i32> (e32/m2): single vsetvli + predicated vmul.vv.
define <vscale x 4 x i32> @vmul_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}
722
; All-true mask folds to an unmasked vmul.vv.
define <vscale x 4 x i32> @vmul_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}
732
; Splatted i32 scalar operand, masked: matches vmul.vx with v0.t.
define <vscale x 4 x i32> @vmul_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}
744
; Splatted scalar plus all-true mask: unmasked vmul.vx.
define <vscale x 4 x i32> @vmul_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}
756
757declare <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32>, <vscale x 7 x i32>, <vscale x 7 x i1>, i32)
758
; Non-power-of-two element count <vscale x 7 x i32>: legalized into the containing m4 register group, still one predicated vmul.vv.
define <vscale x 7 x i32> @vmul_vv_nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv7i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x i32> %v
}
768
; Non-power-of-two type with all-true mask: unmasked vmul.vv at m4.
define <vscale x 7 x i32> @vmul_vv_nxv7i32_unmasked(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv7i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 7 x i32> %v
}
778
; Non-power-of-two type with splatted scalar, masked: vmul.vx with v0.t at m4.
define <vscale x 7 x i32> @vmul_vx_nxv7i32(<vscale x 7 x i32> %va, i32 %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv7i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 7 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 7 x i32> %elt.head, <vscale x 7 x i32> poison, <vscale x 7 x i32> zeroinitializer
  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %vb, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x i32> %v
}
790
; Non-power-of-two type, splatted scalar, all-true mask: unmasked vmul.vx at m4.
define <vscale x 7 x i32> @vmul_vx_nxv7i32_unmasked(<vscale x 7 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv7i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 7 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 7 x i32> %elt.head, <vscale x 7 x i32> poison, <vscale x 7 x i32> zeroinitializer
  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %vb, <vscale x 7 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 7 x i32> %v
}
802
803declare <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
804
; Masked vector-vector vp.mul on <vscale x 8 x i32> (e32/m4): single vsetvli + predicated vmul.vv.
define <vscale x 8 x i32> @vmul_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}
814
; All-true mask folds to an unmasked vmul.vv.
define <vscale x 8 x i32> @vmul_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}
824
; Splatted i32 scalar operand, masked: matches vmul.vx with v0.t.
define <vscale x 8 x i32> @vmul_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}
836
; Splatted scalar plus all-true mask: unmasked vmul.vx.
define <vscale x 8 x i32> @vmul_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}
848
849declare <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
850
; Masked vector-vector vp.mul on <vscale x 16 x i32> (e32/m8): single vsetvli + predicated vmul.vv.
define <vscale x 16 x i32> @vmul_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}
860
; All-true mask folds to an unmasked vmul.vv.
define <vscale x 16 x i32> @vmul_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}
870
; Splatted i32 scalar operand, masked: matches vmul.vx with v0.t.
define <vscale x 16 x i32> @vmul_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}
882
; Splat on the LHS of the multiply: commutativity still lets selection use vmul.vx with the vector in v8.
define <vscale x 16 x i32> @vmul_vx_nxv16i32_commute(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i32_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %vb, <vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}
894
; Splatted scalar plus all-true mask: unmasked vmul.vx.
define <vscale x 16 x i32> @vmul_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}
906
907declare <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
908
; Masked vector-vector vp.mul on <vscale x 1 x i64> (e64/m1): single vsetvli + predicated vmul.vv; same code on RV32 and RV64.
define <vscale x 1 x i64> @vmul_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}
918
; All-true mask folds to an unmasked vmul.vv.
define <vscale x 1 x i64> @vmul_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}
928
; i64 scalar splat, masked. On RV32 the i64 %b arrives split in a0/a1, so it is spilled to the stack and rebroadcast with a zero-stride vlse64.v, then multiplied with vmul.vv; RV64 holds %b in one GPR and uses vmul.vx directly.
define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmul_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}
954
; Unmasked variant of the i64 splat: RV32 rebroadcasts via stack + vlse64.v and uses vmul.vv; RV64 uses vmul.vx.
define <vscale x 1 x i64> @vmul_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmul_vx_nxv1i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv1i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}
980
981declare <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
982
; Masked vector-vector vp.mul on <vscale x 2 x i64> (e64/m2): single vsetvli + predicated vmul.vv.
define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}
992
; All-true mask folds to an unmasked vmul.vv.
define <vscale x 2 x i64> @vmul_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
1002
; i64 scalar splat, masked: RV32 spills the a0/a1 pair and rebroadcasts with zero-stride vlse64.v + vmul.vv; RV64 uses vmul.vx.
define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmul_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}
1028
; Unmasked i64 splat: RV32 stack-rebroadcast + vmul.vv, RV64 vmul.vx.
define <vscale x 2 x i64> @vmul_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmul_vx_nxv2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
1054
1055declare <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
1056
; Masked vector-vector vp.mul on <vscale x 4 x i64> (e64/m4): single vsetvli + predicated vmul.vv.
define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}
1066
; All-true mask folds to an unmasked vmul.vv.
define <vscale x 4 x i64> @vmul_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}
1076
; i64 scalar splat, masked: RV32 spills the a0/a1 pair and rebroadcasts with zero-stride vlse64.v + vmul.vv; RV64 uses vmul.vx.
define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmul_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}
1102
; Unmasked i64 splat: RV32 stack-rebroadcast + vmul.vv, RV64 vmul.vx.
define <vscale x 4 x i64> @vmul_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmul_vx_nxv4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}
1128
1129declare <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1130
; Masked vector-vector vp.mul on <vscale x 8 x i64> (e64/m8): single vsetvli + predicated vmul.vv.
define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1140
; All-true mask folds to an unmasked vmul.vv.
define <vscale x 8 x i64> @vmul_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}
1150
; i64 scalar splat, masked: RV32 spills the a0/a1 pair and rebroadcasts with zero-stride vlse64.v + vmul.vv; RV64 uses vmul.vx.
define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmul_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1176
; Unmasked i64 splat: RV32 stack-rebroadcast + vmul.vv, RV64 vmul.vx.
define <vscale x 8 x i64> @vmul_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vmul_vx_nxv8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmul.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vmul_vx_nxv8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}
1202
; Multiply by an undef operand: the whole op folds to a zero splat (vmv.v.i v8, 0), independent of mask and EVL.
define <vscale x 8 x i64> @vmul_vv_undef_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_undef_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> undef, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1212
; Multiply by undef with an all-true mask built via insertelement/shufflevector: still folds to a zero splat.
define <vscale x 8 x i64> @vmul_vx_undef_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_undef_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> undef, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1224
; Multiply by a splat of zero, masked: folds to a zero splat (vmv.v.i v8, 0).
define <vscale x 8 x i64> @vmul_vx_zero_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_zero_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1236
define <vscale x 8 x i64> @vmul_vx_zero_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_zero_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
; Unmasked multiplication by a zero splat likewise folds to vmv.v.i v8, 0.
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1250
define <vscale x 8 x i64> @vmul_vx_one_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_one_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
; Multiplication by a splat of 1 is an identity; the input register is
; returned unchanged and no instructions are expected before the ret.
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 1, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1260
define <vscale x 8 x i64> @vmul_vx_one_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_one_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
; Unmasked multiplication by a splat of 1 is also folded to a no-op.
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 1, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1272
define <vscale x 8 x i64> @vmul_vx_negone_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_negone_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
; Multiplication by a splat of -1 is lowered as a masked negation:
; vrsub.vi v8, v8, 0 (i.e. 0 - v8) under the v0 mask at EVL.
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1284
define <vscale x 8 x i64> @vmul_vx_negone_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_negone_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    ret
; Unmasked multiplication by -1 becomes an unmasked negation (vrsub.vi 0).
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1298
define <vscale x 8 x i64> @vmul_vx_pow2_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_pow2_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 6, v0.t
; CHECK-NEXT:    ret
; Multiplication by a power-of-two splat (64 = 2^6) is strength-reduced to a
; masked left shift by 6 (vsll.vi).
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 64, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1310
define <vscale x 8 x i64> @vmul_vx_pow2_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_pow2_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 6
; CHECK-NEXT:    ret
; Unmasked multiplication by 64 is strength-reduced to an unmasked shift by 6.
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 64, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1324
define <vscale x 8 x i64> @vmul_vx_negpow2_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_negpow2_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 6, v0.t
; CHECK-NEXT:    vrsub.vi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
; Multiplication by a negative power-of-two splat (-64) is lowered as a masked
; shift left by 6 followed by a masked negation (vrsub.vi 0).
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -64, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1337
define <vscale x 8 x i64> @vmul_vx_negpow2_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_negpow2_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 6
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    ret
; Unmasked multiplication by -64: shift left by 6, then negate, both unmasked.
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 -64, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1352
1353declare <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1354
define <vscale x 8 x i64> @vmul_vshl_vx_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vshl_vx_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 56
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
; (va << 3) * 7 is combined into a single multiply by 56 (7 << 3). Note the
; expected code is an unmasked vmul.vx at VLMAX even though the IR is masked
; with an EVL; the checks pin the current codegen for this combine.
  %elt.head1 = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %elt.head2 = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
  %vc = shufflevector <vscale x 8 x i64> %elt.head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vshl = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vshl, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1370
define <vscale x 8 x i64> @vmul_vshl_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vshl_vx_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 56
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    ret
; Unmasked (va << 3) * 7 is also combined into a single multiply by 56 at
; VLMAX.
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %elt.head1 = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %elt.head2 = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
  %vc = shufflevector <vscale x 8 x i64> %elt.head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vshl = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vshl, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1388
define <vscale x 8 x i64> @vmul_vshl_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vshl_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
; (va << 7) * vb: the shift by a constant splat is reassociated past the
; vector multiply, so the checks expect masked vmul.vv first, then vsll.vi 7.
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
  %vc = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vshl = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vshl, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1402
define <vscale x 8 x i64> @vmul_vshl_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vshl_vv_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vv v8, v8, v16
; CHECK-NEXT:    vsll.vi v8, v8, 7
; CHECK-NEXT:    ret
; Unmasked (va << 7) * vb: shift-by-splat is reassociated after the multiply,
; both instructions unmasked at EVL.
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
  %vc = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vshl = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vshl, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1418
1419declare <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1420
define <vscale x 8 x i64> @vmul_vadd_vx_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vadd_vx_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 7
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmul.vx v8, v8, a1, v0.t
; CHECK-NEXT:    li a0, 21
; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
; (va + 3) * 7 is distributed over the addition: masked multiply by 7, then
; masked add of 21 (3 * 7).
  %elt.head1 = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %elt.head2 = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
  %vc = shufflevector <vscale x 8 x i64> %elt.head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vadd = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vadd, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1438
define <vscale x 8 x i64> @vmul_vadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vadd_vx_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 21
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a1
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vmadd.vx v8, a0, v16
; CHECK-NEXT:    ret
; Unmasked (va + 3) * 7 is distributed and then fused into a multiply-add:
; the addend splat of 21 is materialized with vmv.v.x and combined with the
; multiply by 7 via vmadd.vx (v8 = v8 * 7 + v16).
  %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %elt.head1 = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head1, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %elt.head2 = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
  %vc = shufflevector <vscale x 8 x i64> %elt.head2, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vadd = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  %v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %vadd, <vscale x 8 x i64> %vc, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
1458