; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbc \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbc \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

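; This file checks codegen for the Zvbc vclmulh (vector carry-less multiply,
; high half) intrinsics: vector-vector (.vv) and vector-scalar (.vx) forms,
; unmasked and masked, for SEW=64 at LMUL 1, 2, 4 and 8. The iXLen placeholder
; is rewritten to i32/i64 by the sed commands in the RUN lines so the same IR
; exercises both riscv32 and riscv64. The unmasked intrinsics pass an undef
; passthru as their first operand.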
declare <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmulh_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vclmulh.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

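; Masked forms below take the merge vector as their first operand, the mask in
; v0, and a trailing policy immediate. The value 1 requests tail-agnostic,
; mask-undisturbed behavior, which is why the masked checks expect "ta, mu" in
; the vsetvli instead of the "ta, ma" used by the unmasked tests.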
declare <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vclmulh.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmulh_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vclmulh.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vclmulh.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmulh_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vclmulh.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vclmulh.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmulh_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vclmulh.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

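; At LMUL=8 only two m8 vector arguments fit in the v8-v23 argument registers,
; so the third m8 argument (the second source operand) is passed indirectly
; and reloaded with vl8re64.v before the masked vclmulh.vv.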
define <vscale x 8 x i64> @intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vclmulh.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

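; The .vx forms take a scalar i64 operand. On RV32 the 64-bit scalar does not
; fit in a single GPR, so it is stored to a stack slot as two words and
; splatted with a zero-stride vlse64.v, and the operation is lowered to
; vclmulh.vv; on RV64 vclmulh.vx is emitted directly.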
declare <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmulh_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vclmulh.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmulh_mask_vx_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v9, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vclmulh.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmulh_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vclmulh.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmulh_mask_vx_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v10, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vclmulh.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmulh_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vclmulh.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmulh_mask_vx_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v12, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vclmulh.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmulh_vx_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vclmulh.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmulh_mask_vx_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT:    vlse64.v v24, (a0), zero
; RV32-NEXT:    vclmulh.vv v8, v16, v24, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vclmulh.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}