; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
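;
; This file exercises the llvm.riscv.vslide1down.* intrinsics in their unmasked
; and masked (tail-agnostic, policy 1) forms for every SEW/LMUL combination from
; e8/mf8 up to e64/m8. On RV32 the i64-scalar variants have no single-instruction
; lowering, so they are legalized as two e32 vslide1down.vx slides over doubled
; VL, plus a vmerge.vvm for the masked form.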

declare <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vslide1down.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m1, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m1, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m1, ta, ma
; RV32-NEXT:    vslide1down.vx v9, v9, a0
; RV32-NEXT:    vslide1down.vx v9, v9, a1
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v9, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vslide1down.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m2, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m2, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vslide1down.vx v10, v10, a1
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v10, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vslide1down.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m4, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m4, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m4, ta, ma
; RV32-NEXT:    vslide1down.vx v12, v12, a0
; RV32-NEXT:    vslide1down.vx v12, v12, a1
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v12, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vslide1down.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a2, a2, e64, m8, ta, ma
; RV32-NEXT:    slli a2, a2, 1
; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli a3, a2, e64, m8, ta, ma
; RV32-NEXT:    slli a3, a3, 1
; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT:    vslide1down.vx v16, v16, a0
; RV32-NEXT:    vslide1down.vx v16, v16, a1
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v8, v16, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vslide1down.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}