; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

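; Tests for the scalar (.vf) form of the llvm.riscv.vfslide1down intrinsic.
; For each FP element type (f16 via +zvfh, f32, f64) and LMUL, the unmasked
; variant is called with an undef passthru, and the masked variant is called
; with a v0 mask and policy operand 1.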
declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen);

define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen);

define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen);

define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen);

define <vscale x 32 x half> @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1down.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen);

define <vscale x 4 x float> @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen);

define <vscale x 8 x float> @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen);

define <vscale x 16 x float> @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen);

define <vscale x 2 x double> @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen);

define <vscale x 4 x double> @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen);

define <vscale x 8 x double> @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}