; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

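; Editor's note (not part of the autogenerated output): these tests exercise the
; llvm.riscv.vfslide1up intrinsics for each FP element type (f16/f32/f64) and LMUL.
; Per the RVV specification, vfslide1up.vf vd, vs2, fs1 computes:
;   vd[0] = fs1,  vd[i] = vs2[i-1]  for 0 < i < vl
; The unmasked intrinsic takes (passthru, vector, scalar, vl) and is called here
; with an undef passthru; the masked form adds a mask and a policy operand, and the
; policy value 1 used below requests a tail-agnostic, mask-undisturbed lowering,
; matching the "ta, mu" vsetvli in the masked CHECK lines.
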
declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen);

define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen);

define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfslide1up.vf v10, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen);

define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfslide1up.vf v12, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen);

define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfslide1up.vf v16, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen);

define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfslide1up.vf v10, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen);

define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfslide1up.vf v12, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen);

define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfslide1up.vf v16, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen);

define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfslide1up.vf v10, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen);

define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfslide1up.vf v12, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen);

define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfslide1up.vf v16, v8, fa0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}