; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

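; Tests for the llvm.riscv.vfrdiv intrinsics (floating-point reverse division,
; vd[i] = rs1 / vs2[i]), covering each element type (f16/f32/f64) at every
; LMUL, in both unmasked and masked forms. The rounding-mode operand (here 0,
; i.e. RNE) is materialized with fsrmi/fsrm around the vfrdiv.vf instruction,
; and the masked calls use tail-agnostic, mask-undisturbed policy (iXLen 1).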
declare <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfrdiv.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfrdiv.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfrdiv.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfrdiv.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfrdiv.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
