; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
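;
; A single source file covers both targets: the sed invocations in the RUN
; lines rewrite the iXLen placeholder to i32 for the riscv32 run and to i64
; for the riscv64 run before the IR reaches llc.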

declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

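; The .mask variants take (maskedoff, op1, op2, mask, vl). In the tests below
; the %mask operand is itself produced by an unmasked vmfne, so the checked
; output contains two vmfne.vv instructions, with vmv1r.v copies shuffling
; the incoming v0 mask out of the way and moving the result back into v0.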
declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x half> %2,
    <vscale x 1 x half> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
  <vscale x 2 x i1>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x half> %2,
    <vscale x 2 x half> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
  <vscale x 4 x i1>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x half> %2,
    <vscale x 4 x half> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

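; From LMUL=2 upward the source vectors occupy register groups, so the
; operands arrive in v8/v10 (m2) or v8/v12 (m4) and the mask temporary moves
; up to v14 or v20 accordingly; note that at m1 the final mask copy above is
; emitted as vmv.v.v rather than vmv1r.v.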
declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
  <vscale x 8 x i1>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v10
; CHECK-NEXT:    vmfne.vv v14, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x half> %2,
    <vscale x 8 x half> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfne_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
  <vscale x 16 x i1>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v12
; CHECK-NEXT:    vmfne.vv v20, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x half> %2,
    <vscale x 16 x half> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
  <vscale x 1 x i1>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x float> %2,
    <vscale x 1 x float> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
  <vscale x 2 x i1>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x float> %2,
    <vscale x 2 x float> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
  <vscale x 4 x i1>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v10
; CHECK-NEXT:    vmfne.vv v14, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x float> %2,
    <vscale x 4 x float> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
  <vscale x 8 x i1>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v12
; CHECK-NEXT:    vmfne.vv v20, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x float> %2,
    <vscale x 8 x float> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
  <vscale x 1 x i1>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x double> %2,
    <vscale x 1 x double> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
  <vscale x 2 x i1>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v10
; CHECK-NEXT:    vmfne.vv v14, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x double> %2,
    <vscale x 2 x double> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmfne.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
  <vscale x 4 x i1>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmfne.vv v0, v8, v12
; CHECK-NEXT:    vmfne.vv v20, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x double> %2,
    <vscale x 4 x double> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

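; The .vf forms compare each element against a scalar passed in fa0, and the
; intrinsic name gains a scalar-type suffix (e.g. nxv1f16.f16).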
declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
  <vscale x 1 x i1>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

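; In the masked .vf tests the active mask arrives as a separate vector
; argument (%3), so the generated code parks the incoming v0, moves %3 into
; v0, performs the masked vmfne.vf, and copies the result back into v0.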
declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
  <vscale x 2 x i1>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
  <vscale x 4 x i1>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
  <vscale x 8 x i1>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmfne.vf v11, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfne_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
  <vscale x 16 x i1>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmfne.vf v13, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
  <vscale x 1 x i1>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
  <vscale x 2 x i1>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
  <vscale x 4 x i1>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmfne.vf v11, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
  <vscale x 8 x i1>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmfne.vf v13, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
  <vscale x 1 x i1>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
  <vscale x 2 x i1>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmfne.vf v11, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
  <vscale x 4 x i1>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmfne.vf v13, v8, fa0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}