; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh,+zvfbfmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh,+zvfbfmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH64
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+zvfbfmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+zvfbfmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN64

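; The ZVFH RUN lines enable the full Zvfh extension; the ZVFHMIN lines enable
; only the Zvfhmin conversion subset, which changes the lowering of the f16
; tests below. The 32/64 suffixes distinguish riscv32 from riscv64, which
; only matters once values are moved into scalar registers (the v3i15
; expansions).

; Same-width f32 -> i32 conversions should lower to a single
; vfcvt.rtz.{x,xu}.f.v; the static round-towards-zero mode matches the
; truncating semantics of fptosi/fptoui.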
define void @fp2si_v2f32_v2i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2f32_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x float>, ptr %x
  %d = fptosi <2 x float> %a to <2 x i32>
  store <2 x i32> %d, ptr %y
  ret void
}

define void @fp2ui_v2f32_v2i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v2f32_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x float>, ptr %x
  %d = fptoui <2 x float> %a to <2 x i32>
  store <2 x i32> %d, ptr %y
  ret void
}

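; Conversions to i1 have no dedicated instruction: the result is computed in
; a narrow integer type, the low bit is kept with vand.vi, and vmsne.vi
; materializes the mask in v0 (likewise fp2ui_v2f32_v2i1 below).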
define <2 x i1> @fp2si_v2f32_v2i1(<2 x float> %x) {
; CHECK-LABEL: fp2si_v2f32_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <2 x float> %x to <2 x i1>
  ret <2 x i1> %z
}

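; i15 is promoted to i16, so a single narrowing convert suffices. Note that
; the unsigned variant below also uses the signed vfncvt.rtz.x.f.w: only the
; low 15 bits of the result are defined, so either convert works here.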
define <2 x i15> @fp2si_v2f32_v2i15(<2 x float> %x) {
; CHECK-LABEL: fp2si_v2f32_v2i15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %z = fptosi <2 x float> %x to <2 x i15>
  ret <2 x i15> %z
}

define <2 x i15> @fp2ui_v2f32_v2i15(<2 x float> %x) {
; CHECK-LABEL: fp2ui_v2f32_v2i15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %z = fptoui <2 x float> %x to <2 x i15>
  ret <2 x i15> %z
}

define <2 x i1> @fp2ui_v2f32_v2i1(<2 x float> %x) {
; CHECK-LABEL: fp2ui_v2f32_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <2 x float> %x to <2 x i1>
  ret <2 x i1> %z
}

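; v3 is not a power of two: the loads and stores below use an exact VL of 3,
; while the mask-producing conversion rounds the VL up to 4.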
define void @fp2si_v3f32_v3i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v3f32_v3i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <3 x float>, ptr %x
  %d = fptosi <3 x float> %a to <3 x i32>
  store <3 x i32> %d, ptr %y
  ret void
}

define void @fp2ui_v3f32_v3i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v3f32_v3i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <3 x float>, ptr %x
  %d = fptoui <3 x float> %a to <3 x i32>
  store <3 x i32> %d, ptr %y
  ret void
}

define <3 x i1> @fp2si_v3f32_v3i1(<3 x float> %x) {
; CHECK-LABEL: fp2si_v3f32_v3i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <3 x float> %x to <3 x i1>
  ret <3 x i1> %z
}

; FIXME: This is expanded when it could be widened + promoted
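; The expansion (here and in fp2ui_v3f32_v3i15 below) converts to v4i16,
; extracts the three elements with vslidedown.vi/vmv.x.s, packs them into 45
; bits with shifts and ors, and stores the result as sw + sh. RV32 and RV64
; differ mainly in the shift amounts used to isolate each 15-bit field.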
define <3 x i15> @fp2si_v3f32_v3i15(<3 x float> %x) {
; ZVFH32-LABEL: fp2si_v3f32_v3i15:
; ZVFH32:       # %bb.0:
; ZVFH32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH32-NEXT:    vslidedown.vi v8, v9, 2
; ZVFH32-NEXT:    vmv.x.s a1, v9
; ZVFH32-NEXT:    vslidedown.vi v9, v9, 1
; ZVFH32-NEXT:    vmv.x.s a2, v8
; ZVFH32-NEXT:    slli a1, a1, 17
; ZVFH32-NEXT:    srli a1, a1, 17
; ZVFH32-NEXT:    slli a3, a2, 30
; ZVFH32-NEXT:    or a1, a1, a3
; ZVFH32-NEXT:    vmv.x.s a3, v9
; ZVFH32-NEXT:    slli a2, a2, 17
; ZVFH32-NEXT:    slli a3, a3, 17
; ZVFH32-NEXT:    srli a2, a2, 19
; ZVFH32-NEXT:    srli a3, a3, 2
; ZVFH32-NEXT:    or a1, a1, a3
; ZVFH32-NEXT:    sw a1, 0(a0)
; ZVFH32-NEXT:    sh a2, 4(a0)
; ZVFH32-NEXT:    ret
;
; ZVFH64-LABEL: fp2si_v3f32_v3i15:
; ZVFH64:       # %bb.0:
; ZVFH64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH64-NEXT:    vmv.x.s a1, v9
; ZVFH64-NEXT:    vslidedown.vi v8, v9, 1
; ZVFH64-NEXT:    vslidedown.vi v9, v9, 2
; ZVFH64-NEXT:    slli a1, a1, 49
; ZVFH64-NEXT:    vmv.x.s a2, v8
; ZVFH64-NEXT:    vmv.x.s a3, v9
; ZVFH64-NEXT:    srli a1, a1, 49
; ZVFH64-NEXT:    slli a2, a2, 49
; ZVFH64-NEXT:    slli a3, a3, 30
; ZVFH64-NEXT:    srli a2, a2, 34
; ZVFH64-NEXT:    or a1, a1, a3
; ZVFH64-NEXT:    or a1, a1, a2
; ZVFH64-NEXT:    slli a2, a1, 19
; ZVFH64-NEXT:    srli a2, a2, 51
; ZVFH64-NEXT:    sw a1, 0(a0)
; ZVFH64-NEXT:    sh a2, 4(a0)
; ZVFH64-NEXT:    ret
;
; ZVFHMIN32-LABEL: fp2si_v3f32_v3i15:
; ZVFHMIN32:       # %bb.0:
; ZVFHMIN32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFHMIN32-NEXT:    vslidedown.vi v8, v9, 2
; ZVFHMIN32-NEXT:    vmv.x.s a1, v9
; ZVFHMIN32-NEXT:    vslidedown.vi v9, v9, 1
; ZVFHMIN32-NEXT:    vmv.x.s a2, v8
; ZVFHMIN32-NEXT:    slli a1, a1, 17
; ZVFHMIN32-NEXT:    srli a1, a1, 17
; ZVFHMIN32-NEXT:    slli a3, a2, 30
; ZVFHMIN32-NEXT:    or a1, a1, a3
; ZVFHMIN32-NEXT:    vmv.x.s a3, v9
; ZVFHMIN32-NEXT:    slli a2, a2, 17
; ZVFHMIN32-NEXT:    slli a3, a3, 17
; ZVFHMIN32-NEXT:    srli a2, a2, 19
; ZVFHMIN32-NEXT:    srli a3, a3, 2
; ZVFHMIN32-NEXT:    or a1, a1, a3
; ZVFHMIN32-NEXT:    sw a1, 0(a0)
; ZVFHMIN32-NEXT:    sh a2, 4(a0)
; ZVFHMIN32-NEXT:    ret
;
; ZVFHMIN64-LABEL: fp2si_v3f32_v3i15:
; ZVFHMIN64:       # %bb.0:
; ZVFHMIN64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFHMIN64-NEXT:    vmv.x.s a1, v9
; ZVFHMIN64-NEXT:    vslidedown.vi v8, v9, 1
; ZVFHMIN64-NEXT:    vslidedown.vi v9, v9, 2
; ZVFHMIN64-NEXT:    slli a1, a1, 49
; ZVFHMIN64-NEXT:    vmv.x.s a2, v8
; ZVFHMIN64-NEXT:    vmv.x.s a3, v9
; ZVFHMIN64-NEXT:    srli a1, a1, 49
; ZVFHMIN64-NEXT:    slli a2, a2, 49
; ZVFHMIN64-NEXT:    slli a3, a3, 30
; ZVFHMIN64-NEXT:    srli a2, a2, 34
; ZVFHMIN64-NEXT:    or a1, a1, a3
; ZVFHMIN64-NEXT:    or a1, a1, a2
; ZVFHMIN64-NEXT:    slli a2, a1, 19
; ZVFHMIN64-NEXT:    srli a2, a2, 51
; ZVFHMIN64-NEXT:    sw a1, 0(a0)
; ZVFHMIN64-NEXT:    sh a2, 4(a0)
; ZVFHMIN64-NEXT:    ret
  %z = fptosi <3 x float> %x to <3 x i15>
  ret <3 x i15> %z
}

; FIXME: This is expanded when it could be widened + promoted
define <3 x i15> @fp2ui_v3f32_v3i15(<3 x float> %x) {
; ZVFH32-LABEL: fp2ui_v3f32_v3i15:
; ZVFH32:       # %bb.0:
; ZVFH32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH32-NEXT:    vslidedown.vi v8, v9, 2
; ZVFH32-NEXT:    vmv.x.s a1, v9
; ZVFH32-NEXT:    vslidedown.vi v9, v9, 1
; ZVFH32-NEXT:    vmv.x.s a2, v8
; ZVFH32-NEXT:    slli a1, a1, 16
; ZVFH32-NEXT:    srli a1, a1, 16
; ZVFH32-NEXT:    slli a3, a2, 30
; ZVFH32-NEXT:    or a1, a1, a3
; ZVFH32-NEXT:    vmv.x.s a3, v9
; ZVFH32-NEXT:    slli a2, a2, 17
; ZVFH32-NEXT:    slli a3, a3, 16
; ZVFH32-NEXT:    srli a2, a2, 19
; ZVFH32-NEXT:    srli a3, a3, 1
; ZVFH32-NEXT:    or a1, a1, a3
; ZVFH32-NEXT:    sw a1, 0(a0)
; ZVFH32-NEXT:    sh a2, 4(a0)
; ZVFH32-NEXT:    ret
;
; ZVFH64-LABEL: fp2ui_v3f32_v3i15:
; ZVFH64:       # %bb.0:
; ZVFH64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH64-NEXT:    vmv.x.s a1, v9
; ZVFH64-NEXT:    vslidedown.vi v8, v9, 1
; ZVFH64-NEXT:    vslidedown.vi v9, v9, 2
; ZVFH64-NEXT:    slli a1, a1, 48
; ZVFH64-NEXT:    vmv.x.s a2, v8
; ZVFH64-NEXT:    vmv.x.s a3, v9
; ZVFH64-NEXT:    srli a1, a1, 48
; ZVFH64-NEXT:    slli a2, a2, 48
; ZVFH64-NEXT:    slli a3, a3, 30
; ZVFH64-NEXT:    srli a2, a2, 33
; ZVFH64-NEXT:    or a1, a1, a3
; ZVFH64-NEXT:    or a1, a1, a2
; ZVFH64-NEXT:    slli a2, a1, 19
; ZVFH64-NEXT:    srli a2, a2, 51
; ZVFH64-NEXT:    sw a1, 0(a0)
; ZVFH64-NEXT:    sh a2, 4(a0)
; ZVFH64-NEXT:    ret
;
; ZVFHMIN32-LABEL: fp2ui_v3f32_v3i15:
; ZVFHMIN32:       # %bb.0:
; ZVFHMIN32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFHMIN32-NEXT:    vslidedown.vi v8, v9, 2
; ZVFHMIN32-NEXT:    vmv.x.s a1, v9
; ZVFHMIN32-NEXT:    vslidedown.vi v9, v9, 1
; ZVFHMIN32-NEXT:    vmv.x.s a2, v8
; ZVFHMIN32-NEXT:    slli a1, a1, 16
; ZVFHMIN32-NEXT:    srli a1, a1, 16
; ZVFHMIN32-NEXT:    slli a3, a2, 30
; ZVFHMIN32-NEXT:    or a1, a1, a3
; ZVFHMIN32-NEXT:    vmv.x.s a3, v9
; ZVFHMIN32-NEXT:    slli a2, a2, 17
; ZVFHMIN32-NEXT:    slli a3, a3, 16
; ZVFHMIN32-NEXT:    srli a2, a2, 19
; ZVFHMIN32-NEXT:    srli a3, a3, 1
; ZVFHMIN32-NEXT:    or a1, a1, a3
; ZVFHMIN32-NEXT:    sw a1, 0(a0)
; ZVFHMIN32-NEXT:    sh a2, 4(a0)
; ZVFHMIN32-NEXT:    ret
;
; ZVFHMIN64-LABEL: fp2ui_v3f32_v3i15:
; ZVFHMIN64:       # %bb.0:
; ZVFHMIN64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFHMIN64-NEXT:    vmv.x.s a1, v9
; ZVFHMIN64-NEXT:    vslidedown.vi v8, v9, 1
; ZVFHMIN64-NEXT:    vslidedown.vi v9, v9, 2
; ZVFHMIN64-NEXT:    slli a1, a1, 48
; ZVFHMIN64-NEXT:    vmv.x.s a2, v8
; ZVFHMIN64-NEXT:    vmv.x.s a3, v9
; ZVFHMIN64-NEXT:    srli a1, a1, 48
; ZVFHMIN64-NEXT:    slli a2, a2, 48
; ZVFHMIN64-NEXT:    slli a3, a3, 30
; ZVFHMIN64-NEXT:    srli a2, a2, 33
; ZVFHMIN64-NEXT:    or a1, a1, a3
; ZVFHMIN64-NEXT:    or a1, a1, a2
; ZVFHMIN64-NEXT:    slli a2, a1, 19
; ZVFHMIN64-NEXT:    srli a2, a2, 51
; ZVFHMIN64-NEXT:    sw a1, 0(a0)
; ZVFHMIN64-NEXT:    sh a2, 4(a0)
; ZVFHMIN64-NEXT:    ret
  %z = fptoui <3 x float> %x to <3 x i15>
  ret <3 x i15> %z
}

define <3 x i1> @fp2ui_v3f32_v3i1(<3 x float> %x) {
; CHECK-LABEL: fp2ui_v3f32_v3i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <3 x float> %x to <3 x i1>
  ret <3 x i1> %z
}

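; The same patterns scale with LMUL: the v8f32 tests below operate on m2
; register groups.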
define void @fp2si_v8f32_v8i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v8f32_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %d = fptosi <8 x float> %a to <8 x i32>
  store <8 x i32> %d, ptr %y
  ret void
}

define void @fp2ui_v8f32_v8i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v8f32_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT:    vse32.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %d = fptoui <8 x float> %a to <8 x i32>
  store <8 x i32> %d, ptr %y
  ret void
}

define <8 x i1> @fp2si_v8f32_v8i1(<8 x float> %x) {
; CHECK-LABEL: fp2si_v8f32_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT:    vand.vi v8, v10, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <8 x float> %x to <8 x i1>
  ret <8 x i1> %z
}

define <8 x i1> @fp2ui_v8f32_v8i1(<8 x float> %x) {
; CHECK-LABEL: fp2ui_v8f32_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT:    vand.vi v8, v10, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <8 x float> %x to <8 x i1>
  ret <8 x i1> %z
}

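; f32 -> i64 doubles the element width, so a single widening convert
; (vfwcvt.rtz.{x,xu}.f.v) produces the result directly.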
define void @fp2si_v2f32_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2f32_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
; CHECK-NEXT:    vse64.v v9, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x float>, ptr %x
  %d = fptosi <2 x float> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define void @fp2ui_v2f32_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v2f32_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
; CHECK-NEXT:    vse64.v v9, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x float>, ptr %x
  %d = fptoui <2 x float> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define void @fp2si_v8f32_v8i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v8f32_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v8
; CHECK-NEXT:    vse64.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %d = fptosi <8 x float> %a to <8 x i64>
  store <8 x i64> %d, ptr %y
  ret void
}

define void @fp2ui_v8f32_v8i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v8f32_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v12, v8
; CHECK-NEXT:    vse64.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %d = fptoui <8 x float> %a to <8 x i64>
  store <8 x i64> %d, ptr %y
  ret void
}

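; Zvfbfmin has no bf16 -> integer conversions, only vfwcvtbf16.f.f.v to widen
; bf16 to f32, so every bf16 test goes through f32 first.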
define void @fp2si_v2bf16_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2bf16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x bfloat>, ptr %x
  %d = fptosi <2 x bfloat> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define void @fp2ui_v2bf16_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v2bf16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x bfloat>, ptr %x
  %d = fptoui <2 x bfloat> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define <2 x i1> @fp2si_v2bf16_v2i1(<2 x bfloat> %x) {
; CHECK-LABEL: fp2si_v2bf16_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <2 x bfloat> %x to <2 x i1>
  ret <2 x i1> %z
}

define <2 x i1> @fp2ui_v2bf16_v2i1(<2 x bfloat> %x) {
; CHECK-LABEL: fp2ui_v2bf16_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <2 x bfloat> %x to <2 x i1>
  ret <2 x i1> %z
}

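; A widening convert only doubles SEW, so f16 -> i64 takes two steps
; (f16 -> f32, then f32 -> i64) regardless of Zvfh. For f16 -> i1, ZVFH can
; narrow straight from f16, while ZVFHMIN has no f16 -> integer conversions
; and must widen to f32 first.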
define void @fp2si_v2f16_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2f16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x half>, ptr %x
  %d = fptosi <2 x half> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define void @fp2ui_v2f16_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v2f16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x half>, ptr %x
  %d = fptoui <2 x half> %a to <2 x i64>
  store <2 x i64> %d, ptr %y
  ret void
}

define <2 x i1> @fp2si_v2f16_v2i1(<2 x half> %x) {
; ZVFH-LABEL: fp2si_v2f16_v2i1:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
; ZVFH-NEXT:    vand.vi v8, v9, 1
; ZVFH-NEXT:    vmsne.vi v0, v8, 0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT:    ret
  %z = fptosi <2 x half> %x to <2 x i1>
  ret <2 x i1> %z
}

define <2 x i1> @fp2ui_v2f16_v2i1(<2 x half> %x) {
; ZVFH-LABEL: fp2ui_v2f16_v2i1:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; ZVFH-NEXT:    vand.vi v8, v9, 1
; ZVFH-NEXT:    vmsne.vi v0, v8, 0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT:    ret
  %z = fptoui <2 x half> %x to <2 x i1>
  ret <2 x i1> %z
}

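; Each narrowing step halves SEW: f64 -> i8 is one vfncvt.rtz to i32 followed
; by two vnsrl.wi truncations (i32 -> i16 -> i8).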
define void @fp2si_v2f64_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2f64_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v9, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x double>, ptr %x
  %d = fptosi <2 x double> %a to <2 x i8>
  store <2 x i8> %d, ptr %y
  ret void
}

define void @fp2ui_v2f64_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v2f64_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v9, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x double>, ptr %x
  %d = fptoui <2 x double> %a to <2 x i8>
  store <2 x i8> %d, ptr %y
  ret void
}

define <2 x i1> @fp2si_v2f64_v2i1(<2 x double> %x) {
; CHECK-LABEL: fp2si_v2f64_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <2 x double> %x to <2 x i1>
  ret <2 x i1> %z
}

define <2 x i1> @fp2ui_v2f64_v2i1(<2 x double> %x) {
; CHECK-LABEL: fp2ui_v2f64_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT:    vand.vi v8, v9, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <2 x double> %x to <2 x i1>
  ret <2 x i1> %z
}

define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v8f64_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x double>, ptr %x
  %d = fptosi <8 x double> %a to <8 x i8>
  store <8 x i8> %d, ptr %y
  ret void
}

define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; CHECK-LABEL: fp2ui_v8f64_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x double>, ptr %x
  %d = fptoui <8 x double> %a to <8 x i8>
  store <8 x i8> %d, ptr %y
  ret void
}

define <8 x i1> @fp2si_v8f64_v8i1(<8 x double> %x) {
; CHECK-LABEL: fp2si_v8f64_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT:    vand.vi v8, v12, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptosi <8 x double> %x to <8 x i1>
  ret <8 x i1> %z
}

define <8 x i1> @fp2ui_v8f64_v8i1(<8 x double> %x) {
; CHECK-LABEL: fp2ui_v8f64_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT:    vand.vi v8, v12, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %z = fptoui <8 x double> %x to <8 x i1>
  ret <8 x i1> %z
}