; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,RV32-FP
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,RV64-FP

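; Bitcasts from fixed-length floating-point vectors to same-sized scalar
; integer and floating-point types.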
define i16 @bitcast_v1f16_i16(<1 x half> %a) {
; CHECK-LABEL: bitcast_v1f16_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x half> %a to i16
  ret i16 %b
}

define half @bitcast_v1f16_f16(<1 x half> %a) {
; CHECK-LABEL: bitcast_v1f16_f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x half> %a to half
  ret half %b
}

define i32 @bitcast_v2f16_i32(<2 x half> %a) {
; CHECK-LABEL: bitcast_v2f16_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <2 x half> %a to i32
  ret i32 %b
}

define i32 @bitcast_v1f32_i32(<1 x float> %a) {
; CHECK-LABEL: bitcast_v1f32_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x float> %a to i32
  ret i32 %b
}

define float @bitcast_v2f16_f32(<2 x half> %a) {
; CHECK-LABEL: bitcast_v2f16_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <2 x half> %a to float
  ret float %b
}

define float @bitcast_v1f32_f32(<1 x float> %a) {
; CHECK-LABEL: bitcast_v1f32_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x float> %a to float
  ret float %b
}

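; i64 results are returned in a single GPR on RV64, but split across a0/a1 on
; RV32, so the RV32 checks extract the high word with a 32-bit vsrl.vx before
; reading both halves with vmv.x.s.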
define i64 @bitcast_v4f16_i64(<4 x half> %a) {
; RV32-FP-LABEL: bitcast_v4f16_i64:
; RV32-FP:       # %bb.0:
; RV32-FP-NEXT:    li a0, 32
; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-FP-NEXT:    vsrl.vx v9, v8, a0
; RV32-FP-NEXT:    vmv.x.s a1, v9
; RV32-FP-NEXT:    vmv.x.s a0, v8
; RV32-FP-NEXT:    ret
;
; RV64-FP-LABEL: bitcast_v4f16_i64:
; RV64-FP:       # %bb.0:
; RV64-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-FP-NEXT:    vmv.x.s a0, v8
; RV64-FP-NEXT:    ret
  %b = bitcast <4 x half> %a to i64
  ret i64 %b
}

define i64 @bitcast_v2f32_i64(<2 x float> %a) {
; RV32-FP-LABEL: bitcast_v2f32_i64:
; RV32-FP:       # %bb.0:
; RV32-FP-NEXT:    li a0, 32
; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-FP-NEXT:    vsrl.vx v9, v8, a0
; RV32-FP-NEXT:    vmv.x.s a1, v9
; RV32-FP-NEXT:    vmv.x.s a0, v8
; RV32-FP-NEXT:    ret
;
; RV64-FP-LABEL: bitcast_v2f32_i64:
; RV64-FP:       # %bb.0:
; RV64-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-FP-NEXT:    vmv.x.s a0, v8
; RV64-FP-NEXT:    ret
  %b = bitcast <2 x float> %a to i64
  ret i64 %b
}

define i64 @bitcast_v1f64_i64(<1 x double> %a) {
; RV32-FP-LABEL: bitcast_v1f64_i64:
; RV32-FP:       # %bb.0:
; RV32-FP-NEXT:    li a0, 32
; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-FP-NEXT:    vsrl.vx v9, v8, a0
; RV32-FP-NEXT:    vmv.x.s a1, v9
; RV32-FP-NEXT:    vmv.x.s a0, v8
; RV32-FP-NEXT:    ret
;
; RV64-FP-LABEL: bitcast_v1f64_i64:
; RV64-FP:       # %bb.0:
; RV64-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-FP-NEXT:    vmv.x.s a0, v8
; RV64-FP-NEXT:    ret
  %b = bitcast <1 x double> %a to i64
  ret i64 %b
}

define double @bitcast_v4f16_f64(<4 x half> %a) {
; CHECK-LABEL: bitcast_v4f16_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <4 x half> %a to double
  ret double %b
}

define double @bitcast_v2f32_f64(<2 x float> %a) {
; CHECK-LABEL: bitcast_v2f32_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <2 x float> %a to double
  ret double %b
}

define double @bitcast_v1f64_f64(<1 x double> %a) {
; CHECK-LABEL: bitcast_v1f64_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %b = bitcast <1 x double> %a to double
  ret double %b
}

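; The reverse direction: bitcasts from scalar integer and floating-point
; values into fixed-length vectors.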
define <1 x half> @bitcast_i16_v1f16(i16 %a) {
; CHECK-LABEL: bitcast_i16_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %b = bitcast i16 %a to <1 x half>
  ret <1 x half> %b
}

define <2 x half> @bitcast_i32_v2f16(i32 %a) {
; CHECK-LABEL: bitcast_i32_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %b = bitcast i32 %a to <2 x half>
  ret <2 x half> %b
}

define <1 x float> @bitcast_i32_v1f32(i32 %a) {
; CHECK-LABEL: bitcast_i32_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %b = bitcast i32 %a to <1 x float>
  ret <1 x float> %b
}

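; On RV32 an i64 argument arrives split across a0/a1, so the two halves are
; assembled into the vector with vslide1down.vx; RV64 uses a single vmv.s.x.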
define <4 x half> @bitcast_i64_v4f16(i64 %a) {
; RV32-FP-LABEL: bitcast_i64_v4f16:
; RV32-FP:       # %bb.0:
; RV32-FP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32-FP-NEXT:    vslide1down.vx v8, v8, a0
; RV32-FP-NEXT:    vslide1down.vx v8, v8, a1
; RV32-FP-NEXT:    ret
;
; RV64-FP-LABEL: bitcast_i64_v4f16:
; RV64-FP:       # %bb.0:
; RV64-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-FP-NEXT:    vmv.s.x v8, a0
; RV64-FP-NEXT:    ret
  %b = bitcast i64 %a to <4 x half>
  ret <4 x half> %b
}

define <2 x float> @bitcast_i64_v2f32(i64 %a) {
; RV32-FP-LABEL: bitcast_i64_v2f32:
; RV32-FP:       # %bb.0:
; RV32-FP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32-FP-NEXT:    vslide1down.vx v8, v8, a0
; RV32-FP-NEXT:    vslide1down.vx v8, v8, a1
; RV32-FP-NEXT:    ret
;
; RV64-FP-LABEL: bitcast_i64_v2f32:
; RV64-FP:       # %bb.0:
; RV64-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-FP-NEXT:    vmv.s.x v8, a0
; RV64-FP-NEXT:    ret
  %b = bitcast i64 %a to <2 x float>
  ret <2 x float> %b
}

define <1 x double> @bitcast_i64_v1f64(i64 %a) {
; RV32-FP-LABEL: bitcast_i64_v1f64:
; RV32-FP:       # %bb.0:
; RV32-FP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32-FP-NEXT:    vslide1down.vx v8, v8, a0
; RV32-FP-NEXT:    vslide1down.vx v8, v8, a1
; RV32-FP-NEXT:    ret
;
; RV64-FP-LABEL: bitcast_i64_v1f64:
; RV64-FP:       # %bb.0:
; RV64-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-FP-NEXT:    vmv.s.x v8, a0
; RV64-FP-NEXT:    ret
  %b = bitcast i64 %a to <1 x double>
  ret <1 x double> %b
}

define <1 x i16> @bitcast_f16_v1i16(half %a) {
; CHECK-LABEL: bitcast_f16_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast half %a to <1 x i16>
  ret <1 x i16> %b
}

define <1 x half> @bitcast_f16_v1f16(half %a) {
; CHECK-LABEL: bitcast_f16_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast half %a to <1 x half>
  ret <1 x half> %b
}

define <2 x i16> @bitcast_f32_v2i16(float %a) {
; CHECK-LABEL: bitcast_f32_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast float %a to <2 x i16>
  ret <2 x i16> %b
}

define <2 x half> @bitcast_f32_v2f16(float %a) {
; CHECK-LABEL: bitcast_f32_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast float %a to <2 x half>
  ret <2 x half> %b
}

define <1 x i32> @bitcast_f32_v1i32(float %a) {
; CHECK-LABEL: bitcast_f32_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast float %a to <1 x i32>
  ret <1 x i32> %b
}

define <1 x float> @bitcast_f32_v1f32(float %a) {
; CHECK-LABEL: bitcast_f32_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast float %a to <1 x float>
  ret <1 x float> %b
}

define <4 x i16> @bitcast_f64_v4i16(double %a) {
; CHECK-LABEL: bitcast_f64_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast double %a to <4 x i16>
  ret <4 x i16> %b
}

define <4 x half> @bitcast_f64_v4f16(double %a) {
; CHECK-LABEL: bitcast_f64_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast double %a to <4 x half>
  ret <4 x half> %b
}

define <2 x i32> @bitcast_f64_v2i32(double %a) {
; CHECK-LABEL: bitcast_f64_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast double %a to <2 x i32>
  ret <2 x i32> %b
}

define <2 x float> @bitcast_f64_v2f32(double %a) {
; CHECK-LABEL: bitcast_f64_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast double %a to <2 x float>
  ret <2 x float> %b
}

define <1 x i64> @bitcast_f64_v1i64(double %a) {
; CHECK-LABEL: bitcast_f64_v1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast double %a to <1 x i64>
  ret <1 x i64> %b
}

define <1 x double> @bitcast_f64_v1f64(double %a) {
; CHECK-LABEL: bitcast_f64_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
  %b = bitcast double %a to <1 x double>
  ret <1 x double> %b
}