; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll (revision 97982a8c605fac7c86d02e641a6cd7898b3ca343)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh,+zfbfmin,+zvfbfmin -verify-machineinstrs \
; RUN:     -target-abi=ilp32d < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh,+zfbfmin,+zvfbfmin -verify-machineinstrs \
; RUN:     -target-abi=lp64d < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64
; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin -verify-machineinstrs \
; RUN:     -target-abi=ilp32d < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin -verify-machineinstrs \
; RUN:     -target-abi=lp64d < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64
; RUN: llc -mtriple=riscv32 -mattr=+zve32f,+zvl128b,+d,+zvfh,+zfbfmin,+zvfbfmin \
; RUN:     -verify-machineinstrs -target-abi=ilp32d < %s | FileCheck %s \
; RUN:     --check-prefixes=ELEN32,RV32ELEN32
; RUN: llc -mtriple=riscv64 -mattr=+zve32f,+zvl128b,+d,+zvfh,+zfbfmin,+zvfbfmin \
; RUN:     -verify-machineinstrs -target-abi=lp64d < %s | FileCheck %s \
; RUN:     --check-prefixes=ELEN32,RV64ELEN32

define <32 x i1> @bitcast_v4i8_v32i1(<4 x i8> %a, <32 x i1> %b) {
; CHECK-LABEL: bitcast_v4i8_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v4i8_v32i1:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    li a0, 32
; ELEN32-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; ELEN32-NEXT:    vmxor.mm v0, v0, v8
; ELEN32-NEXT:    ret
  %c = bitcast <4 x i8> %a to <32 x i1>
  %d = xor <32 x i1> %b, %c
  ret <32 x i1> %d
}

define i8 @bitcast_v1i8_i8(<1 x i8> %a) {
; CHECK-LABEL: bitcast_v1i8_i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v1i8_i8:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; ELEN32-NEXT:    vmv.x.s a0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <1 x i8> %a to i8
  ret i8 %b
}

define i16 @bitcast_v2i8_i16(<2 x i8> %a) {
; CHECK-LABEL: bitcast_v2i8_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v2i8_i16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vmv.x.s a0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <2 x i8> %a to i16
  ret i16 %b
}

define i16 @bitcast_v1i16_i16(<1 x i16> %a) {
; CHECK-LABEL: bitcast_v1i16_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v1i16_i16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vmv.x.s a0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <1 x i16> %a to i16
  ret i16 %b
}

define i32 @bitcast_v4i8_i32(<4 x i8> %a) {
; CHECK-LABEL: bitcast_v4i8_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v4i8_i32:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; ELEN32-NEXT:    vmv.x.s a0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <4 x i8> %a to i32
  ret i32 %b
}

define i32 @bitcast_v2i16_i32(<2 x i16> %a) {
; CHECK-LABEL: bitcast_v2i16_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v2i16_i32:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; ELEN32-NEXT:    vmv.x.s a0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <2 x i16> %a to i32
  ret i32 %b
}

define i32 @bitcast_v1i32_i32(<1 x i32> %a) {
; CHECK-LABEL: bitcast_v1i32_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v1i32_i32:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; ELEN32-NEXT:    vmv.x.s a0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <1 x i32> %a to i32
  ret i32 %b
}

define i64 @bitcast_v8i8_i64(<8 x i8> %a) {
; RV32-LABEL: bitcast_v8i8_i64:
; RV32:       # %bb.0:
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vx v9, v8, a0
; RV32-NEXT:    vmv.x.s a1, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v8i8_i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
;
; RV32ELEN32-LABEL: bitcast_v8i8_i64:
; RV32ELEN32:       # %bb.0:
; RV32ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32ELEN32-NEXT:    vmv.x.s a0, v8
; RV32ELEN32-NEXT:    vslidedown.vi v8, v8, 1
; RV32ELEN32-NEXT:    vmv.x.s a1, v8
; RV32ELEN32-NEXT:    ret
;
; RV64ELEN32-LABEL: bitcast_v8i8_i64:
; RV64ELEN32:       # %bb.0:
; RV64ELEN32-NEXT:    addi sp, sp, -16
; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 16
; RV64ELEN32-NEXT:    addi a0, sp, 8
; RV64ELEN32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV64ELEN32-NEXT:    vse8.v v8, (a0)
; RV64ELEN32-NEXT:    ld a0, 8(sp)
; RV64ELEN32-NEXT:    addi sp, sp, 16
; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 0
; RV64ELEN32-NEXT:    ret
  %b = bitcast <8 x i8> %a to i64
  ret i64 %b
}

define i64 @bitcast_v4i16_i64(<4 x i16> %a) {
; RV32-LABEL: bitcast_v4i16_i64:
; RV32:       # %bb.0:
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vx v9, v8, a0
; RV32-NEXT:    vmv.x.s a1, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v4i16_i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
;
; RV32ELEN32-LABEL: bitcast_v4i16_i64:
; RV32ELEN32:       # %bb.0:
; RV32ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32ELEN32-NEXT:    vmv.x.s a0, v8
; RV32ELEN32-NEXT:    vslidedown.vi v8, v8, 1
; RV32ELEN32-NEXT:    vmv.x.s a1, v8
; RV32ELEN32-NEXT:    ret
;
; RV64ELEN32-LABEL: bitcast_v4i16_i64:
; RV64ELEN32:       # %bb.0:
; RV64ELEN32-NEXT:    addi sp, sp, -16
; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 16
; RV64ELEN32-NEXT:    addi a0, sp, 8
; RV64ELEN32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64ELEN32-NEXT:    vse16.v v8, (a0)
; RV64ELEN32-NEXT:    ld a0, 8(sp)
; RV64ELEN32-NEXT:    addi sp, sp, 16
; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 0
; RV64ELEN32-NEXT:    ret
  %b = bitcast <4 x i16> %a to i64
  ret i64 %b
}

define i64 @bitcast_v2i32_i64(<2 x i32> %a) {
; RV32-LABEL: bitcast_v2i32_i64:
; RV32:       # %bb.0:
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vx v9, v8, a0
; RV32-NEXT:    vmv.x.s a1, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v2i32_i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
;
; RV32ELEN32-LABEL: bitcast_v2i32_i64:
; RV32ELEN32:       # %bb.0:
; RV32ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32ELEN32-NEXT:    vmv.x.s a0, v8
; RV32ELEN32-NEXT:    vslidedown.vi v8, v8, 1
; RV32ELEN32-NEXT:    vmv.x.s a1, v8
; RV32ELEN32-NEXT:    ret
;
; RV64ELEN32-LABEL: bitcast_v2i32_i64:
; RV64ELEN32:       # %bb.0:
; RV64ELEN32-NEXT:    addi sp, sp, -16
; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 16
; RV64ELEN32-NEXT:    addi a0, sp, 8
; RV64ELEN32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV64ELEN32-NEXT:    vse32.v v8, (a0)
; RV64ELEN32-NEXT:    ld a0, 8(sp)
; RV64ELEN32-NEXT:    addi sp, sp, 16
; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 0
; RV64ELEN32-NEXT:    ret
  %b = bitcast <2 x i32> %a to i64
  ret i64 %b
}

define i64 @bitcast_v1i64_i64(<1 x i64> %a) {
; RV32-LABEL: bitcast_v1i64_i64:
; RV32:       # %bb.0:
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vx v9, v8, a0
; RV32-NEXT:    vmv.x.s a1, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_v1i64_i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v1i64_i64:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    ret
  %b = bitcast <1 x i64> %a to i64
  ret i64 %b
}

define bfloat @bitcast_v2i8_bf16(<2 x i8> %a) {
; CHECK-LABEL: bitcast_v2i8_bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    fmv.h.x fa0, a0
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v2i8_bf16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vmv.x.s a0, v8
; ELEN32-NEXT:    fmv.h.x fa0, a0
; ELEN32-NEXT:    ret
  %b = bitcast <2 x i8> %a to bfloat
  ret bfloat %b
}

define bfloat @bitcast_v1i16_bf16(<1 x i16> %a) {
; CHECK-LABEL: bitcast_v1i16_bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    fmv.h.x fa0, a0
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v1i16_bf16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vmv.x.s a0, v8
; ELEN32-NEXT:    fmv.h.x fa0, a0
; ELEN32-NEXT:    ret
  %b = bitcast <1 x i16> %a to bfloat
  ret bfloat %b
}

define bfloat @bitcast_v1bf16_bf16(<1 x bfloat> %a) {
; CHECK-LABEL: bitcast_v1bf16_bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    fmv.h.x fa0, a0
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v1bf16_bf16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vmv.x.s a0, v8
; ELEN32-NEXT:    fmv.h.x fa0, a0
; ELEN32-NEXT:    ret
  %b = bitcast <1 x bfloat> %a to bfloat
  ret bfloat %b
}

define <1 x bfloat> @bitcast_bf16_v1bf16(bfloat %a) {
; CHECK-LABEL: bitcast_bf16_v1bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_bf16_v1bf16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    fmv.x.h a0, fa0
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vmv.s.x v8, a0
; ELEN32-NEXT:    ret
  %b = bitcast bfloat %a to <1 x bfloat>
  ret <1 x bfloat> %b
}

define half @bitcast_v2i8_f16(<2 x i8> %a) {
; ZVFH-LABEL: bitcast_v2i8_f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: bitcast_v2i8_f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    fmv.h.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v2i8_f16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vfmv.f.s fa0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <2 x i8> %a to half
  ret half %b
}

define half @bitcast_v1i16_f16(<1 x i16> %a) {
; ZVFH-LABEL: bitcast_v1i16_f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: bitcast_v1i16_f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    fmv.h.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v1i16_f16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vfmv.f.s fa0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <1 x i16> %a to half
  ret half %b
}

define half @bitcast_v1f16_f16(<1 x half> %a) {
; ZVFH-LABEL: bitcast_v1f16_f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: bitcast_v1f16_f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    fmv.h.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v1f16_f16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vfmv.f.s fa0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <1 x half> %a to half
  ret half %b
}

define <1 x half> @bitcast_f16_v1f16(half %a) {
; ZVFH-LABEL: bitcast_f16_v1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.s.f v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: bitcast_f16_v1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.s.x v8, a0
; ZVFHMIN-NEXT:    ret
;
; ELEN32-LABEL: bitcast_f16_v1f16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vfmv.s.f v8, fa0
; ELEN32-NEXT:    ret
  %b = bitcast half %a to <1 x half>
  ret <1 x half> %b
}

define float @bitcast_v4i8_f32(<4 x i8> %a) {
; CHECK-LABEL: bitcast_v4i8_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v4i8_f32:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; ELEN32-NEXT:    vfmv.f.s fa0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <4 x i8> %a to float
  ret float %b
}

define float @bitcast_v2i16_f32(<2 x i16> %a) {
; CHECK-LABEL: bitcast_v2i16_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v2i16_f32:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; ELEN32-NEXT:    vfmv.f.s fa0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <2 x i16> %a to float
  ret float %b
}

define float @bitcast_v1i32_f32(<1 x i32> %a) {
; CHECK-LABEL: bitcast_v1i32_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v1i32_f32:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; ELEN32-NEXT:    vfmv.f.s fa0, v8
; ELEN32-NEXT:    ret
  %b = bitcast <1 x i32> %a to float
  ret float %b
}

define double @bitcast_v8i8_f64(<8 x i8> %a) {
; CHECK-LABEL: bitcast_v8i8_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v8i8_f64:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    addi sp, sp, -16
; ELEN32-NEXT:    .cfi_def_cfa_offset 16
; ELEN32-NEXT:    addi a0, sp, 8
; ELEN32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ELEN32-NEXT:    vse8.v v8, (a0)
; ELEN32-NEXT:    fld fa0, 8(sp)
; ELEN32-NEXT:    addi sp, sp, 16
; ELEN32-NEXT:    .cfi_def_cfa_offset 0
; ELEN32-NEXT:    ret
  %b = bitcast <8 x i8> %a to double
  ret double %b
}

define double @bitcast_v4i16_f64(<4 x i16> %a) {
; CHECK-LABEL: bitcast_v4i16_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v4i16_f64:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    addi sp, sp, -16
; ELEN32-NEXT:    .cfi_def_cfa_offset 16
; ELEN32-NEXT:    addi a0, sp, 8
; ELEN32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ELEN32-NEXT:    vse16.v v8, (a0)
; ELEN32-NEXT:    fld fa0, 8(sp)
; ELEN32-NEXT:    addi sp, sp, 16
; ELEN32-NEXT:    .cfi_def_cfa_offset 0
; ELEN32-NEXT:    ret
  %b = bitcast <4 x i16> %a to double
  ret double %b
}

define double @bitcast_v2i32_f64(<2 x i32> %a) {
; CHECK-LABEL: bitcast_v2i32_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_v2i32_f64:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    addi sp, sp, -16
; ELEN32-NEXT:    .cfi_def_cfa_offset 16
; ELEN32-NEXT:    addi a0, sp, 8
; ELEN32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; ELEN32-NEXT:    vse32.v v8, (a0)
; ELEN32-NEXT:    fld fa0, 8(sp)
; ELEN32-NEXT:    addi sp, sp, 16
; ELEN32-NEXT:    .cfi_def_cfa_offset 0
; ELEN32-NEXT:    ret
  %b = bitcast <2 x i32> %a to double
  ret double %b
}

define double @bitcast_v1i64_f64(<1 x i64> %a) {
; CHECK-LABEL: bitcast_v1i64_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
;
; RV32ELEN32-LABEL: bitcast_v1i64_f64:
; RV32ELEN32:       # %bb.0:
; RV32ELEN32-NEXT:    addi sp, sp, -16
; RV32ELEN32-NEXT:    .cfi_def_cfa_offset 16
; RV32ELEN32-NEXT:    sw a0, 8(sp)
; RV32ELEN32-NEXT:    sw a1, 12(sp)
; RV32ELEN32-NEXT:    fld fa0, 8(sp)
; RV32ELEN32-NEXT:    addi sp, sp, 16
; RV32ELEN32-NEXT:    .cfi_def_cfa_offset 0
; RV32ELEN32-NEXT:    ret
;
; RV64ELEN32-LABEL: bitcast_v1i64_f64:
; RV64ELEN32:       # %bb.0:
; RV64ELEN32-NEXT:    fmv.d.x fa0, a0
; RV64ELEN32-NEXT:    ret
  %b = bitcast <1 x i64> %a to double
  ret double %b
}

define <1 x i16> @bitcast_i16_v1i16(i16 %a) {
; CHECK-LABEL: bitcast_i16_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_i16_v1i16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT:    vmv.s.x v8, a0
; ELEN32-NEXT:    ret
  %b = bitcast i16 %a to <1 x i16>
  ret <1 x i16> %b
}

define <2 x i16> @bitcast_i32_v2i16(i32 %a) {
; CHECK-LABEL: bitcast_i32_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_i32_v2i16:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; ELEN32-NEXT:    vmv.s.x v8, a0
; ELEN32-NEXT:    ret
  %b = bitcast i32 %a to <2 x i16>
  ret <2 x i16> %b
}

define <1 x i32> @bitcast_i32_v1i32(i32 %a) {
; CHECK-LABEL: bitcast_i32_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
;
; ELEN32-LABEL: bitcast_i32_v1i32:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; ELEN32-NEXT:    vmv.s.x v8, a0
; ELEN32-NEXT:    ret
  %b = bitcast i32 %a to <1 x i32>
  ret <1 x i32> %b
}

define <4 x i16> @bitcast_i64_v4i16(i64 %a) {
; RV32-LABEL: bitcast_i64_v4i16:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_i64_v4i16:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
;
; RV32ELEN32-LABEL: bitcast_i64_v4i16:
; RV32ELEN32:       # %bb.0:
; RV32ELEN32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32ELEN32-NEXT:    vmv.v.x v8, a0
; RV32ELEN32-NEXT:    vslide1down.vx v8, v8, a1
; RV32ELEN32-NEXT:    ret
;
; RV64ELEN32-LABEL: bitcast_i64_v4i16:
; RV64ELEN32:       # %bb.0:
; RV64ELEN32-NEXT:    addi sp, sp, -16
; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 16
; RV64ELEN32-NEXT:    sd a0, 8(sp)
; RV64ELEN32-NEXT:    addi a0, sp, 8
; RV64ELEN32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; RV64ELEN32-NEXT:    vle16.v v8, (a0)
; RV64ELEN32-NEXT:    addi sp, sp, 16
; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 0
; RV64ELEN32-NEXT:    ret
  %b = bitcast i64 %a to <4 x i16>
  ret <4 x i16> %b
}

define <2 x i32> @bitcast_i64_v2i32(i64 %a) {
; RV32-LABEL: bitcast_i64_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_i64_v2i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
;
; RV32ELEN32-LABEL: bitcast_i64_v2i32:
; RV32ELEN32:       # %bb.0:
; RV32ELEN32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32ELEN32-NEXT:    vmv.v.x v8, a0
; RV32ELEN32-NEXT:    vslide1down.vx v8, v8, a1
; RV32ELEN32-NEXT:    ret
;
; RV64ELEN32-LABEL: bitcast_i64_v2i32:
; RV64ELEN32:       # %bb.0:
; RV64ELEN32-NEXT:    addi sp, sp, -16
; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 16
; RV64ELEN32-NEXT:    sd a0, 8(sp)
; RV64ELEN32-NEXT:    addi a0, sp, 8
; RV64ELEN32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV64ELEN32-NEXT:    vle32.v v8, (a0)
; RV64ELEN32-NEXT:    addi sp, sp, 16
; RV64ELEN32-NEXT:    .cfi_def_cfa_offset 0
; RV64ELEN32-NEXT:    ret
  %b = bitcast i64 %a to <2 x i32>
  ret <2 x i32> %b
}

define <1 x i64> @bitcast_i64_v1i64(i64 %a) {
; RV32-LABEL: bitcast_i64_v1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: bitcast_i64_v1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
;
; ELEN32-LABEL: bitcast_i64_v1i64:
; ELEN32:       # %bb.0:
; ELEN32-NEXT:    ret
  %b = bitcast i64 %a to <1 x i64>
  ret <1 x i64> %b
}