; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s

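; With only +v (no zvfbfmin), the fpext from <8 x bfloat> to <8 x float> is
; scalarized: each bf16 element is moved to a GPR with fmv.x.w, shifted left
; by 16 to form the f32 bit pattern, and the result vector is rebuilt
; element by element with vmv.s.x/vslideup.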
define <8 x float> @fpext_v8bf16(<8 x bfloat> %x) {
; CHECK-LABEL: fpext_v8bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.w a0, fa0
; CHECK-NEXT:    fmv.x.w a1, fa1
; CHECK-NEXT:    fmv.x.w a2, fa2
; CHECK-NEXT:    fmv.x.w a3, fa3
; CHECK-NEXT:    fmv.x.w a4, fa4
; CHECK-NEXT:    fmv.x.w a5, fa5
; CHECK-NEXT:    fmv.x.w a6, fa6
; CHECK-NEXT:    fmv.x.w a7, fa7
; CHECK-NEXT:    slli a7, a7, 16
; CHECK-NEXT:    slli a6, a6, 16
; CHECK-NEXT:    slli a5, a5, 16
; CHECK-NEXT:    slli a4, a4, 16
; CHECK-NEXT:    slli a3, a3, 16
; CHECK-NEXT:    slli a2, a2, 16
; CHECK-NEXT:    slli a1, a1, 16
; CHECK-NEXT:    slli a0, a0, 16
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a7
; CHECK-NEXT:    vmv.s.x v9, a6
; CHECK-NEXT:    vmv.s.x v10, a5
; CHECK-NEXT:    vmv.s.x v12, a4
; CHECK-NEXT:    vmv.s.x v11, a3
; CHECK-NEXT:    vmv.s.x v13, a2
; CHECK-NEXT:    vslideup.vi v9, v8, 1
; CHECK-NEXT:    vmv.s.x v14, a1
; CHECK-NEXT:    vslideup.vi v12, v10, 1
; CHECK-NEXT:    vslideup.vi v13, v11, 1
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    vslideup.vi v8, v14, 1
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v12, v9, 2
; CHECK-NEXT:    vslideup.vi v8, v13, 2
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v12, 4
; CHECK-NEXT:    ret
  %y = fpext <8 x bfloat> %x to <8 x float>
  ret <8 x float> %y
}

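; The second test exercises the same scalarized lowering; as written it also
; takes <8 x bfloat> input, so the expected code matches fpext_v8bf16 above.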
define <8 x float> @fpext_v8f16(<8 x bfloat> %x) {
; CHECK-LABEL: fpext_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.w a0, fa0
; CHECK-NEXT:    fmv.x.w a1, fa1
; CHECK-NEXT:    fmv.x.w a2, fa2
; CHECK-NEXT:    fmv.x.w a3, fa3
; CHECK-NEXT:    fmv.x.w a4, fa4
; CHECK-NEXT:    fmv.x.w a5, fa5
; CHECK-NEXT:    fmv.x.w a6, fa6
; CHECK-NEXT:    fmv.x.w a7, fa7
; CHECK-NEXT:    slli a7, a7, 16
; CHECK-NEXT:    slli a6, a6, 16
; CHECK-NEXT:    slli a5, a5, 16
; CHECK-NEXT:    slli a4, a4, 16
; CHECK-NEXT:    slli a3, a3, 16
; CHECK-NEXT:    slli a2, a2, 16
; CHECK-NEXT:    slli a1, a1, 16
; CHECK-NEXT:    slli a0, a0, 16
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a7
; CHECK-NEXT:    vmv.s.x v9, a6
; CHECK-NEXT:    vmv.s.x v10, a5
; CHECK-NEXT:    vmv.s.x v12, a4
; CHECK-NEXT:    vmv.s.x v11, a3
; CHECK-NEXT:    vmv.s.x v13, a2
; CHECK-NEXT:    vslideup.vi v9, v8, 1
; CHECK-NEXT:    vmv.s.x v14, a1
; CHECK-NEXT:    vslideup.vi v12, v10, 1
; CHECK-NEXT:    vslideup.vi v13, v11, 1
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    vslideup.vi v8, v14, 1
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v12, v9, 2
; CHECK-NEXT:    vslideup.vi v8, v13, 2
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v12, 4
; CHECK-NEXT:    ret
  %y = fpext <8 x bfloat> %x to <8 x float>
  ret <8 x float> %y
}