; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O2 -mtriple riscv64 -mattr=+v,+m,+zbb -enable-subreg-liveness \
; RUN:     -verify-machineinstrs < %s \
; RUN:     | FileCheck %s
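; This test runs with subregister liveness enabled (-enable-subreg-liveness)
; and exercises early-clobber tied defs: the inline asm below clobbers all 32
; vector registers, forcing the vector values that feed the final vsseg4
; segment store to be spilled and reloaded around it.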

@var_47 = dso_local global [2 x i16] [i16 -32732, i16 19439], align 2
@__const._Z3foov.var_49 = private unnamed_addr constant [2 x i16] [i16 157, i16 24062], align 2
@__const._Z3foov.var_48 = private unnamed_addr constant [2 x i8] c"\AEN", align 1
@__const._Z3foov.var_46 = private unnamed_addr constant [2 x i16] [i16 729, i16 -32215], align 2
@__const._Z3foov.var_45 = private unnamed_addr constant [2 x i16] [i16 -27462, i16 -1435], align 2
@__const._Z3foov.var_44 = private unnamed_addr constant [2 x i16] [i16 22611, i16 -18435], align 2
@__const._Z3foov.var_40 = private unnamed_addr constant [2 x i16] [i16 -19932, i16 -26252], align 2

define void @_Z3foov() {
; CHECK-LABEL: _Z3foov:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a1, a0, 3
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x09, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 9 * vlenb
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_49)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_49)
; CHECK-NEXT:    vsetivli zero, 2, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_48)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_48)
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs1r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_46)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_46)
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_45)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_45)
; CHECK-NEXT:    vle16.v v12, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vs2r.v v12, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vs2r.v v14, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_40)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_40)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vsetivli zero, 2, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    lui a0, 1048572
; CHECK-NEXT:    addi a0, a0, 928
; CHECK-NEXT:    vmsbc.vx v0, v8, a0
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2r.v v12, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vl2r.v v14, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl1r.v v14, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
; CHECK-NEXT:    vsext.vf2 v8, v14, v0.t
; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_44)
; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_44)
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v14, (a0)
; CHECK-NEXT:    lui a0, %hi(var_47)
; CHECK-NEXT:    addi a0, a0, %lo(var_47)
; CHECK-NEXT:    vsseg4e16.v v8, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a1, a0, 3
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
entry:
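  ; Load the constant vector operands with VL=2; these stay live across the
  ; inline asm below.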
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_49, i64 2)
  %1 = tail call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> undef, ptr nonnull @__const._Z3foov.var_48, i64 2)
  %2 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_46, i64 2)
  %3 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_45, i64 2)
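  ; Clobber every vector register so all vector values live at this point must
  ; be spilled before the asm and reloaded after it.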
  tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %4 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_44, i64 2)
  %5 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1)
  %6 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_40, i64 2)
  %7 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1)
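  ; The vmsbc borrow-out mask drives the masked widening sext below.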
  %8 = tail call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> %6, i16 -15456, i64 2)
  %9 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1)
  %10 = tail call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %8, i64 2, i64 0)
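  ; Assemble the four fields into a vector tuple for the segment store.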
  %v_0 = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, <vscale x 8 x i16> %10, i32 0)
  %v_1 = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %v_0, <vscale x 8 x i16> %2, i32 1)
  %v_2 = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %v_1, <vscale x 8 x i16> %3, i32 2)
  %v_3 = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %v_2, <vscale x 8 x i16> %4, i32 3)
  tail call void @llvm.riscv.vsseg4.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %v_3, ptr nonnull @var_47, i64 2, i64 4)
  ret void
}

declare <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16>, ptr nocapture, i64)

declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8>, ptr nocapture, i64)

declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16>, i16, i64)

declare <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64 immarg)

declare target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_4t.nxv8i16(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), <vscale x 8 x i16>, i32)

declare void @llvm.riscv.vsseg4.nxv8i16.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4), ptr nocapture, i64, i64)