; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+v -verify-machineinstrs \
; RUN:   --riscv-no-aliases < %s | FileCheck %s
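; Check that alloca, store, and load of target("riscv.vector.tuple", ...)
; values are lowered to whole-register spills and reloads at the correct
; scalable stack offsets, for several segment sizes.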

target triple = "riscv64-unknown-unknown-elf"

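; Five LMUL=1 segments arrive in v8-v12. Each is stored with vs1r.v at
; consecutive vlenb offsets and reloaded with vl1re8.v after inline asm
; clobbers every vector register. Note the frame reserves 8 * vlenb for
; the 5 * vlenb object.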
define target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @load_store_m1x5(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %tuple) {
; CHECK-LABEL: load_store_m1x5:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrrs a1, vlenb, zero
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    add a2, a0, a1
; CHECK-NEXT:    vs1r.v v9, (a2)
; CHECK-NEXT:    add a3, a2, a1
; CHECK-NEXT:    vs1r.v v10, (a3)
; CHECK-NEXT:    add a4, a3, a1
; CHECK-NEXT:    vs1r.v v11, (a4)
; CHECK-NEXT:    add a1, a4, a1
; CHECK-NEXT:    vs1r.v v12, (a1)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vl1re8.v v8, (a0)
; CHECK-NEXT:    vl1re8.v v9, (a2)
; CHECK-NEXT:    vl1re8.v v10, (a3)
; CHECK-NEXT:    vl1re8.v v11, (a4)
; CHECK-NEXT:    vl1re8.v v12, (a1)
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 1
  store target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %tuple, ptr %tuple.addr, align 1
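  ; Clobbering all of v0-v31 forces the reload below to come from the stack.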
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %0 = load target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr %tuple.addr, align 1
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %0
}

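; Two LMUL=2 segments (v8, v10): vs2r.v/vl2re8.v with the second segment at
; offset 2 * vlenb.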
define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @load_store_m2x2(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %tuple) {
; CHECK-LABEL: load_store_m2x2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrrs a1, vlenb, zero
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vs2r.v v10, (a1)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vl2re8.v v8, (a0)
; CHECK-NEXT:    vl2re8.v v10, (a1)
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 1
  store target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %tuple, ptr %tuple.addr, align 1
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %0 = load target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr %tuple.addr, align 1
  ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %0
}

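; Two LMUL=4 segments (v8, v12): vs4r.v/vl4re8.v with the second segment at
; offset 4 * vlenb.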
define target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @load_store_m4x2(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %tuple) {
; CHECK-LABEL: load_store_m4x2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrrs a1, vlenb, zero
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vs4r.v v12, (a1)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vl4re8.v v8, (a0)
; CHECK-NEXT:    vl4re8.v v12, (a1)
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 1
  store target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %tuple, ptr %tuple.addr, align 1
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %0 = load target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr %tuple.addr, align 1
  ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %0
}