; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+v -verify-machineinstrs \
; RUN:   --riscv-no-aliases < %s | FileCheck %s

target triple = "riscv64-unknown-unknown-elf"

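; Check that riscv.vector.tuple values can be spilled to and reloaded from an
; alloca. A tuple of five <vscale x 8 x i8> elements occupies five LMUL=1
; registers (v8-v12), so it is spilled with five vs1r.v stores at a vlenb-byte
; stride and reloaded with five vl1re8.v loads.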
define target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @load_store_m1x5(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %tuple) {
; CHECK-LABEL: load_store_m1x5:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrrs a1, vlenb, zero
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    add a2, a0, a1
; CHECK-NEXT:    vs1r.v v9, (a2)
; CHECK-NEXT:    add a3, a2, a1
; CHECK-NEXT:    vs1r.v v10, (a3)
; CHECK-NEXT:    add a4, a3, a1
; CHECK-NEXT:    vs1r.v v11, (a4)
; CHECK-NEXT:    add a1, a4, a1
; CHECK-NEXT:    vs1r.v v12, (a1)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vl1re8.v v8, (a0)
; CHECK-NEXT:    vl1re8.v v9, (a2)
; CHECK-NEXT:    vl1re8.v v10, (a3)
; CHECK-NEXT:    vl1re8.v v11, (a4)
; CHECK-NEXT:    vl1re8.v v12, (a1)
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 8 x i8>, 5), align 1
  store target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %tuple, ptr %tuple.addr, align 1
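  ; Clobbering every vector register forces the tuple to stay live in the
  ; stack slot across the inline asm instead of being forwarded in registers.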
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %0 = load target("riscv.vector.tuple", <vscale x 8 x i8>, 5), ptr %tuple.addr, align 1
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) %0
}

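; A tuple of two <vscale x 16 x i8> elements occupies two LMUL=2 register
; groups (v8 and v10), spilled with vs2r.v at a 2*vlenb stride and reloaded
; with vl2re8.v.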
define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @load_store_m2x2(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %tuple) {
; CHECK-LABEL: load_store_m2x2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrrs a1, vlenb, zero
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    slli a1, a1, 1
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vs2r.v v10, (a1)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vl2re8.v v8, (a0)
; CHECK-NEXT:    vl2re8.v v10, (a1)
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 2
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 16 x i8>, 2), align 1
  store target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %tuple, ptr %tuple.addr, align 1
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %0 = load target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr %tuple.addr, align 1
  ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %0
}

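; A tuple of two <vscale x 32 x i8> elements occupies two LMUL=4 register
; groups (v8 and v12), spilled with vs4r.v at a 4*vlenb stride and reloaded
; with vl4re8.v.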
define target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @load_store_m4x2(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %tuple) {
; CHECK-LABEL: load_store_m4x2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    csrrs a1, vlenb, zero
; CHECK-NEXT:    vs4r.v v8, (a0)
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vs4r.v v12, (a1)
; CHECK-NEXT:    #APP
; CHECK-NEXT:    #NO_APP
; CHECK-NEXT:    vl4re8.v v8, (a0)
; CHECK-NEXT:    vl4re8.v v12, (a1)
; CHECK-NEXT:    csrrs a0, vlenb, zero
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %tuple.addr = alloca target("riscv.vector.tuple", <vscale x 32 x i8>, 2), align 1
  store target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %tuple, ptr %tuple.addr, align 1
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %0 = load target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr %tuple.addr, align 1
  ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %0
}