xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll (revision b6c0f1bfa79a3a32d841ac5ab1f94c3aee3b5d90)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv64 -mattr=+v < %s 2>&1 | FileCheck %s
3
; bar: callee side of the "RVV args passed by memory" test. The eight leading
; i32 args consume all GPR argument registers (a0-a7). Per the CHECK lines
; below, the last two <vscale x 16 x i32> args do not arrive in vector
; registers: their addresses are loaded from the incoming stack (0(sp), 8(sp))
; and the vectors are then loaded with vl8re32.v — i.e. they are passed
; indirectly by memory. (Presumably %w/%x arrive in v8/v16 per the RVV calling
; convention — TODO confirm against the RISC-V psABI.)
; NOTE: the ";" lines containing CHECK are FileCheck directives autogenerated
; by update_llc_test_checks.py — do not edit them by hand.
4define <vscale x 16 x i32> @bar(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <vscale x 16 x i32> %w, <vscale x 16 x i32> %x, <vscale x 16 x i32> %y, <vscale x 16 x i32> %z) {
5; CHECK-LABEL: bar:
6; CHECK:       # %bb.0:
7; CHECK-NEXT:    ld a0, 0(sp)
8; CHECK-NEXT:    ld a1, 8(sp)
9; CHECK-NEXT:    vl8re32.v v24, (a0)
10; CHECK-NEXT:    vl8re32.v v0, (a1)
11; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
12; CHECK-NEXT:    vadd.vv v8, v8, v24
13; CHECK-NEXT:    vadd.vv v16, v16, v0
14; CHECK-NEXT:    vadd.vv v8, v8, v16
15; CHECK-NEXT:    ret
; Sum all four vector args: (%w + %y) + (%x + %z).
16  %s0 = add <vscale x 16 x i32> %w, %y
17  %s1 = add <vscale x 16 x i32> %x, %z
18  %s = add <vscale x 16 x i32> %s0, %s1
19  ret <vscale x 16 x i32> %s
20}
21
; foo: caller side of the "RVV args passed by memory" test. It forwards its
; single vector arg %x as all four vector operands of @bar. Per the CHECK
; lines, the compiler sets up a frame pointer (s0), subtracts 16*vlenb, and
; realigns sp to 64 (andi sp, sp, -64) to create aligned spill slots; it
; stores two copies of the vector there (vs8r.v via t0/t1) and passes the two
; slot addresses in the outgoing argument area at 0(sp)/8(sp) — matching the
; loads bar performs. NOTE(review): the vsetivli before the call appears to be
; scheduling/VL-state bookkeeping emitted by the backend — confirm intent
; before relying on it.
; NOTE: the ";" lines containing CHECK are FileCheck directives autogenerated
; by update_llc_test_checks.py — do not edit them by hand.
22define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <vscale x 16 x i32> %x) {
23; CHECK-LABEL: foo:
24; CHECK:       # %bb.0:
25; CHECK-NEXT:    addi sp, sp, -96
26; CHECK-NEXT:    .cfi_def_cfa_offset 96
27; CHECK-NEXT:    sd ra, 88(sp) # 8-byte Folded Spill
28; CHECK-NEXT:    sd s0, 80(sp) # 8-byte Folded Spill
29; CHECK-NEXT:    sd s1, 72(sp) # 8-byte Folded Spill
30; CHECK-NEXT:    .cfi_offset ra, -8
31; CHECK-NEXT:    .cfi_offset s0, -16
32; CHECK-NEXT:    .cfi_offset s1, -24
33; CHECK-NEXT:    addi s0, sp, 96
34; CHECK-NEXT:    .cfi_def_cfa s0, 0
35; CHECK-NEXT:    csrr t0, vlenb
36; CHECK-NEXT:    slli t0, t0, 4
37; CHECK-NEXT:    sub sp, sp, t0
38; CHECK-NEXT:    andi sp, sp, -64
39; CHECK-NEXT:    mv s1, sp
40; CHECK-NEXT:    addi sp, sp, -16
41; CHECK-NEXT:    addi t0, s1, 64
42; CHECK-NEXT:    csrr t1, vlenb
43; CHECK-NEXT:    slli t1, t1, 3
44; CHECK-NEXT:    add t1, s1, t1
45; CHECK-NEXT:    addi t1, t1, 64
46; CHECK-NEXT:    vs8r.v v8, (t0)
47; CHECK-NEXT:    vs8r.v v8, (t1)
48; CHECK-NEXT:    sd t1, 0(sp)
49; CHECK-NEXT:    sd t0, 8(sp)
50; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
51; CHECK-NEXT:    vmv8r.v v16, v8
52; CHECK-NEXT:    call bar
53; CHECK-NEXT:    addi sp, sp, 16
54; CHECK-NEXT:    addi sp, s0, -96
55; CHECK-NEXT:    .cfi_def_cfa sp, 96
56; CHECK-NEXT:    ld ra, 88(sp) # 8-byte Folded Reload
57; CHECK-NEXT:    ld s0, 80(sp) # 8-byte Folded Reload
58; CHECK-NEXT:    ld s1, 72(sp) # 8-byte Folded Reload
59; CHECK-NEXT:    .cfi_restore ra
60; CHECK-NEXT:    .cfi_restore s0
61; CHECK-NEXT:    .cfi_restore s1
62; CHECK-NEXT:    addi sp, sp, 96
63; CHECK-NEXT:    .cfi_def_cfa_offset 0
64; CHECK-NEXT:    ret
; Forward the 8 scalar args unchanged and duplicate %x into all four vector
; parameter positions, forcing two of them onto the stack.
65  %ret = call <vscale x 16 x i32> @bar(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x, <vscale x 16 x i32> %x)
66  ret <vscale x 16 x i32> %ret
67}
68