; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=RV32,RV32-ZVE64
; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=RV64,RV64-ZVE64
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=RV32,RV32-V
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=RV64,RV64-V

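; Make sure the 16-byte-aligned scalar alloca stays 16-byte aligned even when
; the frame also contains a scalable RVV stack object whose size is computed
; at runtime from vlenb.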
define ptr @scalar_stack_align16() nounwind {
; RV32-ZVE64-LABEL: scalar_stack_align16:
; RV32-ZVE64:       # %bb.0:
; RV32-ZVE64-NEXT:    addi sp, sp, -48
; RV32-ZVE64-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32-ZVE64-NEXT:    csrr a0, vlenb
; RV32-ZVE64-NEXT:    slli a0, a0, 1
; RV32-ZVE64-NEXT:    sub sp, sp, a0
; RV32-ZVE64-NEXT:    addi a0, sp, 32
; RV32-ZVE64-NEXT:    call extern
; RV32-ZVE64-NEXT:    addi a0, sp, 16
; RV32-ZVE64-NEXT:    csrr a1, vlenb
; RV32-ZVE64-NEXT:    slli a1, a1, 1
; RV32-ZVE64-NEXT:    add sp, sp, a1
; RV32-ZVE64-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32-ZVE64-NEXT:    addi sp, sp, 48
; RV32-ZVE64-NEXT:    ret
;
; RV64-ZVE64-LABEL: scalar_stack_align16:
; RV64-ZVE64:       # %bb.0:
; RV64-ZVE64-NEXT:    addi sp, sp, -48
; RV64-ZVE64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64-ZVE64-NEXT:    csrr a0, vlenb
; RV64-ZVE64-NEXT:    slli a0, a0, 1
; RV64-ZVE64-NEXT:    sub sp, sp, a0
; RV64-ZVE64-NEXT:    addi a0, sp, 32
; RV64-ZVE64-NEXT:    call extern
; RV64-ZVE64-NEXT:    addi a0, sp, 16
; RV64-ZVE64-NEXT:    csrr a1, vlenb
; RV64-ZVE64-NEXT:    slli a1, a1, 1
; RV64-ZVE64-NEXT:    add sp, sp, a1
; RV64-ZVE64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64-ZVE64-NEXT:    addi sp, sp, 48
; RV64-ZVE64-NEXT:    ret
;
; RV32-V-LABEL: scalar_stack_align16:
; RV32-V:       # %bb.0:
; RV32-V-NEXT:    addi sp, sp, -48
; RV32-V-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32-V-NEXT:    csrr a0, vlenb
; RV32-V-NEXT:    sub sp, sp, a0
; RV32-V-NEXT:    addi a0, sp, 32
; RV32-V-NEXT:    call extern
; RV32-V-NEXT:    addi a0, sp, 16
; RV32-V-NEXT:    csrr a1, vlenb
; RV32-V-NEXT:    add sp, sp, a1
; RV32-V-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32-V-NEXT:    addi sp, sp, 48
; RV32-V-NEXT:    ret
;
; RV64-V-LABEL: scalar_stack_align16:
; RV64-V:       # %bb.0:
; RV64-V-NEXT:    addi sp, sp, -48
; RV64-V-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64-V-NEXT:    csrr a0, vlenb
; RV64-V-NEXT:    sub sp, sp, a0
; RV64-V-NEXT:    addi a0, sp, 32
; RV64-V-NEXT:    call extern
; RV64-V-NEXT:    addi a0, sp, 16
; RV64-V-NEXT:    csrr a1, vlenb
; RV64-V-NEXT:    add sp, sp, a1
; RV64-V-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64-V-NEXT:    addi sp, sp, 48
; RV64-V-NEXT:    ret
  %a = alloca <vscale x 2 x i32>
  %c = alloca i64, align 16
  call void @extern(ptr %a)
  ret ptr %c
}

declare void @extern(ptr)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}