xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll (revision ff9af4c43ad71eeba2cabe99609cfaa0fd54c1d0)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,VLENUNKNOWN
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,VLENUNKNOWN
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -riscv-v-vector-bits-max=128 -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,VLEN128
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -riscv-v-vector-bits-max=128 -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,VLEN128
; Operands are (AVL, SEW-code, LMUL-code) for vsetvli and (SEW-code,
; LMUL-code) for vsetvlimax; the iXLen placeholder is rewritten to
; i32/i64 by the sed invocations in the RUN lines above.
declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen, iXLen)
declare iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen, iXLen)
15
; SEW-code 0 (e8), LMUL-code 0 (m1): expects a plain vsetvli with the AVL
; and returned VL both in a0.
define iXLen @test_vsetvli_e8m1(iXLen %avl) nounwind {
; CHECK-LABEL: test_vsetvli_e8m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, ma
; CHECK-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
  ret iXLen %vl
}
24
; SEW-code 1 (e16), LMUL-code 6 (mf4): exercises a fractional LMUL encoding.
define iXLen @test_vsetvli_e16mf4(iXLen %avl) nounwind {
; CHECK-LABEL: test_vsetvli_e16mf4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, ma
; CHECK-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 1, iXLen 6)
  ret iXLen %vl
}
33
; SEW-code 3 (e64), LMUL-code 5 (mf8): largest SEW with the smallest
; fractional LMUL encoding.
define iXLen @test_vsetvli_e64mf8(iXLen %avl) nounwind {
; CHECK-LABEL: test_vsetvli_e64mf8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, a0, e64, mf8, ta, ma
; CHECK-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 3, iXLen 5)
  ret iXLen %vl
}
42
; A constant AVL of 0 is selected as the immediate form (vsetivli) rather
; than materializing 0 into a register for vsetvli.
define iXLen @test_vsetvli_e8mf2_zero_avl() nounwind {
; CHECK-LABEL: test_vsetvli_e8mf2_zero_avl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a0, 0, e8, mf2, ta, ma
; CHECK-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 0, iXLen 7)
  ret iXLen %vl
}
51
; Zero AVL again selects the vsetivli immediate form.
; NOTE(review): the function name says e32mf8, but the operands
; (SEW-code 1, LMUL-code 6) and the generated check are e16/mf4 — the
; name looks stale; confirm intent before renaming (CHECK-LABEL would
; need regenerating to match).
define iXLen @test_vsetvli_e32mf8_zero_avl() nounwind {
; CHECK-LABEL: test_vsetvli_e32mf8_zero_avl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, ma
; CHECK-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 1, iXLen 6)
  ret iXLen %vl
}
60
; vsetvlimax lowers to vsetvli with x0 as the AVL operand, i.e. "set VL
; to VLMAX". SEW-code 2 (e32), LMUL-code 1 (m2).
define iXLen @test_vsetvlimax_e32m2() nounwind {
; CHECK-LABEL: test_vsetvlimax_e32m2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
  ret iXLen %vl
}
69
; vsetvlimax with SEW-code 3 (e64), LMUL-code 2 (m4).
define iXLen @test_vsetvlimax_e64m4() nounwind {
; CHECK-LABEL: test_vsetvlimax_e64m4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 2)
  ret iXLen %vl
}
78
; vsetvlimax with SEW-code 3 (e64), LMUL-code 3 (m8).
define iXLen @test_vsetvlimax_e64m8() nounwind {
; CHECK-LABEL: test_vsetvlimax_e64m8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 3)
  ret iXLen %vl
}
87
; Check that we remove the intrinsic if it's unused: the call's result is
; dead, so no vsetvli should be emitted at all.
define void @test_vsetvli_e8m1_nouse(iXLen %avl) nounwind {
; CHECK-LABEL: test_vsetvli_e8m1_nouse:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
  ret void
}
96
; Same dead-result elimination for vsetvlimax: no instruction is emitted.
define void @test_vsetvlimax_e32m2_nouse() nounwind {
; CHECK-LABEL: test_vsetvlimax_e32m2_nouse:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
  ret void
}
104
; Unit-stride vector load used below to give the vsetvli result a consumer.
declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32>, ptr, iXLen)
106
; Check that we remove the redundant vsetvli when followed by another
; operation: only the vsetvli implied by the vle32 (with zero as the VL
; output register) remains, feeding the original AVL straight through.
define <vscale x 4 x i32> @redundant_vsetvli(iXLen %avl, ptr %ptr) nounwind {
; CHECK-LABEL: redundant_vsetvli:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> undef, ptr %ptr, iXLen %vl)
  ret <vscale x 4 x i32> %x
}
118
; Check that we remove the repeated/redundant vsetvli when followed by another
; operation.
; FIXME: We don't catch the second vsetvli because it has a use of its output.
; We could replace it with the output of the first vsetvli.
define <vscale x 4 x i32> @repeated_vsetvli(iXLen %avl, ptr %ptr) nounwind {
; CHECK-LABEL: repeated_vsetvli:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    ret
  %vl0 = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
  %vl1 = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %vl0, iXLen 2, iXLen 1)
  %x = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32> undef, ptr %ptr, iXLen %vl1)
  ret <vscale x 4 x i32> %x
}
135
; A constant AVL of -1 (all ones) is recognized as "give me VLMAX" and
; selected as the x0-AVL form, the same lowering vsetvlimax gets.
define iXLen @test_vsetvli_negone_e8m1(iXLen %avl) nounwind {
; CHECK-LABEL: test_vsetvli_negone_e8m1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen -1, iXLen 0, iXLen 0)
  ret iXLen %vl
}
144
; AVL=128 with e8/m8: when VLEN is unknown, 128 must be materialized into
; a register; with -riscv-v-vector-bits-max=128 the backend knows VLMAX
; for this vtype is exactly 128 (VLEN*8/8), so it can use the x0 (VLMAX)
; form instead — hence the split VLENUNKNOWN/VLEN128 check prefixes.
define iXLen @test_vsetvli_eqvlmax_e8m8(iXLen %avl) nounwind {
; VLENUNKNOWN-LABEL: test_vsetvli_eqvlmax_e8m8:
; VLENUNKNOWN:       # %bb.0:
; VLENUNKNOWN-NEXT:    li a0, 128
; VLENUNKNOWN-NEXT:    vsetvli a0, a0, e8, m8, ta, ma
; VLENUNKNOWN-NEXT:    ret
;
; VLEN128-LABEL: test_vsetvli_eqvlmax_e8m8:
; VLEN128:       # %bb.0:
; VLEN128-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; VLEN128-NEXT:    ret
  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 128, iXLen 0, iXLen 3)
  ret iXLen %vl
}
159