; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

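; This file checks lowering of llvm.stepvector for fixed-length vectors.
; Element types up to i32 (and i64 on RV64) lower to a single vid.v; i64
; element vectors on RV32 are instead built by sign-extending small step
; constants (vmv.s.x or a constant-pool vle8.v, followed by vsext.vf4).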
declare <2 x i8> @llvm.stepvector.v2i8()

define <2 x i8> @stepvector_v2i8() {
; CHECK-LABEL: stepvector_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.stepvector.v2i8()
  ret <2 x i8> %v
}

declare <3 x i8> @llvm.stepvector.v3i8()

define <3 x i8> @stepvector_v3i8() {
; CHECK-LABEL: stepvector_v3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <3 x i8> @llvm.stepvector.v3i8()
  ret <3 x i8> %v
}

declare <4 x i8> @llvm.stepvector.v4i8()

define <4 x i8> @stepvector_v4i8() {
; CHECK-LABEL: stepvector_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.stepvector.v4i8()
  ret <4 x i8> %v
}

declare <8 x i8> @llvm.stepvector.v8i8()

define <8 x i8> @stepvector_v8i8() {
; CHECK-LABEL: stepvector_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.stepvector.v8i8()
  ret <8 x i8> %v
}

declare <16 x i8> @llvm.stepvector.v16i8()

define <16 x i8> @stepvector_v16i8() {
; CHECK-LABEL: stepvector_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.stepvector.v16i8()
  ret <16 x i8> %v
}

declare <2 x i16> @llvm.stepvector.v2i16()

define <2 x i16> @stepvector_v2i16() {
; CHECK-LABEL: stepvector_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.stepvector.v2i16()
  ret <2 x i16> %v
}

declare <4 x i16> @llvm.stepvector.v4i16()

define <4 x i16> @stepvector_v4i16() {
; CHECK-LABEL: stepvector_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.stepvector.v4i16()
  ret <4 x i16> %v
}

declare <8 x i16> @llvm.stepvector.v8i16()

define <8 x i16> @stepvector_v8i16() {
; CHECK-LABEL: stepvector_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.stepvector.v8i16()
  ret <8 x i16> %v
}

declare <16 x i16> @llvm.stepvector.v16i16()

define <16 x i16> @stepvector_v16i16() {
; CHECK-LABEL: stepvector_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.stepvector.v16i16()
  ret <16 x i16> %v
}

declare <2 x i32> @llvm.stepvector.v2i32()

define <2 x i32> @stepvector_v2i32() {
; CHECK-LABEL: stepvector_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.stepvector.v2i32()
  ret <2 x i32> %v
}

declare <4 x i32> @llvm.stepvector.v4i32()

define <4 x i32> @stepvector_v4i32() {
; CHECK-LABEL: stepvector_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.stepvector.v4i32()
  ret <4 x i32> %v
}

declare <8 x i32> @llvm.stepvector.v8i32()

define <8 x i32> @stepvector_v8i32() {
; CHECK-LABEL: stepvector_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.stepvector.v8i32()
  ret <8 x i32> %v
}

declare <16 x i32> @llvm.stepvector.v16i32()

define <16 x i32> @stepvector_v16i32() {
; CHECK-LABEL: stepvector_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.stepvector.v16i32()
  ret <16 x i32> %v
}

declare <2 x i64> @llvm.stepvector.v2i64()

define <2 x i64> @stepvector_v2i64() {
; RV32-LABEL: stepvector_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 16
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.s.x v9, a0
; RV32-NEXT:    vsext.vf4 v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: stepvector_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT:    vid.v v8
; RV64-NEXT:    ret
  %v = call <2 x i64> @llvm.stepvector.v2i64()
  ret <2 x i64> %v
}

declare <4 x i64> @llvm.stepvector.v4i64()

define <4 x i64> @stepvector_v4i64() {
; RV32-LABEL: stepvector_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI14_0)
; RV32-NEXT:    addi a0, a0, %lo(.LCPI14_0)
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vle8.v v10, (a0)
; RV32-NEXT:    vsext.vf4 v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: stepvector_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT:    vid.v v8
; RV64-NEXT:    ret
  %v = call <4 x i64> @llvm.stepvector.v4i64()
  ret <4 x i64> %v
}

declare <8 x i64> @llvm.stepvector.v8i64()

define <8 x i64> @stepvector_v8i64() {
; RV32-LABEL: stepvector_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI15_0)
; RV32-NEXT:    addi a0, a0, %lo(.LCPI15_0)
; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT:    vle8.v v12, (a0)
; RV32-NEXT:    vsext.vf4 v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: stepvector_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT:    vid.v v8
; RV64-NEXT:    ret
  %v = call <8 x i64> @llvm.stepvector.v8i64()
  ret <8 x i64> %v
}

declare <16 x i64> @llvm.stepvector.v16i64()

define <16 x i64> @stepvector_v16i64() {
; RV32-LABEL: stepvector_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    lui a1, %hi(.LCPI16_0)
; RV32-NEXT:    addi a1, a1, %lo(.LCPI16_0)
; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT:    vle8.v v16, (a1)
; RV32-NEXT:    vsext.vf4 v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: stepvector_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT:    vid.v v8
; RV64-NEXT:    ret
  %v = call <16 x i64> @llvm.stepvector.v16i64()
  ret <16 x i64> %v
}