; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll (revision 770be43f6782dab84d215d01b37396d63a9c2b6e)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+m,+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+m,+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s

; Extract at index 0: the result is the low part of the source register group,
; so no instructions are needed.
define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
  ret <vscale x 4 x i32> %c
}

; Index 4 is register-group aligned, so this lowers to a whole-register move.
define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
  ret <vscale x 4 x i32> %c
}

; Extract at index 0 is a no-op (low part of the register group).
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
  ret <vscale x 2 x i32> %c
}

; Register-aligned extract from the second register of the group: vmv1r.
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 2)
  ret <vscale x 2 x i32> %c
}

; Register-aligned extract from the third register of the group: vmv1r.
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
  ret <vscale x 2 x i32> %c
}

; Register-aligned extract from the last register of the group: vmv1r.
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 6)
  ret <vscale x 2 x i32> %c
}

; Extract at index 0 is a no-op (low half of the LMUL=8 group).
define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 8 x i32> %c
}

; High half of the LMUL=8 group: a 4-register whole-register move.
define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
  ret <vscale x 8 x i32> %c
}

; Extract at index 0 is a no-op.
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 4 x i32> %c
}

; Aligned on a 2-register boundary: vmv2r from v10.
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
  ret <vscale x 4 x i32> %c
}

; Aligned on a 2-register boundary: vmv2r from v12.
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
  ret <vscale x 4 x i32> %c
}

; Aligned on a 2-register boundary: vmv2r from v14.
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
  ret <vscale x 4 x i32> %c
}

; Extract at index 0 is a no-op.
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 2 x i32> %c
}

; Single-register aligned extract: vmv1r from v9.
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
  ret <vscale x 2 x i32> %c
}

; Single-register aligned extract: vmv1r from v10.
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
  ret <vscale x 2 x i32> %c
}

; Single-register aligned extract: vmv1r from v11.
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 6)
  ret <vscale x 2 x i32> %c
}

; Single-register aligned extract: vmv1r from v12.
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v12
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
  ret <vscale x 2 x i32> %c
}

; Single-register aligned extract: vmv1r from v13.
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 10)
  ret <vscale x 2 x i32> %c
}

; Single-register aligned extract: vmv1r from v14.
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v14
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
  ret <vscale x 2 x i32> %c
}

; Single-register aligned extract: vmv1r from v15.
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v15
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 14)
  ret <vscale x 2 x i32> %c
}

; Extract at index 0 is a no-op.
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 1 x i32> %c
}

; Unaligned extract: compute the element offset from vlenb and vslidedown.
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 1)
  ret <vscale x 1 x i32> %c
}

; Unaligned extract within the second register: vslidedown on v9.
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_3(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 3)
  ret <vscale x 1 x i32> %c
}

; Unaligned extract within the last register: vslidedown on v15.
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_15(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v15, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 15)
  ret <vscale x 1 x i32> %c
}

; Index 2 is register-aligned for nxv1i32, so a whole-register move suffices.
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
  ret <vscale x 1 x i32> %c
}

; Extract at index 0 from a fractional-LMUL source is a no-op.
define <vscale x 1 x i32> @extract_nxv2i32_nxv1i32_0(<vscale x 2 x i32> %vec) {
; CHECK-LABEL: extract_nxv2i32_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> %vec, i64 0)
  ret <vscale x 1 x i32> %c
}

; Extract at index 0 is a no-op.
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_0(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 0)
  ret <vscale x 2 x i8> %c
}

; Unaligned byte extract: offset = vlenb/4 elements, then vslidedown.
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_2(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 2)
  ret <vscale x 2 x i8> %c
}

; Unaligned byte extract: offset = vlenb/2 elements, then vslidedown.
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_4(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 4)
  ret <vscale x 2 x i8> %c
}

; Unaligned byte extract: offset = 6 * (vlenb/8) elements, then vslidedown.
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 6)
  ret <vscale x 2 x i8> %c
}

; Index 8 is register-aligned for nxv2i8, so this is a whole-register move.
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_8(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 8)
  ret <vscale x 2 x i8> %c
}

; Unaligned extract past two full registers: vslidedown within v10.
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_22(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_22:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 22)
  ret <vscale x 2 x i8> %c
}

; Offset 7*(vlenb/8) computed as vlenb - vlenb/8, then vslidedown.
define <vscale x 1 x i8> @extract_nxv8i8_nxv1i8_7(<vscale x 8 x i8> %vec) {
; CHECK-LABEL: extract_nxv8i8_nxv1i8_7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a1, a0, 3
; CHECK-NEXT:    sub a0, a0, a1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 7)
  ret <vscale x 1 x i8> %c
}

; Offset 3*(vlenb/8) computed via shift-and-add, then vslidedown at mf2.
define <vscale x 1 x i8> @extract_nxv4i8_nxv1i8_3(<vscale x 4 x i8> %vec) {
; CHECK-LABEL: extract_nxv4i8_nxv1i8_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    slli a1, a0, 1
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 3)
  ret <vscale x 1 x i8> %c
}

; Extract at index 0 is a no-op.
define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_0(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 0)
  ret <vscale x 2 x half> %c
}

; Unaligned f16 extract: offset = vlenb/4 elements, then vslidedown.
define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_2(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 2)
  ret <vscale x 2 x half> %c
}

; Index 4 is register-aligned for nxv2f16: whole-register move.
define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_4(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 4)
  ret <vscale x 2 x half> %c
}

; Mask extract at index 0 is a no-op.
define <vscale x 8 x i1> @extract_nxv64i1_nxv8i1_0(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv8i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %mask, i64 0)
  ret <vscale x 8 x i1> %c
}

; Byte-aligned mask extract: slide the mask register down by vlenb/8.
define <vscale x 8 x i1> @extract_nxv64i1_nxv8i1_8(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv8i1_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %mask, i64 8)
  ret <vscale x 8 x i1> %c
}

; Mask extract at index 0 is a no-op.
define <vscale x 2 x i1> @extract_nxv64i1_nxv2i1_0(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv2i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %mask, i64 0)
  ret <vscale x 2 x i1> %c
}

; Sub-byte mask extract: widen the mask to i8, slide down, then compare back
; to a mask.
define <vscale x 2 x i1> @extract_nxv64i1_nxv2i1_2(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv2i1_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %mask, i64 2)
  ret <vscale x 2 x i1> %c
}

; Mask extract at index 0 is a no-op.
define <vscale x 4 x i1> @extract_nxv4i1_nxv32i1_0(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv4i1_nxv32i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 0)
  ret <vscale x 4 x i1> %c
}

; Sub-byte mask extract: widen to i8, slide down by vlenb/2, compare back.
define <vscale x 4 x i1> @extract_nxv4i1_nxv32i1_4(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv4i1_nxv32i1_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 4)
  ret <vscale x 4 x i1> %c
}

; Mask extract at index 0 is a no-op.
define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_0(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv16i1_nxv32i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 0)
  ret <vscale x 16 x i1> %c
}

; Byte-aligned mask extract: slide the mask down by vlenb/4 at mf2.
define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_16(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv16i1_nxv32i1_16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 16)
  ret <vscale x 16 x i1> %c
}

;
; Extract f16 vector that needs widening from one that needs widening.
;
; Extract at index 0 of a widened type is a no-op.
define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_0(<vscale x 12 x half> %in) {
; CHECK-LABEL: extract_nxv6f16_nxv12f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 0)
  ret <vscale x 6 x half> %res
}

; Unaligned extract of a widened type: assembled with slides across registers.
define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_6(<vscale x 12 x half> %in) {
; CHECK-LABEL: extract_nxv6f16_nxv12f16_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v13, v10, a0
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v12, v9, a0
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v12, v10, a0
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 6)
  ret <vscale x 6 x half> %res
}

declare <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half>, i64)

declare <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 %idx)
declare <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 %idx)

declare <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 %idx)

declare <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> %vec, i64 %idx)

declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 %idx)
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 %idx)

declare <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)

declare <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 %idx)

declare <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %vec, i64 %idx)

declare <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %vec, i64 %idx)