; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll (revision 9122c5235ec85ce0c0ad337e862b006e7b349d84)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64d \
3; RUN:     -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64NOM
4; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
5; RUN:     -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64M
6
; Scalable i8 extractelement tests. Index 0 is read directly with vmv.x.s;
; a constant index uses vslidedown.vi and a variable index uses vslidedown.vx,
; with the vsetivli LMUL scaling with the vector size (mf8 up to m8).
7define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
8; CHECK-LABEL: extractelt_nxv1i8_0:
9; CHECK:       # %bb.0:
10; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
11; CHECK-NEXT:    vmv.x.s a0, v8
12; CHECK-NEXT:    ret
13  %r = extractelement <vscale x 1 x i8> %v, i32 0
14  ret i8 %r
15}
16
17define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
18; CHECK-LABEL: extractelt_nxv1i8_imm:
19; CHECK:       # %bb.0:
20; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
21; CHECK-NEXT:    vslidedown.vi v8, v8, 2
22; CHECK-NEXT:    vmv.x.s a0, v8
23; CHECK-NEXT:    ret
24  %r = extractelement <vscale x 1 x i8> %v, i32 2
25  ret i8 %r
26}
27
28define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 zeroext %idx) {
29; CHECK-LABEL: extractelt_nxv1i8_idx:
30; CHECK:       # %bb.0:
31; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
32; CHECK-NEXT:    vslidedown.vx v8, v8, a0
33; CHECK-NEXT:    vmv.x.s a0, v8
34; CHECK-NEXT:    ret
35  %r = extractelement <vscale x 1 x i8> %v, i32 %idx
36  ret i8 %r
37}
38
39define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
40; CHECK-LABEL: extractelt_nxv2i8_0:
41; CHECK:       # %bb.0:
42; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
43; CHECK-NEXT:    vmv.x.s a0, v8
44; CHECK-NEXT:    ret
45  %r = extractelement <vscale x 2 x i8> %v, i32 0
46  ret i8 %r
47}
48
49define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
50; CHECK-LABEL: extractelt_nxv2i8_imm:
51; CHECK:       # %bb.0:
52; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
53; CHECK-NEXT:    vslidedown.vi v8, v8, 2
54; CHECK-NEXT:    vmv.x.s a0, v8
55; CHECK-NEXT:    ret
56  %r = extractelement <vscale x 2 x i8> %v, i32 2
57  ret i8 %r
58}
59
60define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 zeroext %idx) {
61; CHECK-LABEL: extractelt_nxv2i8_idx:
62; CHECK:       # %bb.0:
63; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
64; CHECK-NEXT:    vslidedown.vx v8, v8, a0
65; CHECK-NEXT:    vmv.x.s a0, v8
66; CHECK-NEXT:    ret
67  %r = extractelement <vscale x 2 x i8> %v, i32 %idx
68  ret i8 %r
69}
70
71define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
72; CHECK-LABEL: extractelt_nxv4i8_0:
73; CHECK:       # %bb.0:
74; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
75; CHECK-NEXT:    vmv.x.s a0, v8
76; CHECK-NEXT:    ret
77  %r = extractelement <vscale x 4 x i8> %v, i32 0
78  ret i8 %r
79}
80
81define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
82; CHECK-LABEL: extractelt_nxv4i8_imm:
83; CHECK:       # %bb.0:
84; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
85; CHECK-NEXT:    vslidedown.vi v8, v8, 2
86; CHECK-NEXT:    vmv.x.s a0, v8
87; CHECK-NEXT:    ret
88  %r = extractelement <vscale x 4 x i8> %v, i32 2
89  ret i8 %r
90}
91
92define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 zeroext %idx) {
93; CHECK-LABEL: extractelt_nxv4i8_idx:
94; CHECK:       # %bb.0:
95; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
96; CHECK-NEXT:    vslidedown.vx v8, v8, a0
97; CHECK-NEXT:    vmv.x.s a0, v8
98; CHECK-NEXT:    ret
99  %r = extractelement <vscale x 4 x i8> %v, i32 %idx
100  ret i8 %r
101}
102
103define signext i8 @extractelt_nxv8i8_0(<vscale x 8 x i8> %v) {
104; CHECK-LABEL: extractelt_nxv8i8_0:
105; CHECK:       # %bb.0:
106; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
107; CHECK-NEXT:    vmv.x.s a0, v8
108; CHECK-NEXT:    ret
109  %r = extractelement <vscale x 8 x i8> %v, i32 0
110  ret i8 %r
111}
112
113define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
114; CHECK-LABEL: extractelt_nxv8i8_imm:
115; CHECK:       # %bb.0:
116; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
117; CHECK-NEXT:    vslidedown.vi v8, v8, 2
118; CHECK-NEXT:    vmv.x.s a0, v8
119; CHECK-NEXT:    ret
120  %r = extractelement <vscale x 8 x i8> %v, i32 2
121  ret i8 %r
122}
123
124define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 zeroext %idx) {
125; CHECK-LABEL: extractelt_nxv8i8_idx:
126; CHECK:       # %bb.0:
127; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
128; CHECK-NEXT:    vslidedown.vx v8, v8, a0
129; CHECK-NEXT:    vmv.x.s a0, v8
130; CHECK-NEXT:    ret
131  %r = extractelement <vscale x 8 x i8> %v, i32 %idx
132  ret i8 %r
133}
134
135define signext i8 @extractelt_nxv16i8_0(<vscale x 16 x i8> %v) {
136; CHECK-LABEL: extractelt_nxv16i8_0:
137; CHECK:       # %bb.0:
138; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
139; CHECK-NEXT:    vmv.x.s a0, v8
140; CHECK-NEXT:    ret
141  %r = extractelement <vscale x 16 x i8> %v, i32 0
142  ret i8 %r
143}
144
145define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
146; CHECK-LABEL: extractelt_nxv16i8_imm:
147; CHECK:       # %bb.0:
148; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
149; CHECK-NEXT:    vslidedown.vi v8, v8, 2
150; CHECK-NEXT:    vmv.x.s a0, v8
151; CHECK-NEXT:    ret
152  %r = extractelement <vscale x 16 x i8> %v, i32 2
153  ret i8 %r
154}
155
156define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 zeroext %idx) {
157; CHECK-LABEL: extractelt_nxv16i8_idx:
158; CHECK:       # %bb.0:
159; CHECK-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
160; CHECK-NEXT:    vslidedown.vx v8, v8, a0
161; CHECK-NEXT:    vmv.x.s a0, v8
162; CHECK-NEXT:    ret
163  %r = extractelement <vscale x 16 x i8> %v, i32 %idx
164  ret i8 %r
165}
166
167define signext i8 @extractelt_nxv32i8_0(<vscale x 32 x i8> %v) {
168; CHECK-LABEL: extractelt_nxv32i8_0:
169; CHECK:       # %bb.0:
170; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
171; CHECK-NEXT:    vmv.x.s a0, v8
172; CHECK-NEXT:    ret
173  %r = extractelement <vscale x 32 x i8> %v, i32 0
174  ret i8 %r
175}
176
177define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
178; CHECK-LABEL: extractelt_nxv32i8_imm:
179; CHECK:       # %bb.0:
180; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
181; CHECK-NEXT:    vslidedown.vi v8, v8, 2
182; CHECK-NEXT:    vmv.x.s a0, v8
183; CHECK-NEXT:    ret
184  %r = extractelement <vscale x 32 x i8> %v, i32 2
185  ret i8 %r
186}
187
188define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 zeroext %idx) {
189; CHECK-LABEL: extractelt_nxv32i8_idx:
190; CHECK:       # %bb.0:
191; CHECK-NEXT:    vsetivli zero, 1, e8, m4, ta, ma
192; CHECK-NEXT:    vslidedown.vx v8, v8, a0
193; CHECK-NEXT:    vmv.x.s a0, v8
194; CHECK-NEXT:    ret
195  %r = extractelement <vscale x 32 x i8> %v, i32 %idx
196  ret i8 %r
197}
198
199define signext i8 @extractelt_nxv64i8_0(<vscale x 64 x i8> %v) {
200; CHECK-LABEL: extractelt_nxv64i8_0:
201; CHECK:       # %bb.0:
202; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
203; CHECK-NEXT:    vmv.x.s a0, v8
204; CHECK-NEXT:    ret
205  %r = extractelement <vscale x 64 x i8> %v, i32 0
206  ret i8 %r
207}
208
209define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
210; CHECK-LABEL: extractelt_nxv64i8_imm:
211; CHECK:       # %bb.0:
212; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
213; CHECK-NEXT:    vslidedown.vi v8, v8, 2
214; CHECK-NEXT:    vmv.x.s a0, v8
215; CHECK-NEXT:    ret
216  %r = extractelement <vscale x 64 x i8> %v, i32 2
217  ret i8 %r
218}
219
220define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 zeroext %idx) {
221; CHECK-LABEL: extractelt_nxv64i8_idx:
222; CHECK:       # %bb.0:
223; CHECK-NEXT:    vsetivli zero, 1, e8, m8, ta, ma
224; CHECK-NEXT:    vslidedown.vx v8, v8, a0
225; CHECK-NEXT:    vmv.x.s a0, v8
226; CHECK-NEXT:    ret
227  %r = extractelement <vscale x 64 x i8> %v, i32 %idx
228  ret i8 %r
229}
230
; Scalable i16 extractelement tests, mirroring the i8 section above with
; e16 element width (LMUL mf4 through m8).
231define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
232; CHECK-LABEL: extractelt_nxv1i16_0:
233; CHECK:       # %bb.0:
234; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
235; CHECK-NEXT:    vmv.x.s a0, v8
236; CHECK-NEXT:    ret
237  %r = extractelement <vscale x 1 x i16> %v, i32 0
238  ret i16 %r
239}
240
241define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
242; CHECK-LABEL: extractelt_nxv1i16_imm:
243; CHECK:       # %bb.0:
244; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
245; CHECK-NEXT:    vslidedown.vi v8, v8, 2
246; CHECK-NEXT:    vmv.x.s a0, v8
247; CHECK-NEXT:    ret
248  %r = extractelement <vscale x 1 x i16> %v, i32 2
249  ret i16 %r
250}
251
252define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 zeroext %idx) {
253; CHECK-LABEL: extractelt_nxv1i16_idx:
254; CHECK:       # %bb.0:
255; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
256; CHECK-NEXT:    vslidedown.vx v8, v8, a0
257; CHECK-NEXT:    vmv.x.s a0, v8
258; CHECK-NEXT:    ret
259  %r = extractelement <vscale x 1 x i16> %v, i32 %idx
260  ret i16 %r
261}
262
263define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
264; CHECK-LABEL: extractelt_nxv2i16_0:
265; CHECK:       # %bb.0:
266; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
267; CHECK-NEXT:    vmv.x.s a0, v8
268; CHECK-NEXT:    ret
269  %r = extractelement <vscale x 2 x i16> %v, i32 0
270  ret i16 %r
271}
272
273define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
274; CHECK-LABEL: extractelt_nxv2i16_imm:
275; CHECK:       # %bb.0:
276; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
277; CHECK-NEXT:    vslidedown.vi v8, v8, 2
278; CHECK-NEXT:    vmv.x.s a0, v8
279; CHECK-NEXT:    ret
280  %r = extractelement <vscale x 2 x i16> %v, i32 2
281  ret i16 %r
282}
283
284define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 zeroext %idx) {
285; CHECK-LABEL: extractelt_nxv2i16_idx:
286; CHECK:       # %bb.0:
287; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
288; CHECK-NEXT:    vslidedown.vx v8, v8, a0
289; CHECK-NEXT:    vmv.x.s a0, v8
290; CHECK-NEXT:    ret
291  %r = extractelement <vscale x 2 x i16> %v, i32 %idx
292  ret i16 %r
293}
294
295define signext i16 @extractelt_nxv4i16_0(<vscale x 4 x i16> %v) {
296; CHECK-LABEL: extractelt_nxv4i16_0:
297; CHECK:       # %bb.0:
298; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
299; CHECK-NEXT:    vmv.x.s a0, v8
300; CHECK-NEXT:    ret
301  %r = extractelement <vscale x 4 x i16> %v, i32 0
302  ret i16 %r
303}
304
305define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
306; CHECK-LABEL: extractelt_nxv4i16_imm:
307; CHECK:       # %bb.0:
308; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
309; CHECK-NEXT:    vslidedown.vi v8, v8, 2
310; CHECK-NEXT:    vmv.x.s a0, v8
311; CHECK-NEXT:    ret
312  %r = extractelement <vscale x 4 x i16> %v, i32 2
313  ret i16 %r
314}
315
316define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 zeroext %idx) {
317; CHECK-LABEL: extractelt_nxv4i16_idx:
318; CHECK:       # %bb.0:
319; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
320; CHECK-NEXT:    vslidedown.vx v8, v8, a0
321; CHECK-NEXT:    vmv.x.s a0, v8
322; CHECK-NEXT:    ret
323  %r = extractelement <vscale x 4 x i16> %v, i32 %idx
324  ret i16 %r
325}
326
327define signext i16 @extractelt_nxv8i16_0(<vscale x 8 x i16> %v) {
328; CHECK-LABEL: extractelt_nxv8i16_0:
329; CHECK:       # %bb.0:
330; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
331; CHECK-NEXT:    vmv.x.s a0, v8
332; CHECK-NEXT:    ret
333  %r = extractelement <vscale x 8 x i16> %v, i32 0
334  ret i16 %r
335}
336
337define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
338; CHECK-LABEL: extractelt_nxv8i16_imm:
339; CHECK:       # %bb.0:
340; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
341; CHECK-NEXT:    vslidedown.vi v8, v8, 2
342; CHECK-NEXT:    vmv.x.s a0, v8
343; CHECK-NEXT:    ret
344  %r = extractelement <vscale x 8 x i16> %v, i32 2
345  ret i16 %r
346}
347
348define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 zeroext %idx) {
349; CHECK-LABEL: extractelt_nxv8i16_idx:
350; CHECK:       # %bb.0:
351; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
352; CHECK-NEXT:    vslidedown.vx v8, v8, a0
353; CHECK-NEXT:    vmv.x.s a0, v8
354; CHECK-NEXT:    ret
355  %r = extractelement <vscale x 8 x i16> %v, i32 %idx
356  ret i16 %r
357}
358
359define signext i16 @extractelt_nxv16i16_0(<vscale x 16 x i16> %v) {
360; CHECK-LABEL: extractelt_nxv16i16_0:
361; CHECK:       # %bb.0:
362; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
363; CHECK-NEXT:    vmv.x.s a0, v8
364; CHECK-NEXT:    ret
365  %r = extractelement <vscale x 16 x i16> %v, i32 0
366  ret i16 %r
367}
368
369define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
370; CHECK-LABEL: extractelt_nxv16i16_imm:
371; CHECK:       # %bb.0:
372; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
373; CHECK-NEXT:    vslidedown.vi v8, v8, 2
374; CHECK-NEXT:    vmv.x.s a0, v8
375; CHECK-NEXT:    ret
376  %r = extractelement <vscale x 16 x i16> %v, i32 2
377  ret i16 %r
378}
379
380define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 zeroext %idx) {
381; CHECK-LABEL: extractelt_nxv16i16_idx:
382; CHECK:       # %bb.0:
383; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
384; CHECK-NEXT:    vslidedown.vx v8, v8, a0
385; CHECK-NEXT:    vmv.x.s a0, v8
386; CHECK-NEXT:    ret
387  %r = extractelement <vscale x 16 x i16> %v, i32 %idx
388  ret i16 %r
389}
390
391define signext i16 @extractelt_nxv32i16_0(<vscale x 32 x i16> %v) {
392; CHECK-LABEL: extractelt_nxv32i16_0:
393; CHECK:       # %bb.0:
394; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
395; CHECK-NEXT:    vmv.x.s a0, v8
396; CHECK-NEXT:    ret
397  %r = extractelement <vscale x 32 x i16> %v, i32 0
398  ret i16 %r
399}
400
401define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
402; CHECK-LABEL: extractelt_nxv32i16_imm:
403; CHECK:       # %bb.0:
404; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
405; CHECK-NEXT:    vslidedown.vi v8, v8, 2
406; CHECK-NEXT:    vmv.x.s a0, v8
407; CHECK-NEXT:    ret
408  %r = extractelement <vscale x 32 x i16> %v, i32 2
409  ret i16 %r
410}
411
412define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 zeroext %idx) {
413; CHECK-LABEL: extractelt_nxv32i16_idx:
414; CHECK:       # %bb.0:
415; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
416; CHECK-NEXT:    vslidedown.vx v8, v8, a0
417; CHECK-NEXT:    vmv.x.s a0, v8
418; CHECK-NEXT:    ret
419  %r = extractelement <vscale x 32 x i16> %v, i32 %idx
420  ret i16 %r
421}
422
; Scalable i32 extractelement tests (e32, LMUL mf2 through m8), same pattern
; as the i8/i16 sections above.
423define signext i32 @extractelt_nxv1i32_0(<vscale x 1 x i32> %v) {
424; CHECK-LABEL: extractelt_nxv1i32_0:
425; CHECK:       # %bb.0:
426; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
427; CHECK-NEXT:    vmv.x.s a0, v8
428; CHECK-NEXT:    ret
429  %r = extractelement <vscale x 1 x i32> %v, i32 0
430  ret i32 %r
431}
432
433define signext i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
434; CHECK-LABEL: extractelt_nxv1i32_imm:
435; CHECK:       # %bb.0:
436; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
437; CHECK-NEXT:    vslidedown.vi v8, v8, 2
438; CHECK-NEXT:    vmv.x.s a0, v8
439; CHECK-NEXT:    ret
440  %r = extractelement <vscale x 1 x i32> %v, i32 2
441  ret i32 %r
442}
443
444define signext i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 zeroext %idx) {
445; CHECK-LABEL: extractelt_nxv1i32_idx:
446; CHECK:       # %bb.0:
447; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
448; CHECK-NEXT:    vslidedown.vx v8, v8, a0
449; CHECK-NEXT:    vmv.x.s a0, v8
450; CHECK-NEXT:    ret
451  %r = extractelement <vscale x 1 x i32> %v, i32 %idx
452  ret i32 %r
453}
454
455define signext i32 @extractelt_nxv2i32_0(<vscale x 2 x i32> %v) {
456; CHECK-LABEL: extractelt_nxv2i32_0:
457; CHECK:       # %bb.0:
458; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
459; CHECK-NEXT:    vmv.x.s a0, v8
460; CHECK-NEXT:    ret
461  %r = extractelement <vscale x 2 x i32> %v, i32 0
462  ret i32 %r
463}
464
465define signext i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
466; CHECK-LABEL: extractelt_nxv2i32_imm:
467; CHECK:       # %bb.0:
468; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
469; CHECK-NEXT:    vslidedown.vi v8, v8, 2
470; CHECK-NEXT:    vmv.x.s a0, v8
471; CHECK-NEXT:    ret
472  %r = extractelement <vscale x 2 x i32> %v, i32 2
473  ret i32 %r
474}
475
476define signext i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 zeroext %idx) {
477; CHECK-LABEL: extractelt_nxv2i32_idx:
478; CHECK:       # %bb.0:
479; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
480; CHECK-NEXT:    vslidedown.vx v8, v8, a0
481; CHECK-NEXT:    vmv.x.s a0, v8
482; CHECK-NEXT:    ret
483  %r = extractelement <vscale x 2 x i32> %v, i32 %idx
484  ret i32 %r
485}
486
487define signext i32 @extractelt_nxv4i32_0(<vscale x 4 x i32> %v) {
488; CHECK-LABEL: extractelt_nxv4i32_0:
489; CHECK:       # %bb.0:
490; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
491; CHECK-NEXT:    vmv.x.s a0, v8
492; CHECK-NEXT:    ret
493  %r = extractelement <vscale x 4 x i32> %v, i32 0
494  ret i32 %r
495}
496
497define signext i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
498; CHECK-LABEL: extractelt_nxv4i32_imm:
499; CHECK:       # %bb.0:
500; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
501; CHECK-NEXT:    vslidedown.vi v8, v8, 2
502; CHECK-NEXT:    vmv.x.s a0, v8
503; CHECK-NEXT:    ret
504  %r = extractelement <vscale x 4 x i32> %v, i32 2
505  ret i32 %r
506}
507
508define signext i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 zeroext %idx) {
509; CHECK-LABEL: extractelt_nxv4i32_idx:
510; CHECK:       # %bb.0:
511; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
512; CHECK-NEXT:    vslidedown.vx v8, v8, a0
513; CHECK-NEXT:    vmv.x.s a0, v8
514; CHECK-NEXT:    ret
515  %r = extractelement <vscale x 4 x i32> %v, i32 %idx
516  ret i32 %r
517}
518
519define signext i32 @extractelt_nxv8i32_0(<vscale x 8 x i32> %v) {
520; CHECK-LABEL: extractelt_nxv8i32_0:
521; CHECK:       # %bb.0:
522; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
523; CHECK-NEXT:    vmv.x.s a0, v8
524; CHECK-NEXT:    ret
525  %r = extractelement <vscale x 8 x i32> %v, i32 0
526  ret i32 %r
527}
528
529define signext i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
530; CHECK-LABEL: extractelt_nxv8i32_imm:
531; CHECK:       # %bb.0:
532; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
533; CHECK-NEXT:    vslidedown.vi v8, v8, 2
534; CHECK-NEXT:    vmv.x.s a0, v8
535; CHECK-NEXT:    ret
536  %r = extractelement <vscale x 8 x i32> %v, i32 2
537  ret i32 %r
538}
539
540define signext i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 zeroext %idx) {
541; CHECK-LABEL: extractelt_nxv8i32_idx:
542; CHECK:       # %bb.0:
543; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, ma
544; CHECK-NEXT:    vslidedown.vx v8, v8, a0
545; CHECK-NEXT:    vmv.x.s a0, v8
546; CHECK-NEXT:    ret
547  %r = extractelement <vscale x 8 x i32> %v, i32 %idx
548  ret i32 %r
549}
550
551define signext i32 @extractelt_nxv16i32_0(<vscale x 16 x i32> %v) {
552; CHECK-LABEL: extractelt_nxv16i32_0:
553; CHECK:       # %bb.0:
554; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
555; CHECK-NEXT:    vmv.x.s a0, v8
556; CHECK-NEXT:    ret
557  %r = extractelement <vscale x 16 x i32> %v, i32 0
558  ret i32 %r
559}
560
561define signext i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
562; CHECK-LABEL: extractelt_nxv16i32_imm:
563; CHECK:       # %bb.0:
564; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
565; CHECK-NEXT:    vslidedown.vi v8, v8, 2
566; CHECK-NEXT:    vmv.x.s a0, v8
567; CHECK-NEXT:    ret
568  %r = extractelement <vscale x 16 x i32> %v, i32 2
569  ret i32 %r
570}
571
572define signext i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 zeroext %idx) {
573; CHECK-LABEL: extractelt_nxv16i32_idx:
574; CHECK:       # %bb.0:
575; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
576; CHECK-NEXT:    vslidedown.vx v8, v8, a0
577; CHECK-NEXT:    vmv.x.s a0, v8
578; CHECK-NEXT:    ret
579  %r = extractelement <vscale x 16 x i32> %v, i32 %idx
580  ret i32 %r
581}
582
; Scalable i64 extractelement tests (e64). The *_exact_vlen variants pin
; vscale to exactly 2 via vscale_range(2,2), letting the compiler resolve a
; constant index directly to a known vector register (e.g. element 2 of an
; m8 group is element 0 of v9) instead of emitting a slide within the group.
583define i64 @extractelt_nxv1i64_0(<vscale x 1 x i64> %v) {
584; CHECK-LABEL: extractelt_nxv1i64_0:
585; CHECK:       # %bb.0:
586; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
587; CHECK-NEXT:    vmv.x.s a0, v8
588; CHECK-NEXT:    ret
589  %r = extractelement <vscale x 1 x i64> %v, i32 0
590  ret i64 %r
591}
592
593define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
594; CHECK-LABEL: extractelt_nxv1i64_imm:
595; CHECK:       # %bb.0:
596; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
597; CHECK-NEXT:    vslidedown.vi v8, v8, 2
598; CHECK-NEXT:    vmv.x.s a0, v8
599; CHECK-NEXT:    ret
600  %r = extractelement <vscale x 1 x i64> %v, i32 2
601  ret i64 %r
602}
603
604define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 zeroext %idx) {
605; CHECK-LABEL: extractelt_nxv1i64_idx:
606; CHECK:       # %bb.0:
607; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
608; CHECK-NEXT:    vslidedown.vx v8, v8, a0
609; CHECK-NEXT:    vmv.x.s a0, v8
610; CHECK-NEXT:    ret
611  %r = extractelement <vscale x 1 x i64> %v, i32 %idx
612  ret i64 %r
613}
614
615define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
616; CHECK-LABEL: extractelt_nxv2i64_0:
617; CHECK:       # %bb.0:
618; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
619; CHECK-NEXT:    vmv.x.s a0, v8
620; CHECK-NEXT:    ret
621  %r = extractelement <vscale x 2 x i64> %v, i32 0
622  ret i64 %r
623}
624
625define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
626; CHECK-LABEL: extractelt_nxv2i64_imm:
627; CHECK:       # %bb.0:
628; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
629; CHECK-NEXT:    vslidedown.vi v8, v8, 2
630; CHECK-NEXT:    vmv.x.s a0, v8
631; CHECK-NEXT:    ret
632  %r = extractelement <vscale x 2 x i64> %v, i32 2
633  ret i64 %r
634}
635
636define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 zeroext %idx) {
637; CHECK-LABEL: extractelt_nxv2i64_idx:
638; CHECK:       # %bb.0:
639; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
640; CHECK-NEXT:    vslidedown.vx v8, v8, a0
641; CHECK-NEXT:    vmv.x.s a0, v8
642; CHECK-NEXT:    ret
643  %r = extractelement <vscale x 2 x i64> %v, i32 %idx
644  ret i64 %r
645}
646
647define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
648; CHECK-LABEL: extractelt_nxv4i64_0:
649; CHECK:       # %bb.0:
650; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
651; CHECK-NEXT:    vmv.x.s a0, v8
652; CHECK-NEXT:    ret
653  %r = extractelement <vscale x 4 x i64> %v, i32 0
654  ret i64 %r
655}
656
657define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
658; CHECK-LABEL: extractelt_nxv4i64_imm:
659; CHECK:       # %bb.0:
660; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
661; CHECK-NEXT:    vslidedown.vi v8, v8, 2
662; CHECK-NEXT:    vmv.x.s a0, v8
663; CHECK-NEXT:    ret
664  %r = extractelement <vscale x 4 x i64> %v, i32 2
665  ret i64 %r
666}
667
668define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 zeroext %idx) {
669; CHECK-LABEL: extractelt_nxv4i64_idx:
670; CHECK:       # %bb.0:
671; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
672; CHECK-NEXT:    vslidedown.vx v8, v8, a0
673; CHECK-NEXT:    vmv.x.s a0, v8
674; CHECK-NEXT:    ret
675  %r = extractelement <vscale x 4 x i64> %v, i32 %idx
676  ret i64 %r
677}
678
679define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
680; CHECK-LABEL: extractelt_nxv8i64_0:
681; CHECK:       # %bb.0:
682; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
683; CHECK-NEXT:    vmv.x.s a0, v8
684; CHECK-NEXT:    ret
685  %r = extractelement <vscale x 8 x i64> %v, i32 0
686  ret i64 %r
687}
688
689define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
690; CHECK-LABEL: extractelt_nxv8i64_imm:
691; CHECK:       # %bb.0:
692; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
693; CHECK-NEXT:    vslidedown.vi v8, v8, 2
694; CHECK-NEXT:    vmv.x.s a0, v8
695; CHECK-NEXT:    ret
696  %r = extractelement <vscale x 8 x i64> %v, i32 2
697  ret i64 %r
698}
699
700define i64 @extractelt_nxv8i64_2_exact_vlen(<vscale x 8 x i64> %v) vscale_range(2,2) {
701; CHECK-LABEL: extractelt_nxv8i64_2_exact_vlen:
702; CHECK:       # %bb.0:
703; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
704; CHECK-NEXT:    vmv.x.s a0, v9
705; CHECK-NEXT:    ret
706  %r = extractelement <vscale x 8 x i64> %v, i32 2
707  ret i64 %r
708}
709
710define i64 @extractelt_nxv8i64_15_exact_vlen(<vscale x 8 x i64> %v) vscale_range(2,2) {
711; CHECK-LABEL: extractelt_nxv8i64_15_exact_vlen:
712; CHECK:       # %bb.0:
713; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
714; CHECK-NEXT:    vslidedown.vi v8, v15, 1
715; CHECK-NEXT:    vmv.x.s a0, v8
716; CHECK-NEXT:    ret
717  %r = extractelement <vscale x 8 x i64> %v, i32 15
718  ret i64 %r
719}
720
721define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 zeroext %idx) {
722; CHECK-LABEL: extractelt_nxv8i64_idx:
723; CHECK:       # %bb.0:
724; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
725; CHECK-NEXT:    vslidedown.vx v8, v8, a0
726; CHECK-NEXT:    vmv.x.s a0, v8
727; CHECK-NEXT:    ret
728  %r = extractelement <vscale x 8 x i64> %v, i32 %idx
729  ret i64 %r
730}
731
; Binary-op-then-extract tests: add/sub with a splat constant are scalarized
; (extract first, then scalar op). mul needs RV64NOM/RV64M prefixes because
; without +m there is no scalar multiply, so the multiply stays vectorized.
732define i32 @extractelt_add_nxv4i32_splat(<vscale x 4 x i32> %x) {
733; CHECK-LABEL: extractelt_add_nxv4i32_splat:
734; CHECK:       # %bb.0:
735; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
736; CHECK-NEXT:    vslidedown.vi v8, v8, 2
737; CHECK-NEXT:    vmv.x.s a0, v8
738; CHECK-NEXT:    addiw a0, a0, 3
739; CHECK-NEXT:    ret
740  %bo = add <vscale x 4 x i32> %x, splat (i32 3)
741  %ext = extractelement <vscale x 4 x i32> %bo, i32 2
742  ret i32 %ext
743}
744
745define i32 @extractelt_sub_nxv4i32_splat(<vscale x 4 x i32> %x) {
746; CHECK-LABEL: extractelt_sub_nxv4i32_splat:
747; CHECK:       # %bb.0:
748; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
749; CHECK-NEXT:    vslidedown.vi v8, v8, 1
750; CHECK-NEXT:    vmv.x.s a0, v8
751; CHECK-NEXT:    li a1, 3
752; CHECK-NEXT:    subw a0, a1, a0
753; CHECK-NEXT:    ret
754  %bo = sub <vscale x 4 x i32> splat (i32 3), %x
755  %ext = extractelement <vscale x 4 x i32> %bo, i32 1
756  ret i32 %ext
757}
758
759define i32 @extractelt_mul_nxv4i32_splat(<vscale x 4 x i32> %x) {
760; RV64NOM-LABEL: extractelt_mul_nxv4i32_splat:
761; RV64NOM:       # %bb.0:
762; RV64NOM-NEXT:    li a0, 3
763; RV64NOM-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
764; RV64NOM-NEXT:    vmul.vx v8, v8, a0
765; RV64NOM-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
766; RV64NOM-NEXT:    vslidedown.vi v8, v8, 3
767; RV64NOM-NEXT:    vmv.x.s a0, v8
768; RV64NOM-NEXT:    ret
769;
770; RV64M-LABEL: extractelt_mul_nxv4i32_splat:
771; RV64M:       # %bb.0:
772; RV64M-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
773; RV64M-NEXT:    vslidedown.vi v8, v8, 3
774; RV64M-NEXT:    vmv.x.s a0, v8
775; RV64M-NEXT:    slli a1, a0, 1
776; RV64M-NEXT:    addw a0, a1, a0
777; RV64M-NEXT:    ret
778  %bo = mul <vscale x 4 x i32> %x, splat (i32 3)
779  %ext = extractelement <vscale x 4 x i32> %bo, i32 3
780  ret i32 %ext
781}
782
; sdiv by splat 3: without +m the signed-divide-by-constant expansion stays
; vectorized (vmulh.vx magic-number multiply); with +m the element is
; extracted first and divided with the scalar magic-number sequence.
783define i32 @extractelt_sdiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
784; RV64NOM-LABEL: extractelt_sdiv_nxv4i32_splat:
785; RV64NOM:       # %bb.0:
786; RV64NOM-NEXT:    lui a0, 349525
787; RV64NOM-NEXT:    addi a0, a0, 1366
788; RV64NOM-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
789; RV64NOM-NEXT:    vmulh.vx v8, v8, a0
790; RV64NOM-NEXT:    vsrl.vi v10, v8, 31
791; RV64NOM-NEXT:    vadd.vv v8, v8, v10
792; RV64NOM-NEXT:    vmv.x.s a0, v8
793; RV64NOM-NEXT:    ret
794;
795; RV64M-LABEL: extractelt_sdiv_nxv4i32_splat:
796; RV64M:       # %bb.0:
797; RV64M-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
798; RV64M-NEXT:    vmv.x.s a0, v8
799; RV64M-NEXT:    lui a1, 349525
800; RV64M-NEXT:    addiw a1, a1, 1366
801; RV64M-NEXT:    mul a0, a0, a1
802; RV64M-NEXT:    srli a1, a0, 63
803; RV64M-NEXT:    srli a0, a0, 32
804; RV64M-NEXT:    addw a0, a0, a1
805; RV64M-NEXT:    ret
806  %bo = sdiv <vscale x 4 x i32> %x, splat (i32 3)
807  %ext = extractelement <vscale x 4 x i32> %bo, i32 0
808  ret i32 %ext
809}
810
; FIXME(review): despite the "udiv" name, the IR below performs sdiv — a
; copy-paste of extractelt_sdiv_nxv4i32_splat above. The autogenerated checks
; accordingly show the *signed* lowering (vmulh.vx + sign-bit correction),
; so unsigned division is never actually tested here. Change %bo to udiv and
; regenerate the checks with utils/update_llc_test_checks.py.
811define i32 @extractelt_udiv_nxv4i32_splat(<vscale x 4 x i32> %x) {
812; RV64NOM-LABEL: extractelt_udiv_nxv4i32_splat:
813; RV64NOM:       # %bb.0:
814; RV64NOM-NEXT:    lui a0, 349525
815; RV64NOM-NEXT:    addi a0, a0, 1366
816; RV64NOM-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
817; RV64NOM-NEXT:    vmulh.vx v8, v8, a0
818; RV64NOM-NEXT:    vsrl.vi v10, v8, 31
819; RV64NOM-NEXT:    vadd.vv v8, v8, v10
820; RV64NOM-NEXT:    vmv.x.s a0, v8
821; RV64NOM-NEXT:    ret
822;
823; RV64M-LABEL: extractelt_udiv_nxv4i32_splat:
824; RV64M:       # %bb.0:
825; RV64M-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
826; RV64M-NEXT:    vmv.x.s a0, v8
827; RV64M-NEXT:    lui a1, 349525
828; RV64M-NEXT:    addiw a1, a1, 1366
829; RV64M-NEXT:    mul a0, a0, a1
830; RV64M-NEXT:    srli a1, a0, 63
831; RV64M-NEXT:    srli a0, a0, 32
832; RV64M-NEXT:    addw a0, a0, a1
833; RV64M-NEXT:    ret
834  %bo = sdiv <vscale x 4 x i32> %x, splat (i32 3)
835  %ext = extractelement <vscale x 4 x i32> %bo, i32 0
836  ret i32 %ext
837}
838
; nxv16i64 occupies two full m8 register groups (v8-v15 and v16-v23), so a
; variable index cannot be handled with a single vslidedown: the lowering
; spills both halves to an aligned stack slot and does a scalar load, clamping
; the index to the last valid element (2*vlenb/8 - 1). The i32 -1 index test
; exercises that clamp on an out-of-range constant.
839define i64 @extractelt_nxv16i64_0(<vscale x 16 x i64> %v) {
840; CHECK-LABEL: extractelt_nxv16i64_0:
841; CHECK:       # %bb.0:
842; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
843; CHECK-NEXT:    vmv.x.s a0, v8
844; CHECK-NEXT:    ret
845  %r = extractelement <vscale x 16 x i64> %v, i32 0
846  ret i64 %r
847}
848
849define i64 @extractelt_nxv16i64_neg1(<vscale x 16 x i64> %v) {
850; CHECK-LABEL: extractelt_nxv16i64_neg1:
851; CHECK:       # %bb.0:
852; CHECK-NEXT:    addi sp, sp, -80
853; CHECK-NEXT:    .cfi_def_cfa_offset 80
854; CHECK-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
855; CHECK-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
856; CHECK-NEXT:    .cfi_offset ra, -8
857; CHECK-NEXT:    .cfi_offset s0, -16
858; CHECK-NEXT:    addi s0, sp, 80
859; CHECK-NEXT:    .cfi_def_cfa s0, 0
860; CHECK-NEXT:    csrr a0, vlenb
861; CHECK-NEXT:    slli a0, a0, 4
862; CHECK-NEXT:    sub sp, sp, a0
863; CHECK-NEXT:    andi sp, sp, -64
864; CHECK-NEXT:    addi a0, sp, 64
865; CHECK-NEXT:    csrr a2, vlenb
866; CHECK-NEXT:    li a1, -1
867; CHECK-NEXT:    vs8r.v v8, (a0)
868; CHECK-NEXT:    slli a3, a2, 3
869; CHECK-NEXT:    srli a1, a1, 32
870; CHECK-NEXT:    slli a2, a2, 1
871; CHECK-NEXT:    add a3, a0, a3
872; CHECK-NEXT:    addi a2, a2, -1
873; CHECK-NEXT:    vs8r.v v16, (a3)
874; CHECK-NEXT:    bltu a2, a1, .LBB74_2
875; CHECK-NEXT:  # %bb.1:
876; CHECK-NEXT:    mv a2, a1
877; CHECK-NEXT:  .LBB74_2:
878; CHECK-NEXT:    slli a2, a2, 3
879; CHECK-NEXT:    add a0, a0, a2
880; CHECK-NEXT:    ld a0, 0(a0)
881; CHECK-NEXT:    addi sp, s0, -80
882; CHECK-NEXT:    .cfi_def_cfa sp, 80
883; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
884; CHECK-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
885; CHECK-NEXT:    .cfi_restore ra
886; CHECK-NEXT:    .cfi_restore s0
887; CHECK-NEXT:    addi sp, sp, 80
888; CHECK-NEXT:    .cfi_def_cfa_offset 0
889; CHECK-NEXT:    ret
890  %r = extractelement <vscale x 16 x i64> %v, i32 -1
891  ret i64 %r
892}
893
894define i64 @extractelt_nxv16i64_imm(<vscale x 16 x i64> %v) {
895; CHECK-LABEL: extractelt_nxv16i64_imm:
896; CHECK:       # %bb.0:
897; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
898; CHECK-NEXT:    vslidedown.vi v8, v8, 2
899; CHECK-NEXT:    vmv.x.s a0, v8
900; CHECK-NEXT:    ret
901  %r = extractelement <vscale x 16 x i64> %v, i32 2
902  ret i64 %r
903}
904
905define i64 @extractelt_nxv16i64_idx(<vscale x 16 x i64> %v, i32 zeroext %idx) {
906; CHECK-LABEL: extractelt_nxv16i64_idx:
907; CHECK:       # %bb.0:
908; CHECK-NEXT:    csrr a1, vlenb
909; CHECK-NEXT:    slli a2, a1, 1
910; CHECK-NEXT:    addi a2, a2, -1
911; CHECK-NEXT:    bltu a0, a2, .LBB76_2
912; CHECK-NEXT:  # %bb.1:
913; CHECK-NEXT:    mv a0, a2
914; CHECK-NEXT:  .LBB76_2:
915; CHECK-NEXT:    addi sp, sp, -80
916; CHECK-NEXT:    .cfi_def_cfa_offset 80
917; CHECK-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
918; CHECK-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
919; CHECK-NEXT:    .cfi_offset ra, -8
920; CHECK-NEXT:    .cfi_offset s0, -16
921; CHECK-NEXT:    addi s0, sp, 80
922; CHECK-NEXT:    .cfi_def_cfa s0, 0
923; CHECK-NEXT:    csrr a2, vlenb
924; CHECK-NEXT:    slli a2, a2, 4
925; CHECK-NEXT:    sub sp, sp, a2
926; CHECK-NEXT:    andi sp, sp, -64
927; CHECK-NEXT:    slli a0, a0, 3
928; CHECK-NEXT:    addi a2, sp, 64
929; CHECK-NEXT:    slli a1, a1, 3
930; CHECK-NEXT:    add a0, a2, a0
931; CHECK-NEXT:    vs8r.v v8, (a2)
932; CHECK-NEXT:    add a1, a2, a1
933; CHECK-NEXT:    vs8r.v v16, (a1)
934; CHECK-NEXT:    ld a0, 0(a0)
935; CHECK-NEXT:    addi sp, s0, -80
936; CHECK-NEXT:    .cfi_def_cfa sp, 80
937; CHECK-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
938; CHECK-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
939; CHECK-NEXT:    .cfi_restore ra
940; CHECK-NEXT:    .cfi_restore s0
941; CHECK-NEXT:    addi sp, sp, 80
942; CHECK-NEXT:    .cfi_def_cfa_offset 0
943; CHECK-NEXT:    ret
944  %r = extractelement <vscale x 16 x i64> %v, i32 %idx
945  ret i64 %r
946}
947