; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,NOZFMIN,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,NOZFMIN,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,NOZFMIN,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,NOZFMIN,ZVFHMIN
; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZFMIN
; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZFMIN

define bfloat @extractelt_nxv1bf16_0(<vscale x 1 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv1bf16_0:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv1bf16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 1 x bfloat> %v, i32 0
  ret bfloat %r
}

define bfloat @extractelt_nxv1bf16_imm(<vscale x 1 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv1bf16_imm:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; NOZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv1bf16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 1 x bfloat> %v, i32 2
  ret bfloat %r
}

define bfloat @extractelt_nxv1bf16_idx(<vscale x 1 x bfloat> %v, i32 zeroext %idx) {
; NOZFMIN-LABEL: extractelt_nxv1bf16_idx:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; NOZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv1bf16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 1 x bfloat> %v, i32 %idx
  ret bfloat %r
}

define bfloat @extractelt_nxv2bf16_0(<vscale x 2 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv2bf16_0:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv2bf16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 2 x bfloat> %v, i32 0
  ret bfloat %r
}

define bfloat @extractelt_nxv2bf16_imm(<vscale x 2 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv2bf16_imm:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; NOZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv2bf16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 2 x bfloat> %v, i32 2
  ret bfloat %r
}

define bfloat @extractelt_nxv2bf16_idx(<vscale x 2 x bfloat> %v, i32 zeroext %idx) {
; NOZFMIN-LABEL: extractelt_nxv2bf16_idx:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; NOZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv2bf16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 2 x bfloat> %v, i32 %idx
  ret bfloat %r
}

define bfloat @extractelt_nxv4bf16_0(<vscale x 4 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv4bf16_0:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv4bf16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 4 x bfloat> %v, i32 0
  ret bfloat %r
}

define bfloat @extractelt_nxv4bf16_imm(<vscale x 4 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv4bf16_imm:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv4bf16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 4 x bfloat> %v, i32 2
  ret bfloat %r
}

define bfloat @extractelt_nxv4bf16_idx(<vscale x 4 x bfloat> %v, i32 zeroext %idx) {
; NOZFMIN-LABEL: extractelt_nxv4bf16_idx:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv4bf16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 4 x bfloat> %v, i32 %idx
  ret bfloat %r
}

define bfloat @extractelt_nxv8bf16_0(<vscale x 8 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv8bf16_0:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv8bf16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 8 x bfloat> %v, i32 0
  ret bfloat %r
}

define bfloat @extractelt_nxv8bf16_imm(<vscale x 8 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv8bf16_imm:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv8bf16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 8 x bfloat> %v, i32 2
  ret bfloat %r
}

define bfloat @extractelt_nxv8bf16_idx(<vscale x 8 x bfloat> %v, i32 zeroext %idx) {
; NOZFMIN-LABEL: extractelt_nxv8bf16_idx:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
; NOZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv8bf16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 8 x bfloat> %v, i32 %idx
  ret bfloat %r
}

define bfloat @extractelt_nxv16bf16_0(<vscale x 16 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv16bf16_0:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv16bf16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 16 x bfloat> %v, i32 0
  ret bfloat %r
}

define bfloat @extractelt_nxv16bf16_imm(<vscale x 16 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv16bf16_imm:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv16bf16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 16 x bfloat> %v, i32 2
  ret bfloat %r
}

define bfloat @extractelt_nxv16bf16_idx(<vscale x 16 x bfloat> %v, i32 zeroext %idx) {
; NOZFMIN-LABEL: extractelt_nxv16bf16_idx:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
; NOZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv16bf16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 16 x bfloat> %v, i32 %idx
  ret bfloat %r
}

define bfloat @extractelt_nxv32bf16_0(<vscale x 32 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv32bf16_0:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv32bf16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 32 x bfloat> %v, i32 0
  ret bfloat %r
}

define bfloat @extractelt_nxv32bf16_imm(<vscale x 32 x bfloat> %v) {
; NOZFMIN-LABEL: extractelt_nxv32bf16_imm:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; NOZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv32bf16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 32 x bfloat> %v, i32 2
  ret bfloat %r
}

define bfloat @extractelt_nxv32bf16_idx(<vscale x 32 x bfloat> %v, i32 zeroext %idx) {
; NOZFMIN-LABEL: extractelt_nxv32bf16_idx:
; NOZFMIN:       # %bb.0:
; NOZFMIN-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
; NOZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; NOZFMIN-NEXT:    vmv.x.s a0, v8
; NOZFMIN-NEXT:    lui a1, 1048560
; NOZFMIN-NEXT:    or a0, a0, a1
; NOZFMIN-NEXT:    fmv.w.x fa0, a0
; NOZFMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv32bf16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 32 x bfloat> %v, i32 %idx
  ret bfloat %r
}

define half @extractelt_nxv1f16_0(<vscale x 1 x half> %v) {
; ZVFH-LABEL: extractelt_nxv1f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv1f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv1f16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 1 x half> %v, i32 0
  ret half %r
}

define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
; ZVFH-LABEL: extractelt_nxv1f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; ZVFH-NEXT:    vslidedown.vi v8, v8, 2
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv1f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv1f16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 1 x half> %v, i32 2
  ret half %r
}

define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 zeroext %idx) {
; ZVFH-LABEL: extractelt_nxv1f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; ZVFH-NEXT:    vslidedown.vx v8, v8, a0
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv1f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv1f16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 1 x half> %v, i32 %idx
  ret half %r
}

define half @extractelt_nxv2f16_0(<vscale x 2 x half> %v) {
; ZVFH-LABEL: extractelt_nxv2f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv2f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv2f16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 2 x half> %v, i32 0
  ret half %r
}

define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
; ZVFH-LABEL: extractelt_nxv2f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; ZVFH-NEXT:    vslidedown.vi v8, v8, 2
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv2f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv2f16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 2 x half> %v, i32 2
  ret half %r
}

define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 zeroext %idx) {
; ZVFH-LABEL: extractelt_nxv2f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; ZVFH-NEXT:    vslidedown.vx v8, v8, a0
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv2f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv2f16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 2 x half> %v, i32 %idx
  ret half %r
}

define half @extractelt_nxv4f16_0(<vscale x 4 x half> %v) {
; ZVFH-LABEL: extractelt_nxv4f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv4f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv4f16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 4 x half> %v, i32 0
  ret half %r
}

define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
; ZVFH-LABEL: extractelt_nxv4f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vslidedown.vi v8, v8, 2
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv4f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv4f16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 4 x half> %v, i32 2
  ret half %r
}

define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 zeroext %idx) {
; ZVFH-LABEL: extractelt_nxv4f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vslidedown.vx v8, v8, a0
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv4f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv4f16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 4 x half> %v, i32 %idx
  ret half %r
}

define half @extractelt_nxv8f16_0(<vscale x 8 x half> %v) {
; ZVFH-LABEL: extractelt_nxv8f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv8f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv8f16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 8 x half> %v, i32 0
  ret half %r
}

define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
; ZVFH-LABEL: extractelt_nxv8f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vslidedown.vi v8, v8, 2
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv8f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv8f16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 8 x half> %v, i32 2
  ret half %r
}

define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 zeroext %idx) {
; ZVFH-LABEL: extractelt_nxv8f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
; ZVFH-NEXT:    vslidedown.vx v8, v8, a0
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv8f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv8f16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 8 x half> %v, i32 %idx
  ret half %r
}

define half @extractelt_nxv16f16_0(<vscale x 16 x half> %v) {
; ZVFH-LABEL: extractelt_nxv16f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv16f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv16f16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 16 x half> %v, i32 0
  ret half %r
}

define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
; ZVFH-LABEL: extractelt_nxv16f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vslidedown.vi v8, v8, 2
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv16f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv16f16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 16 x half> %v, i32 2
  ret half %r
}

define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 zeroext %idx) {
; ZVFH-LABEL: extractelt_nxv16f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
; ZVFH-NEXT:    vslidedown.vx v8, v8, a0
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv16f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv16f16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 16 x half> %v, i32 %idx
  ret half %r
}

define half @extractelt_nxv32f16_0(<vscale x 32 x half> %v) {
; ZVFH-LABEL: extractelt_nxv32f16_0:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv32f16_0:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv32f16_0:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 32 x half> %v, i32 0
  ret half %r
}

define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
; ZVFH-LABEL: extractelt_nxv32f16_imm:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT:    vslidedown.vi v8, v8, 2
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv32f16_imm:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv32f16_imm:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; ZFMIN-NEXT:    vslidedown.vi v8, v8, 2
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 32 x half> %v, i32 2
  ret half %r
}

define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 zeroext %idx) {
; ZVFH-LABEL: extractelt_nxv32f16_idx:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
; ZVFH-NEXT:    vslidedown.vx v8, v8, a0
; ZVFH-NEXT:    vfmv.f.s fa0, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: extractelt_nxv32f16_idx:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZVFHMIN-NEXT:    vmv.x.s a0, v8
; ZVFHMIN-NEXT:    lui a1, 1048560
; ZVFHMIN-NEXT:    or a0, a0, a1
; ZVFHMIN-NEXT:    fmv.w.x fa0, a0
; ZVFHMIN-NEXT:    ret
;
; ZFMIN-LABEL: extractelt_nxv32f16_idx:
; ZFMIN:       # %bb.0:
; ZFMIN-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
; ZFMIN-NEXT:    vslidedown.vx v8, v8, a0
; ZFMIN-NEXT:    vmv.x.s a0, v8
; ZFMIN-NEXT:    fmv.h.x fa0, a0
; ZFMIN-NEXT:    ret
  %r = extractelement <vscale x 32 x half> %v, i32 %idx
  ret half %r
}

define float @extractelt_nxv1f32_0(<vscale x 1 x float> %v) {
; CHECK-LABEL: extractelt_nxv1f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x float> %v, i32 0
  ret float %r
}

define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
; CHECK-LABEL: extractelt_nxv1f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x float> %v, i32 2
  ret float %r
}

define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 zeroext %idx) {
; CHECK-LABEL: extractelt_nxv1f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x float> %v, i32 %idx
  ret float %r
}

define float @extractelt_nxv2f32_0(<vscale x 2 x float> %v) {
; CHECK-LABEL: extractelt_nxv2f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x float> %v, i32 0
  ret float %r
}

define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
; CHECK-LABEL: extractelt_nxv2f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x float> %v, i32 2
  ret float %r
}

define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 zeroext %idx) {
; CHECK-LABEL: extractelt_nxv2f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x float> %v, i32 %idx
  ret float %r
}

define float @extractelt_nxv4f32_0(<vscale x 4 x float> %v) {
; CHECK-LABEL: extractelt_nxv4f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x float> %v, i32 0
  ret float %r
}

define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
; CHECK-LABEL: extractelt_nxv4f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x float> %v, i32 2
  ret float %r
}

define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 zeroext %idx) {
; CHECK-LABEL: extractelt_nxv4f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x float> %v, i32 %idx
  ret float %r
}

define float @extractelt_nxv8f32_0(<vscale x 8 x float> %v) {
; CHECK-LABEL: extractelt_nxv8f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x float> %v, i32 0
  ret float %r
}

define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
; CHECK-LABEL: extractelt_nxv8f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x float> %v, i32 2
  ret float %r
}

define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 zeroext %idx) {
; CHECK-LABEL: extractelt_nxv8f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x float> %v, i32 %idx
  ret float %r
}

define float @extractelt_nxv16f32_0(<vscale x 16 x float> %v) {
; CHECK-LABEL: extractelt_nxv16f32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x float> %v, i32 0
  ret float %r
}

define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
; CHECK-LABEL: extractelt_nxv16f32_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x float> %v, i32 2
  ret float %r
}

define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 zeroext %idx) {
; CHECK-LABEL: extractelt_nxv16f32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x float> %v, i32 %idx
  ret float %r
}

define double @extractelt_nxv1f64_0(<vscale x 1 x double> %v) {
; CHECK-LABEL: extractelt_nxv1f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x double> %v, i32 0
  ret double %r
}

define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
; CHECK-LABEL: extractelt_nxv1f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x double> %v, i32 2
  ret double %r
}

define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 zeroext %idx) {
; CHECK-LABEL: extractelt_nxv1f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 1 x double> %v, i32 %idx
  ret double %r
}

define double @extractelt_nxv2f64_0(<vscale x 2 x double> %v) {
; CHECK-LABEL: extractelt_nxv2f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x double> %v, i32 0
  ret double %r
}

define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
; CHECK-LABEL: extractelt_nxv2f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x double> %v, i32 2
  ret double %r
}

define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 zeroext %idx) {
; CHECK-LABEL: extractelt_nxv2f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 2 x double> %v, i32 %idx
  ret double %r
}

define double @extractelt_nxv4f64_0(<vscale x 4 x double> %v) {
; CHECK-LABEL: extractelt_nxv4f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x double> %v, i32 0
  ret double %r
}

define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
; CHECK-LABEL: extractelt_nxv4f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x double> %v, i32 2
  ret double %r
}

define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 zeroext %idx) {
; CHECK-LABEL: extractelt_nxv4f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 4 x double> %v, i32 %idx
  ret double %r
}

define double @extractelt_nxv8f64_0(<vscale x 8 x double> %v) {
; CHECK-LABEL: extractelt_nxv8f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x double> %v, i32 0
  ret double %r
}

define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
; CHECK-LABEL: extractelt_nxv8f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x double> %v, i32 2
  ret double %r
}

define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 zeroext %idx) {
; CHECK-LABEL: extractelt_nxv8f64_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 8 x double> %v, i32 %idx
  ret double %r
}

define void @store_extractelt_nxv8f64(ptr %x, ptr %p) {
; CHECK-LABEL: store_extractelt_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 1
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 8 x double>, ptr %x
  %b = extractelement <vscale x 8 x double> %a, i64 1
  store double %b, ptr %p
  ret void
}

define void @store_vfmv_f_s_nxv8f64(ptr %x, ptr %p) {
; CHECK-LABEL: store_vfmv_f_s_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vse64.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <vscale x 8 x double>, ptr %x
  %b = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> %a)
  store double %b, ptr %p
  ret void
}

declare double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double>)

define float @extractelt_fadd_nxv4f32_splat(<vscale x 4 x float> %x) {
; CHECK-LABEL: extractelt_fadd_nxv4f32_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    lui a0, 263168
; CHECK-NEXT:    vfmv.f.s fa5, v8
; CHECK-NEXT:    fmv.w.x fa4, a0
; CHECK-NEXT:    fadd.s fa0, fa5, fa4
; CHECK-NEXT:    ret
  %bo = fadd <vscale x 4 x float> %x, splat (float 3.0)
  %ext = extractelement <vscale x 4 x float> %bo, i32 2
  ret float %ext
}

define float @extractelt_fsub_nxv4f32_splat(<vscale x 4 x float> %x) {
; CHECK-LABEL: extractelt_fsub_nxv4f32_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 1
; CHECK-NEXT:    lui a0, 263168
; CHECK-NEXT:    vfmv.f.s fa5, v8
; CHECK-NEXT:    fmv.w.x fa4, a0
; CHECK-NEXT:    fsub.s fa0, fa4, fa5
; CHECK-NEXT:    ret
  %bo = fsub <vscale x 4 x float> splat (float 3.0), %x
  %ext = extractelement <vscale x 4 x float> %bo, i32 1
  ret float %ext
}

define float @extractelt_fmul_nxv4f32_splat(<vscale x 4 x float> %x) {
; CHECK-LABEL: extractelt_fmul_nxv4f32_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 3
; CHECK-NEXT:    lui a0, 263168
; CHECK-NEXT:    vfmv.f.s fa5, v8
; CHECK-NEXT:    fmv.w.x fa4, a0
; CHECK-NEXT:    fmul.s fa0, fa5, fa4
; CHECK-NEXT:    ret
  %bo = fmul <vscale x 4 x float> %x, splat (float 3.0)
  %ext = extractelement <vscale x 4 x float> %bo, i32 3
  ret float %ext
}

define float @extractelt_fdiv_nxv4f32_splat(<vscale x 4 x float> %x) {
; CHECK-LABEL: extractelt_fdiv_nxv4f32_splat:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa5, v8
; CHECK-NEXT:    lui a0, 263168
; CHECK-NEXT:    fmv.w.x fa4, a0
; CHECK-NEXT:    fdiv.s fa0, fa5, fa4
; CHECK-NEXT:    ret
  %bo = fdiv <vscale x 4 x float> %x, splat (float 3.0)
  %ext = extractelement <vscale x 4 x float> %bo, i32 0
  ret float %ext
}

define double @extractelt_nxv16f64_0(<vscale x 16 x double> %v) {
; CHECK-LABEL: extractelt_nxv16f64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x double> %v, i32 0
  ret double %r
}

define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
; RV32-LABEL: extractelt_nxv16f64_neg1:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -80
; RV32-NEXT:    .cfi_def_cfa_offset 80
; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    addi s0, sp, 80
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 4
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    andi sp, sp, -64
; RV32-NEXT:    addi a0, sp, 64
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    vs8r.v v8, (a0)
; RV32-NEXT:    slli a2, a1, 3
; RV32-NEXT:    slli a1, a1, 4
; RV32-NEXT:    add a2, a0, a2
; RV32-NEXT:    vs8r.v v16, (a2)
; RV32-NEXT:    add a0, a1, a0
; RV32-NEXT:    fld fa0, -8(a0)
; RV32-NEXT:    addi sp, s0, -80
; RV32-NEXT:    .cfi_def_cfa sp, 80
; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    .cfi_restore s0
; RV32-NEXT:    addi sp, sp, 80
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: extractelt_nxv16f64_neg1:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    .cfi_def_cfa_offset 80
; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    addi s0, sp, 80
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 4
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    andi sp, sp, -64
; RV64-NEXT:    addi a0, sp, 64
; RV64-NEXT:    csrr a2, vlenb
; RV64-NEXT:    li a1, -1
; RV64-NEXT:    vs8r.v v8, (a0)
; RV64-NEXT:    slli a3, a2, 3
; RV64-NEXT:    srli a1, a1, 32
; RV64-NEXT:    slli a2, a2, 1
; RV64-NEXT:    add a3, a0, a3
; RV64-NEXT:    addi a2, a2, -1
; RV64-NEXT:    vs8r.v v16, (a3)
; RV64-NEXT:    bltu a2, a1, .LBB70_2
; RV64-NEXT:  # %bb.1:
; RV64-NEXT:    mv a2, a1
; RV64-NEXT:  .LBB70_2:
; RV64-NEXT:    slli a2, a2, 3
; RV64-NEXT:    add a0, a0, a2
; RV64-NEXT:    fld fa0, 0(a0)
; RV64-NEXT:    addi sp, s0, -80
; RV64-NEXT:    .cfi_def_cfa sp, 80
; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    .cfi_restore s0
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %r = extractelement <vscale x 16 x double> %v, i32 -1
  ret double %r
}

define double @extractelt_nxv16f64_imm(<vscale x 16 x double> %v) {
; CHECK-LABEL: extractelt_nxv16f64_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 2
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %r = extractelement <vscale x 16 x double> %v, i32 2
  ret double %r
}

define double @extractelt_nxv16f64_idx(<vscale x 16 x double> %v, i32 zeroext %idx) {
; RV32-LABEL: extractelt_nxv16f64_idx:
; RV32:       # %bb.0:
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    slli a2, a1, 1
; RV32-NEXT:    addi a2, a2, -1
; RV32-NEXT:    bltu a0, a2, .LBB72_2
; RV32-NEXT:  # %bb.1:
; RV32-NEXT:    mv a0, a2
; RV32-NEXT:  .LBB72_2:
; RV32-NEXT:    addi sp, sp, -80
; RV32-NEXT:    .cfi_def_cfa_offset 80
; RV32-NEXT:    sw ra, 76(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 72(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    addi s0, sp, 80
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    slli a2, a2, 4
; RV32-NEXT:    sub sp, sp, a2
; RV32-NEXT:    andi sp, sp, -64
; RV32-NEXT:    slli a0, a0, 3
; RV32-NEXT:    addi a2, sp, 64
; RV32-NEXT:    slli a1, a1, 3
; RV32-NEXT:    add a0, a2, a0
; RV32-NEXT:    vs8r.v v8, (a2)
; RV32-NEXT:    add a1, a2, a1
; RV32-NEXT:    vs8r.v v16, (a1)
; RV32-NEXT:    fld fa0, 0(a0)
; RV32-NEXT:    addi sp, s0, -80
; RV32-NEXT:    .cfi_def_cfa sp, 80
; RV32-NEXT:    lw ra, 76(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 72(sp) # 4-byte Folded Reload
; RV32-NEXT:    .cfi_restore ra
; RV32-NEXT:    .cfi_restore s0
; RV32-NEXT:    addi sp, sp, 80
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: extractelt_nxv16f64_idx:
; RV64:       # %bb.0:
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a2, a1, 1
; RV64-NEXT:    addi a2, a2, -1
; RV64-NEXT:    bltu a0, a2, .LBB72_2
; RV64-NEXT:  # %bb.1:
; RV64-NEXT:    mv a0, a2
; RV64-NEXT:  .LBB72_2:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    .cfi_def_cfa_offset 80
; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    addi s0, sp, 80
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    csrr a2, vlenb
; RV64-NEXT:    slli a2, a2, 4
; RV64-NEXT:    sub sp, sp, a2
; RV64-NEXT:    andi sp, sp, -64
; RV64-NEXT:    slli a0, a0, 3
; RV64-NEXT:    addi a2, sp, 64
; RV64-NEXT:    slli a1, a1, 3
; RV64-NEXT:    add a0, a2, a0
; RV64-NEXT:    vs8r.v v8, (a2)
; RV64-NEXT:    add a1, a2, a1
; RV64-NEXT:    vs8r.v v16, (a1)
; RV64-NEXT:    fld fa0, 0(a0)
; RV64-NEXT:    addi sp, s0, -80
; RV64-NEXT:    .cfi_def_cfa sp, 80
; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; RV64-NEXT:    .cfi_restore ra
; RV64-NEXT:    .cfi_restore s0
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    .cfi_def_cfa_offset 0
; RV64-NEXT:    ret
  %r = extractelement <vscale x 16 x double> %v, i32 %idx
  ret double %r
}