; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve,+bf16 < %s | FileCheck %s --check-prefixes=CHECK

; Should codegen to a nop, since idx is zero.
define <2 x i64> @extract_v2i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind {
; CHECK-LABEL: extract_v2i64_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 0)
  ret <2 x i64> %retval
}

; Should codegen to an EXT, since idx != 0.
define <2 x i64> @extract_v2i64_nxv2i64_idx2(<vscale x 2 x i64> %vec) nounwind {
; CHECK-LABEL: extract_v2i64_nxv2i64_idx2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 2)
  ret <2 x i64> %retval
}

; Should codegen to a nop, since idx is zero.
define <4 x i32> @extract_v4i32_nxv4i32(<vscale x 4 x i32> %vec) nounwind {
; CHECK-LABEL: extract_v4i32_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 0)
  ret <4 x i32> %retval
}

; Should codegen to an EXT, since idx != 0.
define <4 x i32> @extract_v4i32_nxv4i32_idx4(<vscale x 4 x i32> %vec) nounwind {
; CHECK-LABEL: extract_v4i32_nxv4i32_idx4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 4)
  ret <4 x i32> %retval
}

; Should codegen to uzps, since idx is zero and type is illegal.
define <4 x i32> @extract_v4i32_nxv2i32(<vscale x 2 x i32> %vec) nounwind #1 {
; CHECK-LABEL: extract_v4i32_nxv2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %vec, i64 0)
  ret <4 x i32> %retval
}

; Should codegen to an EXT and uzps, since idx != 0 and type is illegal.
define <4 x i32> @extract_v4i32_nxv2i32_idx4(<vscale x 2 x i32> %vec) nounwind #1 {
; CHECK-LABEL: extract_v4i32_nxv2i32_idx4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #32
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32> %vec, i64 4)
  ret <4 x i32> %retval
}

; Should codegen to a nop, since idx is zero.
define <8 x i16> @extract_v8i16_nxv8i16(<vscale x 8 x i16> %vec) nounwind {
; CHECK-LABEL: extract_v8i16_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> %vec, i64 0)
  ret <8 x i16> %retval
}

; Should codegen to an EXT, since idx != 0.
define <8 x i16> @extract_v8i16_nxv8i16_idx8(<vscale x 8 x i16> %vec) nounwind {
; CHECK-LABEL: extract_v8i16_nxv8i16_idx8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> %vec, i64 8)
  ret <8 x i16> %retval
}

; Should codegen to uzps, since idx is zero and type is illegal.
define <8 x i16> @extract_v8i16_nxv4i16(<vscale x 4 x i16> %vec) nounwind #1 {
; CHECK-LABEL: extract_v8i16_nxv4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv4i16(<vscale x 4 x i16> %vec, i64 0)
  ret <8 x i16> %retval
}

; Should codegen to an EXT and uzps, since idx != 0 and type is illegal.
define <8 x i16> @extract_v8i16_nxv4i16_idx8(<vscale x 4 x i16> %vec) nounwind #1 {
; CHECK-LABEL: extract_v8i16_nxv4i16_idx8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #32
; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv4i16(<vscale x 4 x i16> %vec, i64 8)
  ret <8 x i16> %retval
}

; Should codegen to uzps, since idx is zero and type is illegal.
define <8 x i16> @extract_v8i16_nxv2i16(<vscale x 2 x i16> %vec) nounwind #1 {
; CHECK-LABEL: extract_v8i16_nxv2i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv2i16(<vscale x 2 x i16> %vec, i64 0)
  ret <8 x i16> %retval
}

; Should codegen to an EXT and uzps, since idx != 0 and type is illegal.
define <8 x i16> @extract_v8i16_nxv2i16_idx8(<vscale x 2 x i16> %vec) nounwind #1 {
; CHECK-LABEL: extract_v8i16_nxv2i16_idx8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #64
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv2i16(<vscale x 2 x i16> %vec, i64 8)
  ret <8 x i16> %retval
}

; Should codegen to a nop, since idx is zero.
define <16 x i8> @extract_v16i8_nxv16i8(<vscale x 16 x i8> %vec) nounwind {
; CHECK-LABEL: extract_v16i8_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> %vec, i64 0)
  ret <16 x i8> %retval
}

; Should codegen to an EXT, since idx != 0.
define <16 x i8> @extract_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec) nounwind {
; CHECK-LABEL: extract_v16i8_nxv16i8_idx16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> %vec, i64 16)
  ret <16 x i8> %retval
}

; Should codegen to uzps, since idx is zero and type is illegal.
define <16 x i8> @extract_v16i8_nxv8i8(<vscale x 8 x i8> %vec) nounwind #1 {
; CHECK-LABEL: extract_v16i8_nxv8i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %vec, i64 0)
  ret <16 x i8> %retval
}

; Should codegen to an EXT and uzps, since idx != 0 and type is illegal.
define <16 x i8> @extract_v16i8_nxv8i8_idx16(<vscale x 8 x i8> %vec) nounwind #1 {
; CHECK-LABEL: extract_v16i8_nxv8i8_idx16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #32
; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %vec, i64 16)
  ret <16 x i8> %retval
}

; Should codegen to uzps, since idx is zero and type is illegal.
define <16 x i8> @extract_v16i8_nxv4i8(<vscale x 4 x i8> %vec) nounwind #1 {
; CHECK-LABEL: extract_v16i8_nxv4i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv4i8(<vscale x 4 x i8> %vec, i64 0)
  ret <16 x i8> %retval
}

; Should codegen to an EXT and uzps, since idx != 0 and type is illegal.
define <16 x i8> @extract_v16i8_nxv4i8_idx16(<vscale x 4 x i8> %vec) nounwind #1 {
; CHECK-LABEL: extract_v16i8_nxv4i8_idx16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #64
; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv4i8(<vscale x 4 x i8> %vec, i64 16)
  ret <16 x i8> %retval
}

; Should codegen to uzps, since idx is zero and type is illegal.
define <16 x i8> @extract_v16i8_nxv2i8(<vscale x 2 x i8> %vec) nounwind #1 {
; CHECK-LABEL: extract_v16i8_nxv2i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv2i8(<vscale x 2 x i8> %vec, i64 0)
  ret <16 x i8> %retval
}

; Should codegen to an EXT and uzps, since idx != 0 and type is illegal.
define <16 x i8> @extract_v16i8_nxv2i8_idx16(<vscale x 2 x i8> %vec) nounwind #1 {
; CHECK-LABEL: extract_v16i8_nxv2i8_idx16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #128
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv2i8(<vscale x 2 x i8> %vec, i64 16)
  ret <16 x i8> %retval
}


; Predicates

define <2 x i1> @extract_v2i1_nxv2i1(<vscale x 2 x i1> %inmask) {
; CHECK-LABEL: extract_v2i1_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
; CHECK-NEXT:    mov v0.s[1], v0.s[2]
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT:    ret
  %mask = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1> %inmask, i64 0)
  ret <2 x i1> %mask
}

define <4 x i1> @extract_v4i1_nxv4i1(<vscale x 4 x i1> %inmask) {
; CHECK-LABEL: extract_v4i1_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.s, p0/z, #1 // =0x1
; CHECK-NEXT:    mov w8, v1.s[1]
; CHECK-NEXT:    mov v0.16b, v1.16b
; CHECK-NEXT:    mov w9, v1.s[2]
; CHECK-NEXT:    mov v0.h[1], w8
; CHECK-NEXT:    mov w8, v1.s[3]
; CHECK-NEXT:    mov v0.h[2], w9
; CHECK-NEXT:    mov v0.h[3], w8
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT:    ret
  %mask = call <4 x i1> @llvm.vector.extract.v4i1.nxv4i1(<vscale x 4 x i1> %inmask, i64 0)
  ret <4 x i1> %mask
}

define <8 x i1> @extract_v8i1_nxv8i1(<vscale x 8 x i1> %inmask) {
; CHECK-LABEL: extract_v8i1_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.h, p0/z, #1 // =0x1
; CHECK-NEXT:    umov w8, v1.h[1]
; CHECK-NEXT:    mov v0.16b, v1.16b
; CHECK-NEXT:    umov w9, v1.h[2]
; CHECK-NEXT:    mov v0.b[1], w8
; CHECK-NEXT:    umov w8, v1.h[3]
; CHECK-NEXT:    mov v0.b[2], w9
; CHECK-NEXT:    umov w9, v1.h[4]
; CHECK-NEXT:    mov v0.b[3], w8
; CHECK-NEXT:    umov w8, v1.h[5]
; CHECK-NEXT:    mov v0.b[4], w9
; CHECK-NEXT:    umov w9, v1.h[6]
; CHECK-NEXT:    mov v0.b[5], w8
; CHECK-NEXT:    umov w8, v1.h[7]
; CHECK-NEXT:    mov v0.b[6], w9
; CHECK-NEXT:    mov v0.b[7], w8
; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT:    ret
  %mask = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1(<vscale x 8 x i1> %inmask, i64 0)
  ret <8 x i1> %mask
}

; TODO: Apply better reasoning when lowering extract_subvector from the bottom 128 bits
; of an SVE type.
define <16 x i1> @extract_v16i1_nxv16i1(<vscale x 16 x i1> %inmask) {
; CHECK-LABEL: extract_v16i1_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.b, p0/z, #1 // =0x1
; CHECK-NEXT:    mov v0.16b, v1.16b
; CHECK-NEXT:    mov v0.b[1], v1.b[1]
; CHECK-NEXT:    mov v0.b[2], v1.b[2]
; CHECK-NEXT:    mov v0.b[3], v1.b[3]
; CHECK-NEXT:    mov v0.b[4], v1.b[4]
; CHECK-NEXT:    mov v0.b[5], v1.b[5]
; CHECK-NEXT:    mov v0.b[6], v1.b[6]
; CHECK-NEXT:    mov v0.b[7], v1.b[7]
; CHECK-NEXT:    mov v0.b[8], v1.b[8]
; CHECK-NEXT:    mov v0.b[9], v1.b[9]
; CHECK-NEXT:    mov v0.b[10], v1.b[10]
; CHECK-NEXT:    mov v0.b[11], v1.b[11]
; CHECK-NEXT:    mov v0.b[12], v1.b[12]
; CHECK-NEXT:    mov v0.b[13], v1.b[13]
; CHECK-NEXT:    mov v0.b[14], v1.b[14]
; CHECK-NEXT:    mov v0.b[15], v1.b[15]
; CHECK-NEXT:    ret
  %mask = call <16 x i1> @llvm.vector.extract.v16i1.nxv16i1(<vscale x 16 x i1> %inmask, i64 0)
  ret <16 x i1> %mask
}


; Fixed length clamping

define <2 x i64> @extract_fixed_v2i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind #0 {
; CHECK-LABEL: extract_fixed_v2i64_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 2)
  ret <2 x i64> %retval
}

define void @extract_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, ptr %p) nounwind #0 {
; CHECK-LABEL: extract_fixed_v4i64_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #32
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %retval = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> %vec, i64 4)
  store <4 x i64> %retval, ptr %p
  ret void
}

; Check that extract from load via bitcast-gep-of-scalar-ptr does not crash.
define <4 x i32> @typesize_regression_test_v4i32(ptr %addr, i64 %idx) {
; CHECK-LABEL: typesize_regression_test_v4i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
entry:
  %ptr = getelementptr inbounds i32, ptr %addr, i64 %idx
  %bc = bitcast ptr %ptr to ptr
  %ld = load volatile <vscale x 4 x i32>, ptr %bc, align 16
  %out = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %ld, i64 0)
  ret <4 x i32> %out
}

;
; Extract fixed-width vector from a scalable vector splat.
;

define <2 x float> @extract_v2f32_nxv4f32_splat(float %f) {
; CHECK-LABEL: extract_v2f32_nxv4f32_splat:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT:    dup v0.2s, v0.s[0]
; CHECK-NEXT:    ret
  %ins = insertelement <vscale x 4 x float> poison, float %f, i32 0
  %splat = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
  %ext = call <2 x float> @llvm.vector.extract.v2f32.nxv4f32(<vscale x 4 x float> %splat, i64 0)
  ret <2 x float> %ext
}

define <2 x float> @extract_v2f32_nxv4f32_splat_const() {
; CHECK-LABEL: extract_v2f32_nxv4f32_splat_const:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmov v0.2s, #1.00000000
; CHECK-NEXT:    ret
  %ext = call <2 x float> @llvm.vector.extract.v2f32.nxv4f32(<vscale x 4 x float> splat(float 1.0), i64 0)
  ret <2 x float> %ext
}

define <4 x i32> @extract_v4i32_nxv8i32_splat_const() {
; CHECK-LABEL: extract_v4i32_nxv8i32_splat_const:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v0.4s, #1
; CHECK-NEXT:    ret
  %ext = call <4 x i32> @llvm.vector.extract.v4i32.nxv8i32(<vscale x 8 x i32> splat(i32 1), i64 0)
  ret <4 x i32> %ext
}

attributes #0 = { vscale_range(2,2) }
attributes #1 = { vscale_range(8,8) }

declare <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64>, i64)

declare <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32>, i64)
declare <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(<vscale x 2 x i32>, i64)

declare <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16>, i64)
declare <8 x i16> @llvm.vector.extract.v8i16.nxv4i16(<vscale x 4 x i16>, i64)
declare <8 x i16> @llvm.vector.extract.v8i16.nxv2i16(<vscale x 2 x i16>, i64)

declare <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8>, i64)
declare <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8>, i64)
declare <16 x i8> @llvm.vector.extract.v16i8.nxv4i8(<vscale x 4 x i8>, i64)
declare <16 x i8> @llvm.vector.extract.v16i8.nxv2i8(<vscale x 2 x i8>, i64)

declare <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(<vscale x 2 x i1>, i64)
declare <4 x i1> @llvm.vector.extract.v4i1.nxv4i1(<vscale x 4 x i1>, i64)
declare <8 x i1> @llvm.vector.extract.v8i1.nxv8i1(<vscale x 8 x i1>, i64)
declare <16 x i1> @llvm.vector.extract.v16i1.nxv16i1(<vscale x 16 x i1>, i64)

declare <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64>, i64)
declare <2 x float> @llvm.vector.extract.v2f32.nxv4f32(<vscale x 4 x float>, i64)
declare <4 x i32> @llvm.vector.extract.v4i32.nxv8i32(<vscale x 8 x i32>, i64)