xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vleff.ll (revision 84a3739ac072c95af9fa80e36d9e0f52d11e28eb)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
3; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
4; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
5; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
6
7declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
8  <vscale x 1 x i64>,
9  ptr,
10  iXLen);
11
12define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
13; RV32-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
14; RV32:       # %bb.0: # %entry
15; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
16; RV32-NEXT:    vle64ff.v v8, (a0)
17; RV32-NEXT:    csrr a0, vl
18; RV32-NEXT:    sw a0, 0(a2)
19; RV32-NEXT:    ret
20;
21; RV64-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
22; RV64:       # %bb.0: # %entry
23; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
24; RV64-NEXT:    vle64ff.v v8, (a0)
25; RV64-NEXT:    csrr a0, vl
26; RV64-NEXT:    sd a0, 0(a2)
27; RV64-NEXT:    ret
28entry:
29  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
30    <vscale x 1 x i64> undef,
31    ptr %0,
32    iXLen %1)
33  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
34  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
35  store iXLen %c, iXLen* %2
36  ret <vscale x 1 x i64> %b
37}
38
39declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
40  <vscale x 1 x i64>,
41  ptr,
42  <vscale x 1 x i1>,
43  iXLen,
44  iXLen);
45
46define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
47; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
48; RV32:       # %bb.0: # %entry
49; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
50; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
51; RV32-NEXT:    csrr a0, vl
52; RV32-NEXT:    sw a0, 0(a2)
53; RV32-NEXT:    ret
54;
55; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
56; RV64:       # %bb.0: # %entry
57; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
58; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
59; RV64-NEXT:    csrr a0, vl
60; RV64-NEXT:    sd a0, 0(a2)
61; RV64-NEXT:    ret
62entry:
63  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
64    <vscale x 1 x i64> %0,
65    ptr %1,
66    <vscale x 1 x i1> %2,
67    iXLen %3, iXLen 1)
68  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
69  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
70  store iXLen %c, iXLen* %4
71
72  ret <vscale x 1 x i64> %b
73}
74
75declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
76  <vscale x 2 x i64>,
77  ptr,
78  iXLen);
79
80define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
81; RV32-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
82; RV32:       # %bb.0: # %entry
83; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
84; RV32-NEXT:    vle64ff.v v8, (a0)
85; RV32-NEXT:    csrr a0, vl
86; RV32-NEXT:    sw a0, 0(a2)
87; RV32-NEXT:    ret
88;
89; RV64-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
90; RV64:       # %bb.0: # %entry
91; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
92; RV64-NEXT:    vle64ff.v v8, (a0)
93; RV64-NEXT:    csrr a0, vl
94; RV64-NEXT:    sd a0, 0(a2)
95; RV64-NEXT:    ret
96entry:
97  %a = call { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
98    <vscale x 2 x i64> undef,
99    ptr %0,
100    iXLen %1)
101  %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
102  %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
103  store iXLen %c, iXLen* %2
104  ret <vscale x 2 x i64> %b
105}
106
107declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
108  <vscale x 2 x i64>,
109  ptr,
110  <vscale x 2 x i1>,
111  iXLen,
112  iXLen);
113
114define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
115; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
116; RV32:       # %bb.0: # %entry
117; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
118; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
119; RV32-NEXT:    csrr a0, vl
120; RV32-NEXT:    sw a0, 0(a2)
121; RV32-NEXT:    ret
122;
123; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
124; RV64:       # %bb.0: # %entry
125; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
126; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
127; RV64-NEXT:    csrr a0, vl
128; RV64-NEXT:    sd a0, 0(a2)
129; RV64-NEXT:    ret
130entry:
131  %a = call { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
132    <vscale x 2 x i64> %0,
133    ptr %1,
134    <vscale x 2 x i1> %2,
135    iXLen %3, iXLen 1)
136  %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
137  %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
138  store iXLen %c, iXLen* %4
139
140  ret <vscale x 2 x i64> %b
141}
142
143declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
144  <vscale x 4 x i64>,
145  ptr,
146  iXLen);
147
148define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
149; RV32-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
150; RV32:       # %bb.0: # %entry
151; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
152; RV32-NEXT:    vle64ff.v v8, (a0)
153; RV32-NEXT:    csrr a0, vl
154; RV32-NEXT:    sw a0, 0(a2)
155; RV32-NEXT:    ret
156;
157; RV64-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
158; RV64:       # %bb.0: # %entry
159; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
160; RV64-NEXT:    vle64ff.v v8, (a0)
161; RV64-NEXT:    csrr a0, vl
162; RV64-NEXT:    sd a0, 0(a2)
163; RV64-NEXT:    ret
164entry:
165  %a = call { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
166    <vscale x 4 x i64> undef,
167    ptr %0,
168    iXLen %1)
169  %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
170  %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
171  store iXLen %c, iXLen* %2
172  ret <vscale x 4 x i64> %b
173}
174
175declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
176  <vscale x 4 x i64>,
177  ptr,
178  <vscale x 4 x i1>,
179  iXLen,
180  iXLen);
181
182define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
183; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
184; RV32:       # %bb.0: # %entry
185; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
186; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
187; RV32-NEXT:    csrr a0, vl
188; RV32-NEXT:    sw a0, 0(a2)
189; RV32-NEXT:    ret
190;
191; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
192; RV64:       # %bb.0: # %entry
193; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
194; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
195; RV64-NEXT:    csrr a0, vl
196; RV64-NEXT:    sd a0, 0(a2)
197; RV64-NEXT:    ret
198entry:
199  %a = call { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
200    <vscale x 4 x i64> %0,
201    ptr %1,
202    <vscale x 4 x i1> %2,
203    iXLen %3, iXLen 1)
204  %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
205  %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
206  store iXLen %c, iXLen* %4
207
208  ret <vscale x 4 x i64> %b
209}
210
211declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
212  <vscale x 8 x i64>,
213  ptr,
214  iXLen);
215
216define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
217; RV32-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
218; RV32:       # %bb.0: # %entry
219; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
220; RV32-NEXT:    vle64ff.v v8, (a0)
221; RV32-NEXT:    csrr a0, vl
222; RV32-NEXT:    sw a0, 0(a2)
223; RV32-NEXT:    ret
224;
225; RV64-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
226; RV64:       # %bb.0: # %entry
227; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
228; RV64-NEXT:    vle64ff.v v8, (a0)
229; RV64-NEXT:    csrr a0, vl
230; RV64-NEXT:    sd a0, 0(a2)
231; RV64-NEXT:    ret
232entry:
233  %a = call { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
234    <vscale x 8 x i64> undef,
235    ptr %0,
236    iXLen %1)
237  %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
238  %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
239  store iXLen %c, iXLen* %2
240  ret <vscale x 8 x i64> %b
241}
242
243declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
244  <vscale x 8 x i64>,
245  ptr,
246  <vscale x 8 x i1>,
247  iXLen,
248  iXLen);
249
250define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
251; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
252; RV32:       # %bb.0: # %entry
253; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
254; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
255; RV32-NEXT:    csrr a0, vl
256; RV32-NEXT:    sw a0, 0(a2)
257; RV32-NEXT:    ret
258;
259; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
260; RV64:       # %bb.0: # %entry
261; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
262; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
263; RV64-NEXT:    csrr a0, vl
264; RV64-NEXT:    sd a0, 0(a2)
265; RV64-NEXT:    ret
266entry:
267  %a = call { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
268    <vscale x 8 x i64> %0,
269    ptr %1,
270    <vscale x 8 x i1> %2,
271    iXLen %3, iXLen 1)
272  %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
273  %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
274  store iXLen %c, iXLen* %4
275
276  ret <vscale x 8 x i64> %b
277}
278
279declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
280  <vscale x 1 x double>,
281  ptr,
282  iXLen);
283
284define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
285; RV32-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
286; RV32:       # %bb.0: # %entry
287; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
288; RV32-NEXT:    vle64ff.v v8, (a0)
289; RV32-NEXT:    csrr a0, vl
290; RV32-NEXT:    sw a0, 0(a2)
291; RV32-NEXT:    ret
292;
293; RV64-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
294; RV64:       # %bb.0: # %entry
295; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
296; RV64-NEXT:    vle64ff.v v8, (a0)
297; RV64-NEXT:    csrr a0, vl
298; RV64-NEXT:    sd a0, 0(a2)
299; RV64-NEXT:    ret
300entry:
301  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
302    <vscale x 1 x double> undef,
303    ptr %0,
304    iXLen %1)
305  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
306  %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
307  store iXLen %c, iXLen* %2
308  ret <vscale x 1 x double> %b
309}
310
311declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
312  <vscale x 1 x double>,
313  ptr,
314  <vscale x 1 x i1>,
315  iXLen,
316  iXLen);
317
318define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
319; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
320; RV32:       # %bb.0: # %entry
321; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
322; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
323; RV32-NEXT:    csrr a0, vl
324; RV32-NEXT:    sw a0, 0(a2)
325; RV32-NEXT:    ret
326;
327; RV64-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
328; RV64:       # %bb.0: # %entry
329; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
330; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
331; RV64-NEXT:    csrr a0, vl
332; RV64-NEXT:    sd a0, 0(a2)
333; RV64-NEXT:    ret
334entry:
335  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
336    <vscale x 1 x double> %0,
337    ptr %1,
338    <vscale x 1 x i1> %2,
339    iXLen %3, iXLen 1)
340  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
341  %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
342  store iXLen %c, iXLen* %4
343
344  ret <vscale x 1 x double> %b
345}
346
347declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
348  <vscale x 2 x double>,
349  ptr,
350  iXLen);
351
352define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
353; RV32-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
354; RV32:       # %bb.0: # %entry
355; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
356; RV32-NEXT:    vle64ff.v v8, (a0)
357; RV32-NEXT:    csrr a0, vl
358; RV32-NEXT:    sw a0, 0(a2)
359; RV32-NEXT:    ret
360;
361; RV64-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
362; RV64:       # %bb.0: # %entry
363; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
364; RV64-NEXT:    vle64ff.v v8, (a0)
365; RV64-NEXT:    csrr a0, vl
366; RV64-NEXT:    sd a0, 0(a2)
367; RV64-NEXT:    ret
368entry:
369  %a = call { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
370    <vscale x 2 x double> undef,
371    ptr %0,
372    iXLen %1)
373  %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
374  %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
375  store iXLen %c, iXLen* %2
376  ret <vscale x 2 x double> %b
377}
378
379declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
380  <vscale x 2 x double>,
381  ptr,
382  <vscale x 2 x i1>,
383  iXLen,
384  iXLen);
385
386define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
387; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
388; RV32:       # %bb.0: # %entry
389; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
390; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
391; RV32-NEXT:    csrr a0, vl
392; RV32-NEXT:    sw a0, 0(a2)
393; RV32-NEXT:    ret
394;
395; RV64-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
396; RV64:       # %bb.0: # %entry
397; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
398; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
399; RV64-NEXT:    csrr a0, vl
400; RV64-NEXT:    sd a0, 0(a2)
401; RV64-NEXT:    ret
402entry:
403  %a = call { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
404    <vscale x 2 x double> %0,
405    ptr %1,
406    <vscale x 2 x i1> %2,
407    iXLen %3, iXLen 1)
408  %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
409  %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
410  store iXLen %c, iXLen* %4
411
412  ret <vscale x 2 x double> %b
413}
414
415declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
416  <vscale x 4 x double>,
417  ptr,
418  iXLen);
419
420define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
421; RV32-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
422; RV32:       # %bb.0: # %entry
423; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
424; RV32-NEXT:    vle64ff.v v8, (a0)
425; RV32-NEXT:    csrr a0, vl
426; RV32-NEXT:    sw a0, 0(a2)
427; RV32-NEXT:    ret
428;
429; RV64-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
430; RV64:       # %bb.0: # %entry
431; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
432; RV64-NEXT:    vle64ff.v v8, (a0)
433; RV64-NEXT:    csrr a0, vl
434; RV64-NEXT:    sd a0, 0(a2)
435; RV64-NEXT:    ret
436entry:
437  %a = call { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
438    <vscale x 4 x double> undef,
439    ptr %0,
440    iXLen %1)
441  %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
442  %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
443  store iXLen %c, iXLen* %2
444  ret <vscale x 4 x double> %b
445}
446
447declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
448  <vscale x 4 x double>,
449  ptr,
450  <vscale x 4 x i1>,
451  iXLen,
452  iXLen);
453
454define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
455; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
456; RV32:       # %bb.0: # %entry
457; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
458; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
459; RV32-NEXT:    csrr a0, vl
460; RV32-NEXT:    sw a0, 0(a2)
461; RV32-NEXT:    ret
462;
463; RV64-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
464; RV64:       # %bb.0: # %entry
465; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
466; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
467; RV64-NEXT:    csrr a0, vl
468; RV64-NEXT:    sd a0, 0(a2)
469; RV64-NEXT:    ret
470entry:
471  %a = call { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
472    <vscale x 4 x double> %0,
473    ptr %1,
474    <vscale x 4 x i1> %2,
475    iXLen %3, iXLen 1)
476  %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
477  %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
478  store iXLen %c, iXLen* %4
479
480  ret <vscale x 4 x double> %b
481}
482
483declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
484  <vscale x 8 x double>,
485  ptr,
486  iXLen);
487
488define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
489; RV32-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
490; RV32:       # %bb.0: # %entry
491; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
492; RV32-NEXT:    vle64ff.v v8, (a0)
493; RV32-NEXT:    csrr a0, vl
494; RV32-NEXT:    sw a0, 0(a2)
495; RV32-NEXT:    ret
496;
497; RV64-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
498; RV64:       # %bb.0: # %entry
499; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
500; RV64-NEXT:    vle64ff.v v8, (a0)
501; RV64-NEXT:    csrr a0, vl
502; RV64-NEXT:    sd a0, 0(a2)
503; RV64-NEXT:    ret
504entry:
505  %a = call { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
506    <vscale x 8 x double> undef,
507    ptr %0,
508    iXLen %1)
509  %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
510  %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
511  store iXLen %c, iXLen* %2
512  ret <vscale x 8 x double> %b
513}
514
515declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
516  <vscale x 8 x double>,
517  ptr,
518  <vscale x 8 x i1>,
519  iXLen,
520  iXLen);
521
522define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
523; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
524; RV32:       # %bb.0: # %entry
525; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
526; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
527; RV32-NEXT:    csrr a0, vl
528; RV32-NEXT:    sw a0, 0(a2)
529; RV32-NEXT:    ret
530;
531; RV64-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
532; RV64:       # %bb.0: # %entry
533; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
534; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
535; RV64-NEXT:    csrr a0, vl
536; RV64-NEXT:    sd a0, 0(a2)
537; RV64-NEXT:    ret
538entry:
539  %a = call { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
540    <vscale x 8 x double> %0,
541    ptr %1,
542    <vscale x 8 x i1> %2,
543    iXLen %3, iXLen 1)
544  %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
545  %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
546  store iXLen %c, iXLen* %4
547
548  ret <vscale x 8 x double> %b
549}
550
551declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
552  <vscale x 1 x i32>,
553  ptr,
554  iXLen);
555
556define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
557; RV32-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
558; RV32:       # %bb.0: # %entry
559; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
560; RV32-NEXT:    vle32ff.v v8, (a0)
561; RV32-NEXT:    csrr a0, vl
562; RV32-NEXT:    sw a0, 0(a2)
563; RV32-NEXT:    ret
564;
565; RV64-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
566; RV64:       # %bb.0: # %entry
567; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
568; RV64-NEXT:    vle32ff.v v8, (a0)
569; RV64-NEXT:    csrr a0, vl
570; RV64-NEXT:    sd a0, 0(a2)
571; RV64-NEXT:    ret
572entry:
573  %a = call { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
574    <vscale x 1 x i32> undef,
575    ptr %0,
576    iXLen %1)
577  %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
578  %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
579  store iXLen %c, iXLen* %2
580  ret <vscale x 1 x i32> %b
581}
582
583declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
584  <vscale x 1 x i32>,
585  ptr,
586  <vscale x 1 x i1>,
587  iXLen,
588  iXLen);
589
590define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
591; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
592; RV32:       # %bb.0: # %entry
593; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
594; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
595; RV32-NEXT:    csrr a0, vl
596; RV32-NEXT:    sw a0, 0(a2)
597; RV32-NEXT:    ret
598;
599; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
600; RV64:       # %bb.0: # %entry
601; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
602; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
603; RV64-NEXT:    csrr a0, vl
604; RV64-NEXT:    sd a0, 0(a2)
605; RV64-NEXT:    ret
606entry:
607  %a = call { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
608    <vscale x 1 x i32> %0,
609    ptr %1,
610    <vscale x 1 x i1> %2,
611    iXLen %3, iXLen 1)
612  %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
613  %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
614  store iXLen %c, iXLen* %4
615
616  ret <vscale x 1 x i32> %b
617}
618
619declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
620  <vscale x 2 x i32>,
621  ptr,
622  iXLen);
623
624define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
625; RV32-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
626; RV32:       # %bb.0: # %entry
627; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
628; RV32-NEXT:    vle32ff.v v8, (a0)
629; RV32-NEXT:    csrr a0, vl
630; RV32-NEXT:    sw a0, 0(a2)
631; RV32-NEXT:    ret
632;
633; RV64-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
634; RV64:       # %bb.0: # %entry
635; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
636; RV64-NEXT:    vle32ff.v v8, (a0)
637; RV64-NEXT:    csrr a0, vl
638; RV64-NEXT:    sd a0, 0(a2)
639; RV64-NEXT:    ret
640entry:
641  %a = call { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
642    <vscale x 2 x i32> undef,
643    ptr %0,
644    iXLen %1)
645  %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
646  %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
647  store iXLen %c, iXLen* %2
648  ret <vscale x 2 x i32> %b
649}
650
651declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
652  <vscale x 2 x i32>,
653  ptr,
654  <vscale x 2 x i1>,
655  iXLen,
656  iXLen);
657
658define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
659; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
660; RV32:       # %bb.0: # %entry
661; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
662; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
663; RV32-NEXT:    csrr a0, vl
664; RV32-NEXT:    sw a0, 0(a2)
665; RV32-NEXT:    ret
666;
667; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
668; RV64:       # %bb.0: # %entry
669; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
670; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
671; RV64-NEXT:    csrr a0, vl
672; RV64-NEXT:    sd a0, 0(a2)
673; RV64-NEXT:    ret
674entry:
675  %a = call { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
676    <vscale x 2 x i32> %0,
677    ptr %1,
678    <vscale x 2 x i1> %2,
679    iXLen %3, iXLen 1)
680  %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
681  %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
682  store iXLen %c, iXLen* %4
683
684  ret <vscale x 2 x i32> %b
685}
686
687declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
688  <vscale x 4 x i32>,
689  ptr,
690  iXLen);
691
692define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
693; RV32-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
694; RV32:       # %bb.0: # %entry
695; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
696; RV32-NEXT:    vle32ff.v v8, (a0)
697; RV32-NEXT:    csrr a0, vl
698; RV32-NEXT:    sw a0, 0(a2)
699; RV32-NEXT:    ret
700;
701; RV64-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
702; RV64:       # %bb.0: # %entry
703; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
704; RV64-NEXT:    vle32ff.v v8, (a0)
705; RV64-NEXT:    csrr a0, vl
706; RV64-NEXT:    sd a0, 0(a2)
707; RV64-NEXT:    ret
708entry:
709  %a = call { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
710    <vscale x 4 x i32> undef,
711    ptr %0,
712    iXLen %1)
713  %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
714  %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
715  store iXLen %c, iXLen* %2
716  ret <vscale x 4 x i32> %b
717}
718
719declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
720  <vscale x 4 x i32>,
721  ptr,
722  <vscale x 4 x i1>,
723  iXLen,
724  iXLen);
725
726define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
727; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
728; RV32:       # %bb.0: # %entry
729; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
730; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
731; RV32-NEXT:    csrr a0, vl
732; RV32-NEXT:    sw a0, 0(a2)
733; RV32-NEXT:    ret
734;
735; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
736; RV64:       # %bb.0: # %entry
737; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
738; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
739; RV64-NEXT:    csrr a0, vl
740; RV64-NEXT:    sd a0, 0(a2)
741; RV64-NEXT:    ret
742entry:
743  %a = call { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
744    <vscale x 4 x i32> %0,
745    ptr %1,
746    <vscale x 4 x i1> %2,
747    iXLen %3, iXLen 1)
748  %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
749  %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
750  store iXLen %c, iXLen* %4
751
752  ret <vscale x 4 x i32> %b
753}
754
755declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
756  <vscale x 8 x i32>,
757  ptr,
758  iXLen);
759
760define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
761; RV32-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
762; RV32:       # %bb.0: # %entry
763; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
764; RV32-NEXT:    vle32ff.v v8, (a0)
765; RV32-NEXT:    csrr a0, vl
766; RV32-NEXT:    sw a0, 0(a2)
767; RV32-NEXT:    ret
768;
769; RV64-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
770; RV64:       # %bb.0: # %entry
771; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
772; RV64-NEXT:    vle32ff.v v8, (a0)
773; RV64-NEXT:    csrr a0, vl
774; RV64-NEXT:    sd a0, 0(a2)
775; RV64-NEXT:    ret
776entry:
777  %a = call { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
778    <vscale x 8 x i32> undef,
779    ptr %0,
780    iXLen %1)
781  %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
782  %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
783  store iXLen %c, iXLen* %2
784  ret <vscale x 8 x i32> %b
785}
786
787declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
788  <vscale x 8 x i32>,
789  ptr,
790  <vscale x 8 x i1>,
791  iXLen,
792  iXLen);
793
794define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
795; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
796; RV32:       # %bb.0: # %entry
797; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
798; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
799; RV32-NEXT:    csrr a0, vl
800; RV32-NEXT:    sw a0, 0(a2)
801; RV32-NEXT:    ret
802;
803; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
804; RV64:       # %bb.0: # %entry
805; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
806; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
807; RV64-NEXT:    csrr a0, vl
808; RV64-NEXT:    sd a0, 0(a2)
809; RV64-NEXT:    ret
810entry:
811  %a = call { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
812    <vscale x 8 x i32> %0,
813    ptr %1,
814    <vscale x 8 x i1> %2,
815    iXLen %3, iXLen 1)
816  %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
817  %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
818  store iXLen %c, iXLen* %4
819
820  ret <vscale x 8 x i32> %b
821}
822
823declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
824  <vscale x 16 x i32>,
825  ptr,
826  iXLen);
827
828define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
829; RV32-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
830; RV32:       # %bb.0: # %entry
831; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
832; RV32-NEXT:    vle32ff.v v8, (a0)
833; RV32-NEXT:    csrr a0, vl
834; RV32-NEXT:    sw a0, 0(a2)
835; RV32-NEXT:    ret
836;
837; RV64-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
838; RV64:       # %bb.0: # %entry
839; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
840; RV64-NEXT:    vle32ff.v v8, (a0)
841; RV64-NEXT:    csrr a0, vl
842; RV64-NEXT:    sd a0, 0(a2)
843; RV64-NEXT:    ret
844entry:
845  %a = call { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
846    <vscale x 16 x i32> undef,
847    ptr %0,
848    iXLen %1)
849  %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
850  %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
851  store iXLen %c, iXLen* %2
852  ret <vscale x 16 x i32> %b
853}
854
855declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
856  <vscale x 16 x i32>,
857  ptr,
858  <vscale x 16 x i1>,
859  iXLen,
860  iXLen);
861
862define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
863; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
864; RV32:       # %bb.0: # %entry
865; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
866; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
867; RV32-NEXT:    csrr a0, vl
868; RV32-NEXT:    sw a0, 0(a2)
869; RV32-NEXT:    ret
870;
871; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
872; RV64:       # %bb.0: # %entry
873; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
874; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
875; RV64-NEXT:    csrr a0, vl
876; RV64-NEXT:    sd a0, 0(a2)
877; RV64-NEXT:    ret
878entry:
879  %a = call { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
880    <vscale x 16 x i32> %0,
881    ptr %1,
882    <vscale x 16 x i1> %2,
883    iXLen %3, iXLen 1)
884  %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
885  %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
886  store iXLen %c, iXLen* %4
887
888  ret <vscale x 16 x i32> %b
889}
890
891declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
892  <vscale x 1 x float>,
893  ptr,
894  iXLen);
895
896define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
897; RV32-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
898; RV32:       # %bb.0: # %entry
899; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
900; RV32-NEXT:    vle32ff.v v8, (a0)
901; RV32-NEXT:    csrr a0, vl
902; RV32-NEXT:    sw a0, 0(a2)
903; RV32-NEXT:    ret
904;
905; RV64-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
906; RV64:       # %bb.0: # %entry
907; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
908; RV64-NEXT:    vle32ff.v v8, (a0)
909; RV64-NEXT:    csrr a0, vl
910; RV64-NEXT:    sd a0, 0(a2)
911; RV64-NEXT:    ret
912entry:
913  %a = call { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
914    <vscale x 1 x float> undef,
915    ptr %0,
916    iXLen %1)
917  %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
918  %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
919  store iXLen %c, iXLen* %2
920  ret <vscale x 1 x float> %b
921}
922
923declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
924  <vscale x 1 x float>,
925  ptr,
926  <vscale x 1 x i1>,
927  iXLen,
928  iXLen);
929
930define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
931; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
932; RV32:       # %bb.0: # %entry
933; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
934; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
935; RV32-NEXT:    csrr a0, vl
936; RV32-NEXT:    sw a0, 0(a2)
937; RV32-NEXT:    ret
938;
939; RV64-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
940; RV64:       # %bb.0: # %entry
941; RV64-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
942; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
943; RV64-NEXT:    csrr a0, vl
944; RV64-NEXT:    sd a0, 0(a2)
945; RV64-NEXT:    ret
946entry:
947  %a = call { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
948    <vscale x 1 x float> %0,
949    ptr %1,
950    <vscale x 1 x i1> %2,
951    iXLen %3, iXLen 1)
952  %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
953  %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
954  store iXLen %c, iXLen* %4
955
956  ret <vscale x 1 x float> %b
957}
958
959declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
960  <vscale x 2 x float>,
961  ptr,
962  iXLen);
963
964define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
965; RV32-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
966; RV32:       # %bb.0: # %entry
967; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
968; RV32-NEXT:    vle32ff.v v8, (a0)
969; RV32-NEXT:    csrr a0, vl
970; RV32-NEXT:    sw a0, 0(a2)
971; RV32-NEXT:    ret
972;
973; RV64-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
974; RV64:       # %bb.0: # %entry
975; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
976; RV64-NEXT:    vle32ff.v v8, (a0)
977; RV64-NEXT:    csrr a0, vl
978; RV64-NEXT:    sd a0, 0(a2)
979; RV64-NEXT:    ret
980entry:
981  %a = call { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
982    <vscale x 2 x float> undef,
983    ptr %0,
984    iXLen %1)
985  %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
986  %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
987  store iXLen %c, iXLen* %2
988  ret <vscale x 2 x float> %b
989}
990
991declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
992  <vscale x 2 x float>,
993  ptr,
994  <vscale x 2 x i1>,
995  iXLen,
996  iXLen);
997
998define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
999; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
1000; RV32:       # %bb.0: # %entry
1001; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
1002; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
1003; RV32-NEXT:    csrr a0, vl
1004; RV32-NEXT:    sw a0, 0(a2)
1005; RV32-NEXT:    ret
1006;
1007; RV64-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
1008; RV64:       # %bb.0: # %entry
1009; RV64-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
1010; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
1011; RV64-NEXT:    csrr a0, vl
1012; RV64-NEXT:    sd a0, 0(a2)
1013; RV64-NEXT:    ret
1014entry:
1015  %a = call { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
1016    <vscale x 2 x float> %0,
1017    ptr %1,
1018    <vscale x 2 x i1> %2,
1019    iXLen %3, iXLen 1)
1020  %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
1021  %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
1022  store iXLen %c, iXLen* %4
1023
1024  ret <vscale x 2 x float> %b
1025}
1026
1027declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
1028  <vscale x 4 x float>,
1029  ptr,
1030  iXLen);
1031
1032define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
1033; RV32-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
1034; RV32:       # %bb.0: # %entry
1035; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1036; RV32-NEXT:    vle32ff.v v8, (a0)
1037; RV32-NEXT:    csrr a0, vl
1038; RV32-NEXT:    sw a0, 0(a2)
1039; RV32-NEXT:    ret
1040;
1041; RV64-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
1042; RV64:       # %bb.0: # %entry
1043; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1044; RV64-NEXT:    vle32ff.v v8, (a0)
1045; RV64-NEXT:    csrr a0, vl
1046; RV64-NEXT:    sd a0, 0(a2)
1047; RV64-NEXT:    ret
1048entry:
1049  %a = call { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
1050    <vscale x 4 x float> undef,
1051    ptr %0,
1052    iXLen %1)
1053  %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
1054  %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
1055  store iXLen %c, iXLen* %2
1056  ret <vscale x 4 x float> %b
1057}
1058
1059declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
1060  <vscale x 4 x float>,
1061  ptr,
1062  <vscale x 4 x i1>,
1063  iXLen,
1064  iXLen);
1065
1066define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1067; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
1068; RV32:       # %bb.0: # %entry
1069; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
1070; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
1071; RV32-NEXT:    csrr a0, vl
1072; RV32-NEXT:    sw a0, 0(a2)
1073; RV32-NEXT:    ret
1074;
1075; RV64-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
1076; RV64:       # %bb.0: # %entry
1077; RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
1078; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
1079; RV64-NEXT:    csrr a0, vl
1080; RV64-NEXT:    sd a0, 0(a2)
1081; RV64-NEXT:    ret
1082entry:
1083  %a = call { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
1084    <vscale x 4 x float> %0,
1085    ptr %1,
1086    <vscale x 4 x i1> %2,
1087    iXLen %3, iXLen 1)
1088  %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
1089  %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
1090  store iXLen %c, iXLen* %4
1091
1092  ret <vscale x 4 x float> %b
1093}
1094
1095declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
1096  <vscale x 8 x float>,
1097  ptr,
1098  iXLen);
1099
1100define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
1101; RV32-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
1102; RV32:       # %bb.0: # %entry
1103; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
1104; RV32-NEXT:    vle32ff.v v8, (a0)
1105; RV32-NEXT:    csrr a0, vl
1106; RV32-NEXT:    sw a0, 0(a2)
1107; RV32-NEXT:    ret
1108;
1109; RV64-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
1110; RV64:       # %bb.0: # %entry
1111; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
1112; RV64-NEXT:    vle32ff.v v8, (a0)
1113; RV64-NEXT:    csrr a0, vl
1114; RV64-NEXT:    sd a0, 0(a2)
1115; RV64-NEXT:    ret
1116entry:
1117  %a = call { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
1118    <vscale x 8 x float> undef,
1119    ptr %0,
1120    iXLen %1)
1121  %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
1122  %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
1123  store iXLen %c, iXLen* %2
1124  ret <vscale x 8 x float> %b
1125}
1126
1127declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
1128  <vscale x 8 x float>,
1129  ptr,
1130  <vscale x 8 x i1>,
1131  iXLen,
1132  iXLen);
1133
1134define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1135; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
1136; RV32:       # %bb.0: # %entry
1137; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
1138; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
1139; RV32-NEXT:    csrr a0, vl
1140; RV32-NEXT:    sw a0, 0(a2)
1141; RV32-NEXT:    ret
1142;
1143; RV64-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
1144; RV64:       # %bb.0: # %entry
1145; RV64-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
1146; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
1147; RV64-NEXT:    csrr a0, vl
1148; RV64-NEXT:    sd a0, 0(a2)
1149; RV64-NEXT:    ret
1150entry:
1151  %a = call { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
1152    <vscale x 8 x float> %0,
1153    ptr %1,
1154    <vscale x 8 x i1> %2,
1155    iXLen %3, iXLen 1)
1156  %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
1157  %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
1158  store iXLen %c, iXLen* %4
1159
1160  ret <vscale x 8 x float> %b
1161}
1162
1163declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
1164  <vscale x 16 x float>,
1165  ptr,
1166  iXLen);
1167
1168define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
1169; RV32-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
1170; RV32:       # %bb.0: # %entry
1171; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
1172; RV32-NEXT:    vle32ff.v v8, (a0)
1173; RV32-NEXT:    csrr a0, vl
1174; RV32-NEXT:    sw a0, 0(a2)
1175; RV32-NEXT:    ret
1176;
1177; RV64-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
1178; RV64:       # %bb.0: # %entry
1179; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
1180; RV64-NEXT:    vle32ff.v v8, (a0)
1181; RV64-NEXT:    csrr a0, vl
1182; RV64-NEXT:    sd a0, 0(a2)
1183; RV64-NEXT:    ret
1184entry:
1185  %a = call { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
1186    <vscale x 16 x float> undef,
1187    ptr %0,
1188    iXLen %1)
1189  %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
1190  %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
1191  store iXLen %c, iXLen* %2
1192  ret <vscale x 16 x float> %b
1193}
1194
1195declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
1196  <vscale x 16 x float>,
1197  ptr,
1198  <vscale x 16 x i1>,
1199  iXLen,
1200  iXLen);
1201
1202define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1203; RV32-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
1204; RV32:       # %bb.0: # %entry
1205; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
1206; RV32-NEXT:    vle32ff.v v8, (a0), v0.t
1207; RV32-NEXT:    csrr a0, vl
1208; RV32-NEXT:    sw a0, 0(a2)
1209; RV32-NEXT:    ret
1210;
1211; RV64-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
1212; RV64:       # %bb.0: # %entry
1213; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
1214; RV64-NEXT:    vle32ff.v v8, (a0), v0.t
1215; RV64-NEXT:    csrr a0, vl
1216; RV64-NEXT:    sd a0, 0(a2)
1217; RV64-NEXT:    ret
1218entry:
1219  %a = call { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
1220    <vscale x 16 x float> %0,
1221    ptr %1,
1222    <vscale x 16 x i1> %2,
1223    iXLen %3, iXLen 1)
1224  %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
1225  %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
1226  store iXLen %c, iXLen* %4
1227
1228  ret <vscale x 16 x float> %b
1229}
1230
1231declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
1232  <vscale x 1 x i16>,
1233  ptr,
1234  iXLen);
1235
1236define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1237; RV32-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
1238; RV32:       # %bb.0: # %entry
1239; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1240; RV32-NEXT:    vle16ff.v v8, (a0)
1241; RV32-NEXT:    csrr a0, vl
1242; RV32-NEXT:    sw a0, 0(a2)
1243; RV32-NEXT:    ret
1244;
1245; RV64-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
1246; RV64:       # %bb.0: # %entry
1247; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1248; RV64-NEXT:    vle16ff.v v8, (a0)
1249; RV64-NEXT:    csrr a0, vl
1250; RV64-NEXT:    sd a0, 0(a2)
1251; RV64-NEXT:    ret
1252entry:
1253  %a = call { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
1254    <vscale x 1 x i16> undef,
1255    ptr %0,
1256    iXLen %1)
1257  %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
1258  %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
1259  store iXLen %c, iXLen* %2
1260  ret <vscale x 1 x i16> %b
1261}
1262
1263declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
1264  <vscale x 1 x i16>,
1265  ptr,
1266  <vscale x 1 x i1>,
1267  iXLen,
1268  iXLen);
1269
1270define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1271; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
1272; RV32:       # %bb.0: # %entry
1273; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1274; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1275; RV32-NEXT:    csrr a0, vl
1276; RV32-NEXT:    sw a0, 0(a2)
1277; RV32-NEXT:    ret
1278;
1279; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
1280; RV64:       # %bb.0: # %entry
1281; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1282; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1283; RV64-NEXT:    csrr a0, vl
1284; RV64-NEXT:    sd a0, 0(a2)
1285; RV64-NEXT:    ret
1286entry:
1287  %a = call { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
1288    <vscale x 1 x i16> %0,
1289    ptr %1,
1290    <vscale x 1 x i1> %2,
1291    iXLen %3, iXLen 1)
1292  %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
1293  %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
1294  store iXLen %c, iXLen* %4
1295
1296  ret <vscale x 1 x i16> %b
1297}
1298
1299declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
1300  <vscale x 2 x i16>,
1301  ptr,
1302  iXLen);
1303
1304define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1305; RV32-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
1306; RV32:       # %bb.0: # %entry
1307; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1308; RV32-NEXT:    vle16ff.v v8, (a0)
1309; RV32-NEXT:    csrr a0, vl
1310; RV32-NEXT:    sw a0, 0(a2)
1311; RV32-NEXT:    ret
1312;
1313; RV64-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
1314; RV64:       # %bb.0: # %entry
1315; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1316; RV64-NEXT:    vle16ff.v v8, (a0)
1317; RV64-NEXT:    csrr a0, vl
1318; RV64-NEXT:    sd a0, 0(a2)
1319; RV64-NEXT:    ret
1320entry:
1321  %a = call { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
1322    <vscale x 2 x i16> undef,
1323    ptr %0,
1324    iXLen %1)
1325  %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
1326  %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
1327  store iXLen %c, iXLen* %2
1328  ret <vscale x 2 x i16> %b
1329}
1330
1331declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
1332  <vscale x 2 x i16>,
1333  ptr,
1334  <vscale x 2 x i1>,
1335  iXLen,
1336  iXLen);
1337
1338define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1339; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
1340; RV32:       # %bb.0: # %entry
1341; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1342; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1343; RV32-NEXT:    csrr a0, vl
1344; RV32-NEXT:    sw a0, 0(a2)
1345; RV32-NEXT:    ret
1346;
1347; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
1348; RV64:       # %bb.0: # %entry
1349; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1350; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1351; RV64-NEXT:    csrr a0, vl
1352; RV64-NEXT:    sd a0, 0(a2)
1353; RV64-NEXT:    ret
1354entry:
1355  %a = call { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
1356    <vscale x 2 x i16> %0,
1357    ptr %1,
1358    <vscale x 2 x i1> %2,
1359    iXLen %3, iXLen 1)
1360  %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
1361  %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
1362  store iXLen %c, iXLen* %4
1363
1364  ret <vscale x 2 x i16> %b
1365}
1366
1367declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
1368  <vscale x 4 x i16>,
1369  ptr,
1370  iXLen);
1371
1372define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1373; RV32-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
1374; RV32:       # %bb.0: # %entry
1375; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1376; RV32-NEXT:    vle16ff.v v8, (a0)
1377; RV32-NEXT:    csrr a0, vl
1378; RV32-NEXT:    sw a0, 0(a2)
1379; RV32-NEXT:    ret
1380;
1381; RV64-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
1382; RV64:       # %bb.0: # %entry
1383; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1384; RV64-NEXT:    vle16ff.v v8, (a0)
1385; RV64-NEXT:    csrr a0, vl
1386; RV64-NEXT:    sd a0, 0(a2)
1387; RV64-NEXT:    ret
1388entry:
1389  %a = call { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
1390    <vscale x 4 x i16> undef,
1391    ptr %0,
1392    iXLen %1)
1393  %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
1394  %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
1395  store iXLen %c, iXLen* %2
1396  ret <vscale x 4 x i16> %b
1397}
1398
1399declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
1400  <vscale x 4 x i16>,
1401  ptr,
1402  <vscale x 4 x i1>,
1403  iXLen,
1404  iXLen);
1405
1406define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1407; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
1408; RV32:       # %bb.0: # %entry
1409; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1410; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1411; RV32-NEXT:    csrr a0, vl
1412; RV32-NEXT:    sw a0, 0(a2)
1413; RV32-NEXT:    ret
1414;
1415; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
1416; RV64:       # %bb.0: # %entry
1417; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1418; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1419; RV64-NEXT:    csrr a0, vl
1420; RV64-NEXT:    sd a0, 0(a2)
1421; RV64-NEXT:    ret
1422entry:
1423  %a = call { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
1424    <vscale x 4 x i16> %0,
1425    ptr %1,
1426    <vscale x 4 x i1> %2,
1427    iXLen %3, iXLen 1)
1428  %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
1429  %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
1430  store iXLen %c, iXLen* %4
1431
1432  ret <vscale x 4 x i16> %b
1433}
1434
1435declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
1436  <vscale x 8 x i16>,
1437  ptr,
1438  iXLen);
1439
1440define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1441; RV32-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
1442; RV32:       # %bb.0: # %entry
1443; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1444; RV32-NEXT:    vle16ff.v v8, (a0)
1445; RV32-NEXT:    csrr a0, vl
1446; RV32-NEXT:    sw a0, 0(a2)
1447; RV32-NEXT:    ret
1448;
1449; RV64-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
1450; RV64:       # %bb.0: # %entry
1451; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1452; RV64-NEXT:    vle16ff.v v8, (a0)
1453; RV64-NEXT:    csrr a0, vl
1454; RV64-NEXT:    sd a0, 0(a2)
1455; RV64-NEXT:    ret
1456entry:
1457  %a = call { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
1458    <vscale x 8 x i16> undef,
1459    ptr %0,
1460    iXLen %1)
1461  %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
1462  %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
1463  store iXLen %c, iXLen* %2
1464  ret <vscale x 8 x i16> %b
1465}
1466
1467declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
1468  <vscale x 8 x i16>,
1469  ptr,
1470  <vscale x 8 x i1>,
1471  iXLen,
1472  iXLen);
1473
1474define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1475; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
1476; RV32:       # %bb.0: # %entry
1477; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1478; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1479; RV32-NEXT:    csrr a0, vl
1480; RV32-NEXT:    sw a0, 0(a2)
1481; RV32-NEXT:    ret
1482;
1483; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
1484; RV64:       # %bb.0: # %entry
1485; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1486; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1487; RV64-NEXT:    csrr a0, vl
1488; RV64-NEXT:    sd a0, 0(a2)
1489; RV64-NEXT:    ret
1490entry:
1491  %a = call { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
1492    <vscale x 8 x i16> %0,
1493    ptr %1,
1494    <vscale x 8 x i1> %2,
1495    iXLen %3, iXLen 1)
1496  %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
1497  %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
1498  store iXLen %c, iXLen* %4
1499
1500  ret <vscale x 8 x i16> %b
1501}
1502
1503declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
1504  <vscale x 16 x i16>,
1505  ptr,
1506  iXLen);
1507
1508define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1509; RV32-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
1510; RV32:       # %bb.0: # %entry
1511; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1512; RV32-NEXT:    vle16ff.v v8, (a0)
1513; RV32-NEXT:    csrr a0, vl
1514; RV32-NEXT:    sw a0, 0(a2)
1515; RV32-NEXT:    ret
1516;
1517; RV64-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
1518; RV64:       # %bb.0: # %entry
1519; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1520; RV64-NEXT:    vle16ff.v v8, (a0)
1521; RV64-NEXT:    csrr a0, vl
1522; RV64-NEXT:    sd a0, 0(a2)
1523; RV64-NEXT:    ret
1524entry:
1525  %a = call { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
1526    <vscale x 16 x i16> undef,
1527    ptr %0,
1528    iXLen %1)
1529  %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
1530  %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
1531  store iXLen %c, iXLen* %2
1532  ret <vscale x 16 x i16> %b
1533}
1534
1535declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
1536  <vscale x 16 x i16>,
1537  ptr,
1538  <vscale x 16 x i1>,
1539  iXLen,
1540  iXLen);
1541
1542define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1543; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
1544; RV32:       # %bb.0: # %entry
1545; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1546; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1547; RV32-NEXT:    csrr a0, vl
1548; RV32-NEXT:    sw a0, 0(a2)
1549; RV32-NEXT:    ret
1550;
1551; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
1552; RV64:       # %bb.0: # %entry
1553; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1554; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1555; RV64-NEXT:    csrr a0, vl
1556; RV64-NEXT:    sd a0, 0(a2)
1557; RV64-NEXT:    ret
1558entry:
1559  %a = call { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
1560    <vscale x 16 x i16> %0,
1561    ptr %1,
1562    <vscale x 16 x i1> %2,
1563    iXLen %3, iXLen 1)
1564  %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
1565  %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
1566  store iXLen %c, iXLen* %4
1567
1568  ret <vscale x 16 x i16> %b
1569}
1570
1571declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
1572  <vscale x 32 x i16>,
1573  ptr,
1574  iXLen);
1575
1576define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1577; RV32-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
1578; RV32:       # %bb.0: # %entry
1579; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
1580; RV32-NEXT:    vle16ff.v v8, (a0)
1581; RV32-NEXT:    csrr a0, vl
1582; RV32-NEXT:    sw a0, 0(a2)
1583; RV32-NEXT:    ret
1584;
1585; RV64-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
1586; RV64:       # %bb.0: # %entry
1587; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
1588; RV64-NEXT:    vle16ff.v v8, (a0)
1589; RV64-NEXT:    csrr a0, vl
1590; RV64-NEXT:    sd a0, 0(a2)
1591; RV64-NEXT:    ret
1592entry:
1593  %a = call { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
1594    <vscale x 32 x i16> undef,
1595    ptr %0,
1596    iXLen %1)
1597  %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
1598  %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
1599  store iXLen %c, iXLen* %2
1600  ret <vscale x 32 x i16> %b
1601}
1602
1603declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
1604  <vscale x 32 x i16>,
1605  ptr,
1606  <vscale x 32 x i1>,
1607  iXLen,
1608  iXLen);
1609
1610define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1611; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
1612; RV32:       # %bb.0: # %entry
1613; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
1614; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1615; RV32-NEXT:    csrr a0, vl
1616; RV32-NEXT:    sw a0, 0(a2)
1617; RV32-NEXT:    ret
1618;
1619; RV64-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
1620; RV64:       # %bb.0: # %entry
1621; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
1622; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1623; RV64-NEXT:    csrr a0, vl
1624; RV64-NEXT:    sd a0, 0(a2)
1625; RV64-NEXT:    ret
1626entry:
1627  %a = call { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
1628    <vscale x 32 x i16> %0,
1629    ptr %1,
1630    <vscale x 32 x i1> %2,
1631    iXLen %3, iXLen 1)
1632  %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
1633  %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
1634  store iXLen %c, iXLen* %4
1635
1636  ret <vscale x 32 x i16> %b
1637}
1638
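; f16 (half) element tests; +zvfhmin in the RUN lines enables the scalable half vector types.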
1639declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
1640  <vscale x 1 x half>,
1641  ptr,
1642  iXLen);
1643
1644define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1645; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
1646; RV32:       # %bb.0: # %entry
1647; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1648; RV32-NEXT:    vle16ff.v v8, (a0)
1649; RV32-NEXT:    csrr a0, vl
1650; RV32-NEXT:    sw a0, 0(a2)
1651; RV32-NEXT:    ret
1652;
1653; RV64-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
1654; RV64:       # %bb.0: # %entry
1655; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1656; RV64-NEXT:    vle16ff.v v8, (a0)
1657; RV64-NEXT:    csrr a0, vl
1658; RV64-NEXT:    sd a0, 0(a2)
1659; RV64-NEXT:    ret
1660entry:
1661  %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
1662    <vscale x 1 x half> undef,
1663    ptr %0,
1664    iXLen %1)
1665  %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
1666  %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
1667  store iXLen %c, iXLen* %2
1668  ret <vscale x 1 x half> %b
1669}
1670
1671declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
1672  <vscale x 1 x half>,
1673  ptr,
1674  <vscale x 1 x i1>,
1675  iXLen,
1676  iXLen);
1677
1678define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1679; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
1680; RV32:       # %bb.0: # %entry
1681; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1682; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1683; RV32-NEXT:    csrr a0, vl
1684; RV32-NEXT:    sw a0, 0(a2)
1685; RV32-NEXT:    ret
1686;
1687; RV64-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
1688; RV64:       # %bb.0: # %entry
1689; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1690; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1691; RV64-NEXT:    csrr a0, vl
1692; RV64-NEXT:    sd a0, 0(a2)
1693; RV64-NEXT:    ret
1694entry:
1695  %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
1696    <vscale x 1 x half> %0,
1697    ptr %1,
1698    <vscale x 1 x i1> %2,
1699    iXLen %3, iXLen 1)
1700  %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
1701  %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
1702  store iXLen %c, iXLen* %4
1703
1704  ret <vscale x 1 x half> %b
1705}
1706
1707declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
1708  <vscale x 2 x half>,
1709  ptr,
1710  iXLen);
1711
1712define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1713; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
1714; RV32:       # %bb.0: # %entry
1715; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1716; RV32-NEXT:    vle16ff.v v8, (a0)
1717; RV32-NEXT:    csrr a0, vl
1718; RV32-NEXT:    sw a0, 0(a2)
1719; RV32-NEXT:    ret
1720;
1721; RV64-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
1722; RV64:       # %bb.0: # %entry
1723; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1724; RV64-NEXT:    vle16ff.v v8, (a0)
1725; RV64-NEXT:    csrr a0, vl
1726; RV64-NEXT:    sd a0, 0(a2)
1727; RV64-NEXT:    ret
1728entry:
1729  %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
1730    <vscale x 2 x half> undef,
1731    ptr %0,
1732    iXLen %1)
1733  %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
1734  %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
1735  store iXLen %c, iXLen* %2
1736  ret <vscale x 2 x half> %b
1737}
1738
1739declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
1740  <vscale x 2 x half>,
1741  ptr,
1742  <vscale x 2 x i1>,
1743  iXLen,
1744  iXLen);
1745
1746define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1747; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
1748; RV32:       # %bb.0: # %entry
1749; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1750; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1751; RV32-NEXT:    csrr a0, vl
1752; RV32-NEXT:    sw a0, 0(a2)
1753; RV32-NEXT:    ret
1754;
1755; RV64-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
1756; RV64:       # %bb.0: # %entry
1757; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1758; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1759; RV64-NEXT:    csrr a0, vl
1760; RV64-NEXT:    sd a0, 0(a2)
1761; RV64-NEXT:    ret
1762entry:
1763  %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
1764    <vscale x 2 x half> %0,
1765    ptr %1,
1766    <vscale x 2 x i1> %2,
1767    iXLen %3, iXLen 1)
1768  %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
1769  %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
1770  store iXLen %c, iXLen* %4
1771
1772  ret <vscale x 2 x half> %b
1773}
1774
1775declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
1776  <vscale x 4 x half>,
1777  ptr,
1778  iXLen);
1779
1780define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1781; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
1782; RV32:       # %bb.0: # %entry
1783; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1784; RV32-NEXT:    vle16ff.v v8, (a0)
1785; RV32-NEXT:    csrr a0, vl
1786; RV32-NEXT:    sw a0, 0(a2)
1787; RV32-NEXT:    ret
1788;
1789; RV64-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
1790; RV64:       # %bb.0: # %entry
1791; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1792; RV64-NEXT:    vle16ff.v v8, (a0)
1793; RV64-NEXT:    csrr a0, vl
1794; RV64-NEXT:    sd a0, 0(a2)
1795; RV64-NEXT:    ret
1796entry:
1797  %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
1798    <vscale x 4 x half> undef,
1799    ptr %0,
1800    iXLen %1)
1801  %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
1802  %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
1803  store iXLen %c, iXLen* %2
1804  ret <vscale x 4 x half> %b
1805}
1806
1807declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
1808  <vscale x 4 x half>,
1809  ptr,
1810  <vscale x 4 x i1>,
1811  iXLen,
1812  iXLen);
1813
1814define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1815; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
1816; RV32:       # %bb.0: # %entry
1817; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1818; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1819; RV32-NEXT:    csrr a0, vl
1820; RV32-NEXT:    sw a0, 0(a2)
1821; RV32-NEXT:    ret
1822;
1823; RV64-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
1824; RV64:       # %bb.0: # %entry
1825; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1826; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1827; RV64-NEXT:    csrr a0, vl
1828; RV64-NEXT:    sd a0, 0(a2)
1829; RV64-NEXT:    ret
1830entry:
1831  %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
1832    <vscale x 4 x half> %0,
1833    ptr %1,
1834    <vscale x 4 x i1> %2,
1835    iXLen %3, iXLen 1)
1836  %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
1837  %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
1838  store iXLen %c, iXLen* %4
1839
1840  ret <vscale x 4 x half> %b
1841}
1842
1843declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
1844  <vscale x 8 x half>,
1845  ptr,
1846  iXLen);
1847
1848define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1849; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
1850; RV32:       # %bb.0: # %entry
1851; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1852; RV32-NEXT:    vle16ff.v v8, (a0)
1853; RV32-NEXT:    csrr a0, vl
1854; RV32-NEXT:    sw a0, 0(a2)
1855; RV32-NEXT:    ret
1856;
1857; RV64-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
1858; RV64:       # %bb.0: # %entry
1859; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1860; RV64-NEXT:    vle16ff.v v8, (a0)
1861; RV64-NEXT:    csrr a0, vl
1862; RV64-NEXT:    sd a0, 0(a2)
1863; RV64-NEXT:    ret
1864entry:
1865  %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
1866    <vscale x 8 x half> undef,
1867    ptr %0,
1868    iXLen %1)
1869  %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
1870  %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
1871  store iXLen %c, iXLen* %2
1872  ret <vscale x 8 x half> %b
1873}
1874
1875declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
1876  <vscale x 8 x half>,
1877  ptr,
1878  <vscale x 8 x i1>,
1879  iXLen,
1880  iXLen);
1881
1882define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1883; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
1884; RV32:       # %bb.0: # %entry
1885; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1886; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1887; RV32-NEXT:    csrr a0, vl
1888; RV32-NEXT:    sw a0, 0(a2)
1889; RV32-NEXT:    ret
1890;
1891; RV64-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
1892; RV64:       # %bb.0: # %entry
1893; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1894; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1895; RV64-NEXT:    csrr a0, vl
1896; RV64-NEXT:    sd a0, 0(a2)
1897; RV64-NEXT:    ret
1898entry:
1899  %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
1900    <vscale x 8 x half> %0,
1901    ptr %1,
1902    <vscale x 8 x i1> %2,
1903    iXLen %3, iXLen 1)
1904  %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
1905  %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
1906  store iXLen %c, iXLen* %4
1907
1908  ret <vscale x 8 x half> %b
1909}
1910
1911declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
1912  <vscale x 16 x half>,
1913  ptr,
1914  iXLen);
1915
1916define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1917; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
1918; RV32:       # %bb.0: # %entry
1919; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1920; RV32-NEXT:    vle16ff.v v8, (a0)
1921; RV32-NEXT:    csrr a0, vl
1922; RV32-NEXT:    sw a0, 0(a2)
1923; RV32-NEXT:    ret
1924;
1925; RV64-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
1926; RV64:       # %bb.0: # %entry
1927; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1928; RV64-NEXT:    vle16ff.v v8, (a0)
1929; RV64-NEXT:    csrr a0, vl
1930; RV64-NEXT:    sd a0, 0(a2)
1931; RV64-NEXT:    ret
1932entry:
1933  %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
1934    <vscale x 16 x half> undef,
1935    ptr %0,
1936    iXLen %1)
1937  %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
1938  %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
1939  store iXLen %c, iXLen* %2
1940  ret <vscale x 16 x half> %b
1941}
1942
1943declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
1944  <vscale x 16 x half>,
1945  ptr,
1946  <vscale x 16 x i1>,
1947  iXLen,
1948  iXLen);
1949
1950define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
1951; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
1952; RV32:       # %bb.0: # %entry
1953; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1954; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
1955; RV32-NEXT:    csrr a0, vl
1956; RV32-NEXT:    sw a0, 0(a2)
1957; RV32-NEXT:    ret
1958;
1959; RV64-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
1960; RV64:       # %bb.0: # %entry
1961; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1962; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
1963; RV64-NEXT:    csrr a0, vl
1964; RV64-NEXT:    sd a0, 0(a2)
1965; RV64-NEXT:    ret
1966entry:
1967  %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
1968    <vscale x 16 x half> %0,
1969    ptr %1,
1970    <vscale x 16 x i1> %2,
1971    iXLen %3, iXLen 1)
1972  %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
1973  %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
1974  store iXLen %c, iXLen* %4
1975
1976  ret <vscale x 16 x half> %b
1977}
1978
1979declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
1980  <vscale x 32 x half>,
1981  ptr,
1982  iXLen);
1983
1984define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
1985; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
1986; RV32:       # %bb.0: # %entry
1987; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
1988; RV32-NEXT:    vle16ff.v v8, (a0)
1989; RV32-NEXT:    csrr a0, vl
1990; RV32-NEXT:    sw a0, 0(a2)
1991; RV32-NEXT:    ret
1992;
1993; RV64-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
1994; RV64:       # %bb.0: # %entry
1995; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
1996; RV64-NEXT:    vle16ff.v v8, (a0)
1997; RV64-NEXT:    csrr a0, vl
1998; RV64-NEXT:    sd a0, 0(a2)
1999; RV64-NEXT:    ret
2000entry:
2001  %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
2002    <vscale x 32 x half> undef,
2003    ptr %0,
2004    iXLen %1)
2005  %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
2006  %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
2007  store iXLen %c, iXLen* %2
2008  ret <vscale x 32 x half> %b
2009}
2010
2011declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
2012  <vscale x 32 x half>,
2013  ptr,
2014  <vscale x 32 x i1>,
2015  iXLen,
2016  iXLen);
2017
2018define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2019; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
2020; RV32:       # %bb.0: # %entry
2021; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
2022; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
2023; RV32-NEXT:    csrr a0, vl
2024; RV32-NEXT:    sw a0, 0(a2)
2025; RV32-NEXT:    ret
2026;
2027; RV64-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
2028; RV64:       # %bb.0: # %entry
2029; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
2030; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
2031; RV64-NEXT:    csrr a0, vl
2032; RV64-NEXT:    sd a0, 0(a2)
2033; RV64-NEXT:    ret
2034entry:
2035  %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
2036    <vscale x 32 x half> %0,
2037    ptr %1,
2038    <vscale x 32 x i1> %2,
2039    iXLen %3, iXLen 1)
2040  %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
2041  %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
2042  store iXLen %c, iXLen* %4
2043
2044  ret <vscale x 32 x half> %b
2045}
2046
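; bf16 (bfloat) element tests; +zvfbfmin in the RUN lines enables the scalable bfloat vector types, and codegen emits the same vle16ff.v as for f16.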
2047declare { <vscale x 1 x bfloat>, iXLen } @llvm.riscv.vleff.nxv1bf16(
2048  <vscale x 1 x bfloat>,
2049  ptr,
2050  iXLen);
2051
2052define <vscale x 1 x bfloat> @intrinsic_vleff_v_nxv1bfloat_nxv1bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
2053; RV32-LABEL: intrinsic_vleff_v_nxv1bfloat_nxv1bf16:
2054; RV32:       # %bb.0: # %entry
2055; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2056; RV32-NEXT:    vle16ff.v v8, (a0)
2057; RV32-NEXT:    csrr a0, vl
2058; RV32-NEXT:    sw a0, 0(a2)
2059; RV32-NEXT:    ret
2060;
2061; RV64-LABEL: intrinsic_vleff_v_nxv1bfloat_nxv1bf16:
2062; RV64:       # %bb.0: # %entry
2063; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2064; RV64-NEXT:    vle16ff.v v8, (a0)
2065; RV64-NEXT:    csrr a0, vl
2066; RV64-NEXT:    sd a0, 0(a2)
2067; RV64-NEXT:    ret
2068entry:
2069  %a = call { <vscale x 1 x bfloat>, iXLen } @llvm.riscv.vleff.nxv1bf16(
2070    <vscale x 1 x bfloat> undef,
2071    ptr %0,
2072    iXLen %1)
2073  %b = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 0
2074  %c = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 1
2075  store iXLen %c, iXLen* %2
2076  ret <vscale x 1 x bfloat> %b
2077}
2078
2079declare { <vscale x 1 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv1bf16(
2080  <vscale x 1 x bfloat>,
2081  ptr,
2082  <vscale x 1 x i1>,
2083  iXLen,
2084  iXLen);
2085
2086define <vscale x 1 x bfloat> @intrinsic_vleff_mask_v_nxv1bfloat_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2087; RV32-LABEL: intrinsic_vleff_mask_v_nxv1bfloat_nxv1bf16:
2088; RV32:       # %bb.0: # %entry
2089; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
2090; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
2091; RV32-NEXT:    csrr a0, vl
2092; RV32-NEXT:    sw a0, 0(a2)
2093; RV32-NEXT:    ret
2094;
2095; RV64-LABEL: intrinsic_vleff_mask_v_nxv1bfloat_nxv1bf16:
2096; RV64:       # %bb.0: # %entry
2097; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
2098; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
2099; RV64-NEXT:    csrr a0, vl
2100; RV64-NEXT:    sd a0, 0(a2)
2101; RV64-NEXT:    ret
2102entry:
2103  %a = call { <vscale x 1 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv1bf16(
2104    <vscale x 1 x bfloat> %0,
2105    ptr %1,
2106    <vscale x 1 x i1> %2,
2107    iXLen %3, iXLen 1)
2108  %b = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 0
2109  %c = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 1
2110  store iXLen %c, iXLen* %4
2111
2112  ret <vscale x 1 x bfloat> %b
2113}
2114
2115declare { <vscale x 2 x bfloat>, iXLen } @llvm.riscv.vleff.nxv2bf16(
2116  <vscale x 2 x bfloat>,
2117  ptr,
2118  iXLen);
2119
2120define <vscale x 2 x bfloat> @intrinsic_vleff_v_nxv2bfloat_nxv2bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
2121; RV32-LABEL: intrinsic_vleff_v_nxv2bfloat_nxv2bf16:
2122; RV32:       # %bb.0: # %entry
2123; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2124; RV32-NEXT:    vle16ff.v v8, (a0)
2125; RV32-NEXT:    csrr a0, vl
2126; RV32-NEXT:    sw a0, 0(a2)
2127; RV32-NEXT:    ret
2128;
2129; RV64-LABEL: intrinsic_vleff_v_nxv2bfloat_nxv2bf16:
2130; RV64:       # %bb.0: # %entry
2131; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2132; RV64-NEXT:    vle16ff.v v8, (a0)
2133; RV64-NEXT:    csrr a0, vl
2134; RV64-NEXT:    sd a0, 0(a2)
2135; RV64-NEXT:    ret
2136entry:
2137  %a = call { <vscale x 2 x bfloat>, iXLen } @llvm.riscv.vleff.nxv2bf16(
2138    <vscale x 2 x bfloat> undef,
2139    ptr %0,
2140    iXLen %1)
2141  %b = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 0
2142  %c = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 1
2143  store iXLen %c, iXLen* %2
2144  ret <vscale x 2 x bfloat> %b
2145}
2146
2147declare { <vscale x 2 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv2bf16(
2148  <vscale x 2 x bfloat>,
2149  ptr,
2150  <vscale x 2 x i1>,
2151  iXLen,
2152  iXLen);
2153
2154define <vscale x 2 x bfloat> @intrinsic_vleff_mask_v_nxv2bfloat_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2155; RV32-LABEL: intrinsic_vleff_mask_v_nxv2bfloat_nxv2bf16:
2156; RV32:       # %bb.0: # %entry
2157; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
2158; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
2159; RV32-NEXT:    csrr a0, vl
2160; RV32-NEXT:    sw a0, 0(a2)
2161; RV32-NEXT:    ret
2162;
2163; RV64-LABEL: intrinsic_vleff_mask_v_nxv2bfloat_nxv2bf16:
2164; RV64:       # %bb.0: # %entry
2165; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
2166; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
2167; RV64-NEXT:    csrr a0, vl
2168; RV64-NEXT:    sd a0, 0(a2)
2169; RV64-NEXT:    ret
2170entry:
2171  %a = call { <vscale x 2 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv2bf16(
2172    <vscale x 2 x bfloat> %0,
2173    ptr %1,
2174    <vscale x 2 x i1> %2,
2175    iXLen %3, iXLen 1)
2176  %b = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 0
2177  %c = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 1
2178  store iXLen %c, iXLen* %4
2179
2180  ret <vscale x 2 x bfloat> %b
2181}
2182
2183declare { <vscale x 4 x bfloat>, iXLen } @llvm.riscv.vleff.nxv4bf16(
2184  <vscale x 4 x bfloat>,
2185  ptr,
2186  iXLen);
2187
2188define <vscale x 4 x bfloat> @intrinsic_vleff_v_nxv4bfloat_nxv4bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
2189; RV32-LABEL: intrinsic_vleff_v_nxv4bfloat_nxv4bf16:
2190; RV32:       # %bb.0: # %entry
2191; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2192; RV32-NEXT:    vle16ff.v v8, (a0)
2193; RV32-NEXT:    csrr a0, vl
2194; RV32-NEXT:    sw a0, 0(a2)
2195; RV32-NEXT:    ret
2196;
2197; RV64-LABEL: intrinsic_vleff_v_nxv4bfloat_nxv4bf16:
2198; RV64:       # %bb.0: # %entry
2199; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2200; RV64-NEXT:    vle16ff.v v8, (a0)
2201; RV64-NEXT:    csrr a0, vl
2202; RV64-NEXT:    sd a0, 0(a2)
2203; RV64-NEXT:    ret
2204entry:
2205  %a = call { <vscale x 4 x bfloat>, iXLen } @llvm.riscv.vleff.nxv4bf16(
2206    <vscale x 4 x bfloat> undef,
2207    ptr %0,
2208    iXLen %1)
2209  %b = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 0
2210  %c = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 1
2211  store iXLen %c, iXLen* %2
2212  ret <vscale x 4 x bfloat> %b
2213}
2214
2215declare { <vscale x 4 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv4bf16(
2216  <vscale x 4 x bfloat>,
2217  ptr,
2218  <vscale x 4 x i1>,
2219  iXLen,
2220  iXLen);
2221
2222define <vscale x 4 x bfloat> @intrinsic_vleff_mask_v_nxv4bfloat_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2223; RV32-LABEL: intrinsic_vleff_mask_v_nxv4bfloat_nxv4bf16:
2224; RV32:       # %bb.0: # %entry
2225; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
2226; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
2227; RV32-NEXT:    csrr a0, vl
2228; RV32-NEXT:    sw a0, 0(a2)
2229; RV32-NEXT:    ret
2230;
2231; RV64-LABEL: intrinsic_vleff_mask_v_nxv4bfloat_nxv4bf16:
2232; RV64:       # %bb.0: # %entry
2233; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
2234; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
2235; RV64-NEXT:    csrr a0, vl
2236; RV64-NEXT:    sd a0, 0(a2)
2237; RV64-NEXT:    ret
2238entry:
2239  %a = call { <vscale x 4 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv4bf16(
2240    <vscale x 4 x bfloat> %0,
2241    ptr %1,
2242    <vscale x 4 x i1> %2,
2243    iXLen %3, iXLen 1)
2244  %b = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 0
2245  %c = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 1
2246  store iXLen %c, iXLen* %4
2247
2248  ret <vscale x 4 x bfloat> %b
2249}
2250
2251declare { <vscale x 8 x bfloat>, iXLen } @llvm.riscv.vleff.nxv8bf16(
2252  <vscale x 8 x bfloat>,
2253  ptr,
2254  iXLen);
2255
2256define <vscale x 8 x bfloat> @intrinsic_vleff_v_nxv8bfloat_nxv8bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
2257; RV32-LABEL: intrinsic_vleff_v_nxv8bfloat_nxv8bf16:
2258; RV32:       # %bb.0: # %entry
2259; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2260; RV32-NEXT:    vle16ff.v v8, (a0)
2261; RV32-NEXT:    csrr a0, vl
2262; RV32-NEXT:    sw a0, 0(a2)
2263; RV32-NEXT:    ret
2264;
2265; RV64-LABEL: intrinsic_vleff_v_nxv8bfloat_nxv8bf16:
2266; RV64:       # %bb.0: # %entry
2267; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2268; RV64-NEXT:    vle16ff.v v8, (a0)
2269; RV64-NEXT:    csrr a0, vl
2270; RV64-NEXT:    sd a0, 0(a2)
2271; RV64-NEXT:    ret
2272entry:
2273  %a = call { <vscale x 8 x bfloat>, iXLen } @llvm.riscv.vleff.nxv8bf16(
2274    <vscale x 8 x bfloat> undef,
2275    ptr %0,
2276    iXLen %1)
2277  %b = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 0
2278  %c = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 1
2279  store iXLen %c, iXLen* %2
2280  ret <vscale x 8 x bfloat> %b
2281}
2282
2283declare { <vscale x 8 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv8bf16(
2284  <vscale x 8 x bfloat>,
2285  ptr,
2286  <vscale x 8 x i1>,
2287  iXLen,
2288  iXLen);
2289
2290define <vscale x 8 x bfloat> @intrinsic_vleff_mask_v_nxv8bfloat_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2291; RV32-LABEL: intrinsic_vleff_mask_v_nxv8bfloat_nxv8bf16:
2292; RV32:       # %bb.0: # %entry
2293; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
2294; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
2295; RV32-NEXT:    csrr a0, vl
2296; RV32-NEXT:    sw a0, 0(a2)
2297; RV32-NEXT:    ret
2298;
2299; RV64-LABEL: intrinsic_vleff_mask_v_nxv8bfloat_nxv8bf16:
2300; RV64:       # %bb.0: # %entry
2301; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
2302; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
2303; RV64-NEXT:    csrr a0, vl
2304; RV64-NEXT:    sd a0, 0(a2)
2305; RV64-NEXT:    ret
2306entry:
2307  %a = call { <vscale x 8 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv8bf16(
2308    <vscale x 8 x bfloat> %0,
2309    ptr %1,
2310    <vscale x 8 x i1> %2,
2311    iXLen %3, iXLen 1)
2312  %b = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 0
2313  %c = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 1
2314  store iXLen %c, iXLen* %4
2315
2316  ret <vscale x 8 x bfloat> %b
2317}
2318
2319declare { <vscale x 16 x bfloat>, iXLen } @llvm.riscv.vleff.nxv16bf16(
2320  <vscale x 16 x bfloat>,
2321  ptr,
2322  iXLen);
2323
2324define <vscale x 16 x bfloat> @intrinsic_vleff_v_nxv16bfloat_nxv16bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
2325; RV32-LABEL: intrinsic_vleff_v_nxv16bfloat_nxv16bf16:
2326; RV32:       # %bb.0: # %entry
2327; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
2328; RV32-NEXT:    vle16ff.v v8, (a0)
2329; RV32-NEXT:    csrr a0, vl
2330; RV32-NEXT:    sw a0, 0(a2)
2331; RV32-NEXT:    ret
2332;
2333; RV64-LABEL: intrinsic_vleff_v_nxv16bfloat_nxv16bf16:
2334; RV64:       # %bb.0: # %entry
2335; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
2336; RV64-NEXT:    vle16ff.v v8, (a0)
2337; RV64-NEXT:    csrr a0, vl
2338; RV64-NEXT:    sd a0, 0(a2)
2339; RV64-NEXT:    ret
2340entry:
2341  %a = call { <vscale x 16 x bfloat>, iXLen } @llvm.riscv.vleff.nxv16bf16(
2342    <vscale x 16 x bfloat> undef,
2343    ptr %0,
2344    iXLen %1)
2345  %b = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 0
2346  %c = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 1
2347  store iXLen %c, iXLen* %2
2348  ret <vscale x 16 x bfloat> %b
2349}
2350
2351declare { <vscale x 16 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv16bf16(
2352  <vscale x 16 x bfloat>,
2353  ptr,
2354  <vscale x 16 x i1>,
2355  iXLen,
2356  iXLen);
2357
2358define <vscale x 16 x bfloat> @intrinsic_vleff_mask_v_nxv16bfloat_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2359; RV32-LABEL: intrinsic_vleff_mask_v_nxv16bfloat_nxv16bf16:
2360; RV32:       # %bb.0: # %entry
2361; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
2362; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
2363; RV32-NEXT:    csrr a0, vl
2364; RV32-NEXT:    sw a0, 0(a2)
2365; RV32-NEXT:    ret
2366;
2367; RV64-LABEL: intrinsic_vleff_mask_v_nxv16bfloat_nxv16bf16:
2368; RV64:       # %bb.0: # %entry
2369; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
2370; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
2371; RV64-NEXT:    csrr a0, vl
2372; RV64-NEXT:    sd a0, 0(a2)
2373; RV64-NEXT:    ret
2374entry:
2375  %a = call { <vscale x 16 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv16bf16(
2376    <vscale x 16 x bfloat> %0,
2377    ptr %1,
2378    <vscale x 16 x i1> %2,
2379    iXLen %3, iXLen 1)
2380  %b = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 0
2381  %c = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 1
2382  store iXLen %c, iXLen* %4
2383
2384  ret <vscale x 16 x bfloat> %b
2385}
2386
2387declare { <vscale x 32 x bfloat>, iXLen } @llvm.riscv.vleff.nxv32bf16(
2388  <vscale x 32 x bfloat>,
2389  ptr,
2390  iXLen);
2391
2392define <vscale x 32 x bfloat> @intrinsic_vleff_v_nxv32bfloat_nxv32bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
2393; RV32-LABEL: intrinsic_vleff_v_nxv32bfloat_nxv32bf16:
2394; RV32:       # %bb.0: # %entry
2395; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
2396; RV32-NEXT:    vle16ff.v v8, (a0)
2397; RV32-NEXT:    csrr a0, vl
2398; RV32-NEXT:    sw a0, 0(a2)
2399; RV32-NEXT:    ret
2400;
2401; RV64-LABEL: intrinsic_vleff_v_nxv32bfloat_nxv32bf16:
2402; RV64:       # %bb.0: # %entry
2403; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
2404; RV64-NEXT:    vle16ff.v v8, (a0)
2405; RV64-NEXT:    csrr a0, vl
2406; RV64-NEXT:    sd a0, 0(a2)
2407; RV64-NEXT:    ret
2408entry:
2409  %a = call { <vscale x 32 x bfloat>, iXLen } @llvm.riscv.vleff.nxv32bf16(
2410    <vscale x 32 x bfloat> undef,
2411    ptr %0,
2412    iXLen %1)
2413  %b = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 0
2414  %c = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 1
2415  store iXLen %c, iXLen* %2
2416  ret <vscale x 32 x bfloat> %b
2417}
2418
2419declare { <vscale x 32 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv32bf16(
2420  <vscale x 32 x bfloat>,
2421  ptr,
2422  <vscale x 32 x i1>,
2423  iXLen,
2424  iXLen);
2425
2426define <vscale x 32 x bfloat> @intrinsic_vleff_mask_v_nxv32bfloat_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2427; RV32-LABEL: intrinsic_vleff_mask_v_nxv32bfloat_nxv32bf16:
2428; RV32:       # %bb.0: # %entry
2429; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
2430; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
2431; RV32-NEXT:    csrr a0, vl
2432; RV32-NEXT:    sw a0, 0(a2)
2433; RV32-NEXT:    ret
2434;
2435; RV64-LABEL: intrinsic_vleff_mask_v_nxv32bfloat_nxv32bf16:
2436; RV64:       # %bb.0: # %entry
2437; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
2438; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
2439; RV64-NEXT:    csrr a0, vl
2440; RV64-NEXT:    sd a0, 0(a2)
2441; RV64-NEXT:    ret
2442entry:
2443  %a = call { <vscale x 32 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv32bf16(
2444    <vscale x 32 x bfloat> %0,
2445    ptr %1,
2446    <vscale x 32 x i1> %2,
2447    iXLen %3, iXLen 1)
2448  %b = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 0
2449  %c = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 1
2450  store iXLen %c, iXLen* %4
2451
2452  ret <vscale x 32 x bfloat> %b
2453}
2454
2455declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
2456  <vscale x 1 x i8>,
2457  ptr,
2458  iXLen);
2459
2460define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
2461; RV32-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
2462; RV32:       # %bb.0: # %entry
2463; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
2464; RV32-NEXT:    vle8ff.v v8, (a0)
2465; RV32-NEXT:    csrr a0, vl
2466; RV32-NEXT:    sw a0, 0(a2)
2467; RV32-NEXT:    ret
2468;
2469; RV64-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
2470; RV64:       # %bb.0: # %entry
2471; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
2472; RV64-NEXT:    vle8ff.v v8, (a0)
2473; RV64-NEXT:    csrr a0, vl
2474; RV64-NEXT:    sd a0, 0(a2)
2475; RV64-NEXT:    ret
2476entry:
2477  %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
2478    <vscale x 1 x i8> undef,
2479    ptr %0,
2480    iXLen %1)
2481  %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
2482  %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
2483  store iXLen %c, iXLen* %2
2484  ret <vscale x 1 x i8> %b
2485}
2486
2487declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
2488  <vscale x 1 x i8>,
2489  ptr,
2490  <vscale x 1 x i1>,
2491  iXLen,
2492  iXLen);
2493
2494define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2495; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
2496; RV32:       # %bb.0: # %entry
2497; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
2498; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
2499; RV32-NEXT:    csrr a0, vl
2500; RV32-NEXT:    sw a0, 0(a2)
2501; RV32-NEXT:    ret
2502;
2503; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
2504; RV64:       # %bb.0: # %entry
2505; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
2506; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
2507; RV64-NEXT:    csrr a0, vl
2508; RV64-NEXT:    sd a0, 0(a2)
2509; RV64-NEXT:    ret
2510entry:
2511  %a = call { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
2512    <vscale x 1 x i8> %0,
2513    ptr %1,
2514    <vscale x 1 x i1> %2,
2515    iXLen %3, iXLen 1)
2516  %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
2517  %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
2518  store iXLen %c, iXLen* %4
2519
2520  ret <vscale x 1 x i8> %b
2521}
2522
2523declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
2524  <vscale x 2 x i8>,
2525  ptr,
2526  iXLen);
2527
2528define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
2529; RV32-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
2530; RV32:       # %bb.0: # %entry
2531; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
2532; RV32-NEXT:    vle8ff.v v8, (a0)
2533; RV32-NEXT:    csrr a0, vl
2534; RV32-NEXT:    sw a0, 0(a2)
2535; RV32-NEXT:    ret
2536;
2537; RV64-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
2538; RV64:       # %bb.0: # %entry
2539; RV64-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
2540; RV64-NEXT:    vle8ff.v v8, (a0)
2541; RV64-NEXT:    csrr a0, vl
2542; RV64-NEXT:    sd a0, 0(a2)
2543; RV64-NEXT:    ret
2544entry:
2545  %a = call { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
2546    <vscale x 2 x i8> undef,
2547    ptr %0,
2548    iXLen %1)
2549  %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
2550  %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
2551  store iXLen %c, iXLen* %2
2552  ret <vscale x 2 x i8> %b
2553}
2554
2555declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
2556  <vscale x 2 x i8>,
2557  ptr,
2558  <vscale x 2 x i1>,
2559  iXLen,
2560  iXLen);
2561
2562define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2563; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
2564; RV32:       # %bb.0: # %entry
2565; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
2566; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
2567; RV32-NEXT:    csrr a0, vl
2568; RV32-NEXT:    sw a0, 0(a2)
2569; RV32-NEXT:    ret
2570;
2571; RV64-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
2572; RV64:       # %bb.0: # %entry
2573; RV64-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
2574; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
2575; RV64-NEXT:    csrr a0, vl
2576; RV64-NEXT:    sd a0, 0(a2)
2577; RV64-NEXT:    ret
2578entry:
2579  %a = call { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
2580    <vscale x 2 x i8> %0,
2581    ptr %1,
2582    <vscale x 2 x i1> %2,
2583    iXLen %3, iXLen 1)
2584  %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
2585  %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
2586  store iXLen %c, iXLen* %4
2587
2588  ret <vscale x 2 x i8> %b
2589}
2590
2591declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
2592  <vscale x 4 x i8>,
2593  ptr,
2594  iXLen);
2595
2596define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
2597; RV32-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
2598; RV32:       # %bb.0: # %entry
2599; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
2600; RV32-NEXT:    vle8ff.v v8, (a0)
2601; RV32-NEXT:    csrr a0, vl
2602; RV32-NEXT:    sw a0, 0(a2)
2603; RV32-NEXT:    ret
2604;
2605; RV64-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
2606; RV64:       # %bb.0: # %entry
2607; RV64-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
2608; RV64-NEXT:    vle8ff.v v8, (a0)
2609; RV64-NEXT:    csrr a0, vl
2610; RV64-NEXT:    sd a0, 0(a2)
2611; RV64-NEXT:    ret
2612entry:
2613  %a = call { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
2614    <vscale x 4 x i8> undef,
2615    ptr %0,
2616    iXLen %1)
2617  %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
2618  %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
2619  store iXLen %c, iXLen* %2
2620  ret <vscale x 4 x i8> %b
2621}
2622
2623declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
2624  <vscale x 4 x i8>,
2625  ptr,
2626  <vscale x 4 x i1>,
2627  iXLen,
2628  iXLen);
2629
2630define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2631; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
2632; RV32:       # %bb.0: # %entry
2633; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
2634; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
2635; RV32-NEXT:    csrr a0, vl
2636; RV32-NEXT:    sw a0, 0(a2)
2637; RV32-NEXT:    ret
2638;
2639; RV64-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
2640; RV64:       # %bb.0: # %entry
2641; RV64-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
2642; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
2643; RV64-NEXT:    csrr a0, vl
2644; RV64-NEXT:    sd a0, 0(a2)
2645; RV64-NEXT:    ret
2646entry:
2647  %a = call { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
2648    <vscale x 4 x i8> %0,
2649    ptr %1,
2650    <vscale x 4 x i1> %2,
2651    iXLen %3, iXLen 1)
2652  %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
2653  %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
2654  store iXLen %c, iXLen* %4
2655
2656  ret <vscale x 4 x i8> %b
2657}
2658
2659declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
2660  <vscale x 8 x i8>,
2661  ptr,
2662  iXLen);
2663
2664define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
2665; RV32-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
2666; RV32:       # %bb.0: # %entry
2667; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
2668; RV32-NEXT:    vle8ff.v v8, (a0)
2669; RV32-NEXT:    csrr a0, vl
2670; RV32-NEXT:    sw a0, 0(a2)
2671; RV32-NEXT:    ret
2672;
2673; RV64-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
2674; RV64:       # %bb.0: # %entry
2675; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
2676; RV64-NEXT:    vle8ff.v v8, (a0)
2677; RV64-NEXT:    csrr a0, vl
2678; RV64-NEXT:    sd a0, 0(a2)
2679; RV64-NEXT:    ret
2680entry:
2681  %a = call { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
2682    <vscale x 8 x i8> undef,
2683    ptr %0,
2684    iXLen %1)
2685  %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
2686  %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
2687  store iXLen %c, iXLen* %2
2688  ret <vscale x 8 x i8> %b
2689}
2690
2691declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
2692  <vscale x 8 x i8>,
2693  ptr,
2694  <vscale x 8 x i1>,
2695  iXLen,
2696  iXLen);
2697
2698define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2699; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
2700; RV32:       # %bb.0: # %entry
2701; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
2702; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
2703; RV32-NEXT:    csrr a0, vl
2704; RV32-NEXT:    sw a0, 0(a2)
2705; RV32-NEXT:    ret
2706;
2707; RV64-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
2708; RV64:       # %bb.0: # %entry
2709; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
2710; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
2711; RV64-NEXT:    csrr a0, vl
2712; RV64-NEXT:    sd a0, 0(a2)
2713; RV64-NEXT:    ret
2714entry:
2715  %a = call { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
2716    <vscale x 8 x i8> %0,
2717    ptr %1,
2718    <vscale x 8 x i1> %2,
2719    iXLen %3, iXLen 1)
2720  %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
2721  %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
2722  store iXLen %c, iXLen* %4
2723
2724  ret <vscale x 8 x i8> %b
2725}
2726
2727declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
2728  <vscale x 16 x i8>,
2729  ptr,
2730  iXLen);
2731
2732define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
2733; RV32-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
2734; RV32:       # %bb.0: # %entry
2735; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
2736; RV32-NEXT:    vle8ff.v v8, (a0)
2737; RV32-NEXT:    csrr a0, vl
2738; RV32-NEXT:    sw a0, 0(a2)
2739; RV32-NEXT:    ret
2740;
2741; RV64-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
2742; RV64:       # %bb.0: # %entry
2743; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
2744; RV64-NEXT:    vle8ff.v v8, (a0)
2745; RV64-NEXT:    csrr a0, vl
2746; RV64-NEXT:    sd a0, 0(a2)
2747; RV64-NEXT:    ret
2748entry:
2749  %a = call { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
2750    <vscale x 16 x i8> undef,
2751    ptr %0,
2752    iXLen %1)
2753  %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
2754  %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
2755  store iXLen %c, iXLen* %2
2756  ret <vscale x 16 x i8> %b
2757}
2758
2759declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
2760  <vscale x 16 x i8>,
2761  ptr,
2762  <vscale x 16 x i1>,
2763  iXLen,
2764  iXLen);
2765
2766define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2767; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
2768; RV32:       # %bb.0: # %entry
2769; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
2770; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
2771; RV32-NEXT:    csrr a0, vl
2772; RV32-NEXT:    sw a0, 0(a2)
2773; RV32-NEXT:    ret
2774;
2775; RV64-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
2776; RV64:       # %bb.0: # %entry
2777; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
2778; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
2779; RV64-NEXT:    csrr a0, vl
2780; RV64-NEXT:    sd a0, 0(a2)
2781; RV64-NEXT:    ret
2782entry:
2783  %a = call { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
2784    <vscale x 16 x i8> %0,
2785    ptr %1,
2786    <vscale x 16 x i1> %2,
2787    iXLen %3, iXLen 1)
2788  %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
2789  %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
2790  store iXLen %c, iXLen* %4
2791
2792  ret <vscale x 16 x i8> %b
2793}
2794
2795declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
2796  <vscale x 32 x i8>,
2797  ptr,
2798  iXLen);
2799
2800define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
2801; RV32-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
2802; RV32:       # %bb.0: # %entry
2803; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
2804; RV32-NEXT:    vle8ff.v v8, (a0)
2805; RV32-NEXT:    csrr a0, vl
2806; RV32-NEXT:    sw a0, 0(a2)
2807; RV32-NEXT:    ret
2808;
2809; RV64-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
2810; RV64:       # %bb.0: # %entry
2811; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
2812; RV64-NEXT:    vle8ff.v v8, (a0)
2813; RV64-NEXT:    csrr a0, vl
2814; RV64-NEXT:    sd a0, 0(a2)
2815; RV64-NEXT:    ret
2816entry:
2817  %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
2818    <vscale x 32 x i8> undef,
2819    ptr %0,
2820    iXLen %1)
2821  %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
2822  %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
2823  store iXLen %c, iXLen* %2
2824  ret <vscale x 32 x i8> %b
2825}
2826
2827declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
2828  <vscale x 32 x i8>,
2829  ptr,
2830  <vscale x 32 x i1>,
2831  iXLen,
2832  iXLen);
2833
2834define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2835; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
2836; RV32:       # %bb.0: # %entry
2837; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
2838; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
2839; RV32-NEXT:    csrr a0, vl
2840; RV32-NEXT:    sw a0, 0(a2)
2841; RV32-NEXT:    ret
2842;
2843; RV64-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
2844; RV64:       # %bb.0: # %entry
2845; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
2846; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
2847; RV64-NEXT:    csrr a0, vl
2848; RV64-NEXT:    sd a0, 0(a2)
2849; RV64-NEXT:    ret
2850entry:
2851  %a = call { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
2852    <vscale x 32 x i8> %0,
2853    ptr %1,
2854    <vscale x 32 x i1> %2,
2855    iXLen %3, iXLen 1)
2856  %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
2857  %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
2858  store iXLen %c, iXLen* %4
2859
2860  ret <vscale x 32 x i8> %b
2861}
2862
2863declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
2864  <vscale x 64 x i8>,
2865  ptr,
2866  iXLen);
2867
2868define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
2869; RV32-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
2870; RV32:       # %bb.0: # %entry
2871; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
2872; RV32-NEXT:    vle8ff.v v8, (a0)
2873; RV32-NEXT:    csrr a0, vl
2874; RV32-NEXT:    sw a0, 0(a2)
2875; RV32-NEXT:    ret
2876;
2877; RV64-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
2878; RV64:       # %bb.0: # %entry
2879; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
2880; RV64-NEXT:    vle8ff.v v8, (a0)
2881; RV64-NEXT:    csrr a0, vl
2882; RV64-NEXT:    sd a0, 0(a2)
2883; RV64-NEXT:    ret
2884entry:
2885  %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
2886    <vscale x 64 x i8> undef,
2887    ptr %0,
2888    iXLen %1)
2889  %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
2890  %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
2891  store iXLen %c, iXLen* %2
2892  ret <vscale x 64 x i8> %b
2893}
2894
2895declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
2896  <vscale x 64 x i8>,
2897  ptr,
2898  <vscale x 64 x i1>,
2899  iXLen,
2900  iXLen);
2901
2902define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2903; RV32-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
2904; RV32:       # %bb.0: # %entry
2905; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
2906; RV32-NEXT:    vle8ff.v v8, (a0), v0.t
2907; RV32-NEXT:    csrr a0, vl
2908; RV32-NEXT:    sw a0, 0(a2)
2909; RV32-NEXT:    ret
2910;
2911; RV64-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
2912; RV64:       # %bb.0: # %entry
2913; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
2914; RV64-NEXT:    vle8ff.v v8, (a0), v0.t
2915; RV64-NEXT:    csrr a0, vl
2916; RV64-NEXT:    sd a0, 0(a2)
2917; RV64-NEXT:    ret
2918entry:
2919  %a = call { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
2920    <vscale x 64 x i8> %0,
2921    ptr %1,
2922    <vscale x 64 x i1> %2,
2923    iXLen %3, iXLen 1)
2924  %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
2925  %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
2926  store iXLen %c, iXLen* %4
2927
2928  ret <vscale x 64 x i8> %b
2929}
2930
2931; Test with the VL output unused
2932define <vscale x 1 x double> @intrinsic_vleff_dead_vl(ptr %0, iXLen %1, iXLen* %2) nounwind {
2933; CHECK-LABEL: intrinsic_vleff_dead_vl:
2934; CHECK:       # %bb.0: # %entry
2935; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2936; CHECK-NEXT:    vle64ff.v v8, (a0)
2937; CHECK-NEXT:    ret
2938entry:
2939  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
2940    <vscale x 1 x double> undef,
2941    ptr %0,
2942    iXLen %1)
2943  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
2944  ret <vscale x 1 x double> %b
2945}
2946
2947define <vscale x 1 x double> @intrinsic_vleff_mask_dead_vl(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2948; CHECK-LABEL: intrinsic_vleff_mask_dead_vl:
2949; CHECK:       # %bb.0: # %entry
2950; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
2951; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
2952; CHECK-NEXT:    ret
2953entry:
2954  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
2955    <vscale x 1 x double> %0,
2956    ptr %1,
2957    <vscale x 1 x i1> %2,
2958    iXLen %3, iXLen 1)
2959  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
2960
2961  ret <vscale x 1 x double> %b
2962}
2963
2964; Test with the loaded value unused
2965define void @intrinsic_vleff_dead_value(ptr %0, iXLen %1, iXLen* %2) nounwind {
2966; RV32-LABEL: intrinsic_vleff_dead_value:
2967; RV32:       # %bb.0: # %entry
2968; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2969; RV32-NEXT:    vle64ff.v v8, (a0)
2970; RV32-NEXT:    csrr a0, vl
2971; RV32-NEXT:    sw a0, 0(a2)
2972; RV32-NEXT:    ret
2973;
2974; RV64-LABEL: intrinsic_vleff_dead_value:
2975; RV64:       # %bb.0: # %entry
2976; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2977; RV64-NEXT:    vle64ff.v v8, (a0)
2978; RV64-NEXT:    csrr a0, vl
2979; RV64-NEXT:    sd a0, 0(a2)
2980; RV64-NEXT:    ret
2981entry:
2982  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
2983    <vscale x 1 x double> undef,
2984    ptr %0,
2985    iXLen %1)
2986  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
2987  store iXLen %b, iXLen* %2
2988  ret void
2989}
2990
2991define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
2992; RV32-LABEL: intrinsic_vleff_mask_dead_value:
2993; RV32:       # %bb.0: # %entry
2994; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
2995; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
2996; RV32-NEXT:    csrr a0, vl
2997; RV32-NEXT:    sw a0, 0(a2)
2998; RV32-NEXT:    ret
2999;
3000; RV64-LABEL: intrinsic_vleff_mask_dead_value:
3001; RV64:       # %bb.0: # %entry
3002; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
3003; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
3004; RV64-NEXT:    csrr a0, vl
3005; RV64-NEXT:    sd a0, 0(a2)
3006; RV64-NEXT:    ret
3007entry:
3008  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
3009    <vscale x 1 x double> %0,
3010    ptr %1,
3011    <vscale x 1 x i1> %2,
3012    iXLen %3, iXLen 1)
3013  %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
3014  store iXLen %b, iXLen* %4
3015
3016  ret void
3017}
3018
3019; Test with both outputs dead. Make sure the vleff isn't deleted.
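; vleff has side effects (it may trap on the first element and it updates vl), so the call must survive even with no users.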
3020define void @intrinsic_vleff_dead_all(ptr %0, iXLen %1, iXLen* %2) nounwind {
3021; CHECK-LABEL: intrinsic_vleff_dead_all:
3022; CHECK:       # %bb.0: # %entry
3023; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
3024; CHECK-NEXT:    vle64ff.v v8, (a0)
3025; CHECK-NEXT:    ret
3026entry:
3027  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
3028    <vscale x 1 x double> undef,
3029    ptr %0,
3030    iXLen %1)
3031  ret void
3032}
3033
3034define void @intrinsic_vleff_mask_dead_all(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
3035; CHECK-LABEL: intrinsic_vleff_mask_dead_all:
3036; CHECK:       # %bb.0: # %entry
3037; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
3038; CHECK-NEXT:    vle64ff.v v8, (a0), v0.t
3039; CHECK-NEXT:    ret
3040entry:
3041  %a = call { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
3042    <vscale x 1 x double> %0,
3043    ptr %1,
3044    <vscale x 1 x i1> %2,
3045    iXLen %3, iXLen 1)
3046
3047  ret void
3048}
3049