; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll (revision 1cb599835ccf7ee8b2d1d5a7f3107e19a26fc6f5)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s

define <vscale x 1 x i8> @sextload_nxv1i1_nxv1i8(ptr %x) {
; CHECK-LABEL: sextload_nxv1i1_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i1>, ptr %x
  %z = sext <vscale x 1 x i1> %y to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %z
}

define <vscale x 1 x i16> @sextload_nxv1i8_nxv1i16(ptr %x) {
; CHECK-LABEL: sextload_nxv1i8_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = sext <vscale x 1 x i8> %y to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %z
}

define <vscale x 1 x i16> @zextload_nxv1i8_nxv1i16(ptr %x) {
; CHECK-LABEL: zextload_nxv1i8_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = zext <vscale x 1 x i8> %y to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %z
}

define <vscale x 1 x i32> @sextload_nxv1i8_nxv1i32(ptr %x) {
; CHECK-LABEL: sextload_nxv1i8_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = sext <vscale x 1 x i8> %y to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %z
}

define <vscale x 1 x i32> @zextload_nxv1i8_nxv1i32(ptr %x) {
; CHECK-LABEL: zextload_nxv1i8_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = zext <vscale x 1 x i8> %y to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %z
}

define <vscale x 1 x i64> @sextload_nxv1i8_nxv1i64(ptr %x) {
; CHECK-LABEL: sextload_nxv1i8_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf8 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = sext <vscale x 1 x i8> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}

define <vscale x 1 x i64> @zextload_nxv1i8_nxv1i64(ptr %x) {
; CHECK-LABEL: zextload_nxv1i8_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf8 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i8>, ptr %x
  %z = zext <vscale x 1 x i8> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}

define <vscale x 2 x i16> @sextload_nxv2i8_nxv2i16(ptr %x) {
; CHECK-LABEL: sextload_nxv2i8_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = sext <vscale x 2 x i8> %y to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %z
}

define <vscale x 2 x i16> @zextload_nxv2i8_nxv2i16(ptr %x) {
; CHECK-LABEL: zextload_nxv2i8_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = zext <vscale x 2 x i8> %y to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %z
}

define <vscale x 2 x i32> @sextload_nxv2i8_nxv2i32(ptr %x) {
; CHECK-LABEL: sextload_nxv2i8_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = sext <vscale x 2 x i8> %y to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %z
}

define <vscale x 2 x i32> @zextload_nxv2i8_nxv2i32(ptr %x) {
; CHECK-LABEL: zextload_nxv2i8_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = zext <vscale x 2 x i8> %y to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %z
}

define <vscale x 2 x i64> @sextload_nxv2i8_nxv2i64(ptr %x) {
; CHECK-LABEL: sextload_nxv2i8_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vsext.vf8 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = sext <vscale x 2 x i8> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}

define <vscale x 2 x i64> @zextload_nxv2i8_nxv2i64(ptr %x) {
; CHECK-LABEL: zextload_nxv2i8_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vzext.vf8 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i8>, ptr %x
  %z = zext <vscale x 2 x i8> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}

define <vscale x 4 x i16> @sextload_nxv4i8_nxv4i16(ptr %x) {
; CHECK-LABEL: sextload_nxv4i8_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = sext <vscale x 4 x i8> %y to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %z
}

define <vscale x 4 x i16> @zextload_nxv4i8_nxv4i16(ptr %x) {
; CHECK-LABEL: zextload_nxv4i8_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = zext <vscale x 4 x i8> %y to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %z
}

define <vscale x 4 x i32> @sextload_nxv4i8_nxv4i32(ptr %x) {
; CHECK-LABEL: sextload_nxv4i8_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = sext <vscale x 4 x i8> %y to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %z
}

define <vscale x 4 x i32> @zextload_nxv4i8_nxv4i32(ptr %x) {
; CHECK-LABEL: zextload_nxv4i8_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = zext <vscale x 4 x i8> %y to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %z
}

define <vscale x 4 x i64> @sextload_nxv4i8_nxv4i64(ptr %x) {
; CHECK-LABEL: sextload_nxv4i8_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vsext.vf8 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = sext <vscale x 4 x i8> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define <vscale x 4 x i64> @zextload_nxv4i8_nxv4i64(ptr %x) {
; CHECK-LABEL: zextload_nxv4i8_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vzext.vf8 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i8>, ptr %x
  %z = zext <vscale x 4 x i8> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define <vscale x 8 x i16> @sextload_nxv8i8_nxv8i16(ptr %x) {
; CHECK-LABEL: sextload_nxv8i8_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = sext <vscale x 8 x i8> %y to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %z
}

define <vscale x 8 x i16> @zextload_nxv8i8_nxv8i16(ptr %x) {
; CHECK-LABEL: zextload_nxv8i8_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = zext <vscale x 8 x i8> %y to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %z
}

define <vscale x 8 x i32> @sextload_nxv8i8_nxv8i32(ptr %x) {
; CHECK-LABEL: sextload_nxv8i8_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = sext <vscale x 8 x i8> %y to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %z
}

define <vscale x 8 x i32> @zextload_nxv8i8_nxv8i32(ptr %x) {
; CHECK-LABEL: zextload_nxv8i8_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = zext <vscale x 8 x i8> %y to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %z
}

define <vscale x 8 x i64> @sextload_nxv8i8_nxv8i64(ptr %x) {
; CHECK-LABEL: sextload_nxv8i8_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsext.vf8 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = sext <vscale x 8 x i8> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}

define <vscale x 8 x i64> @zextload_nxv8i8_nxv8i64(ptr %x) {
; CHECK-LABEL: zextload_nxv8i8_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vzext.vf8 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i8>, ptr %x
  %z = zext <vscale x 8 x i8> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}

define <vscale x 16 x i16> @sextload_nxv16i8_nxv16i16(ptr %x) {
; CHECK-LABEL: sextload_nxv16i8_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i8>, ptr %x
  %z = sext <vscale x 16 x i8> %y to <vscale x 16 x i16>
  ret <vscale x 16 x i16> %z
}

define <vscale x 16 x i16> @zextload_nxv16i8_nxv16i16(ptr %x) {
; CHECK-LABEL: zextload_nxv16i8_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i8>, ptr %x
  %z = zext <vscale x 16 x i8> %y to <vscale x 16 x i16>
  ret <vscale x 16 x i16> %z
}

define <vscale x 16 x i32> @sextload_nxv16i8_nxv16i32(ptr %x) {
; CHECK-LABEL: sextload_nxv16i8_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vsext.vf4 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i8>, ptr %x
  %z = sext <vscale x 16 x i8> %y to <vscale x 16 x i32>
  ret <vscale x 16 x i32> %z
}

define <vscale x 16 x i32> @zextload_nxv16i8_nxv16i32(ptr %x) {
; CHECK-LABEL: zextload_nxv16i8_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vzext.vf4 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i8>, ptr %x
  %z = zext <vscale x 16 x i8> %y to <vscale x 16 x i32>
  ret <vscale x 16 x i32> %z
}

define <vscale x 32 x i16> @sextload_nxv32i8_nxv32i16(ptr %x) {
; CHECK-LABEL: sextload_nxv32i8_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 32 x i8>, ptr %x
  %z = sext <vscale x 32 x i8> %y to <vscale x 32 x i16>
  ret <vscale x 32 x i16> %z
}

define <vscale x 32 x i16> @zextload_nxv32i8_nxv32i16(ptr %x) {
; CHECK-LABEL: zextload_nxv32i8_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4r.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 32 x i8>, ptr %x
  %z = zext <vscale x 32 x i8> %y to <vscale x 32 x i16>
  ret <vscale x 32 x i16> %z
}

define void @truncstore_nxv1i8_nxv1i1(<vscale x 1 x i8> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i8_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v8, v8, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i8> %x to <vscale x 1 x i1>
  store <vscale x 1 x i1> %y, ptr %z
  ret void
}

define void @truncstore_nxv1i16_nxv1i8(<vscale x 1 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i16_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i16> %x to <vscale x 1 x i8>
  store <vscale x 1 x i8> %y, ptr %z
  ret void
}

define <vscale x 1 x i32> @sextload_nxv1i16_nxv1i32(ptr %x) {
; CHECK-LABEL: sextload_nxv1i16_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i16>, ptr %x
  %z = sext <vscale x 1 x i16> %y to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %z
}

define <vscale x 1 x i32> @zextload_nxv1i16_nxv1i32(ptr %x) {
; CHECK-LABEL: zextload_nxv1i16_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i16>, ptr %x
  %z = zext <vscale x 1 x i16> %y to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %z
}

define <vscale x 1 x i64> @sextload_nxv1i16_nxv1i64(ptr %x) {
; CHECK-LABEL: sextload_nxv1i16_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i16>, ptr %x
  %z = sext <vscale x 1 x i16> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}

define <vscale x 1 x i64> @zextload_nxv1i16_nxv1i64(ptr %x) {
; CHECK-LABEL: zextload_nxv1i16_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i16>, ptr %x
  %z = zext <vscale x 1 x i16> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}

define void @truncstore_nxv2i16_nxv2i8(<vscale x 2 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i16_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i16> %x to <vscale x 2 x i8>
  store <vscale x 2 x i8> %y, ptr %z
  ret void
}

define <vscale x 2 x i32> @sextload_nxv2i16_nxv2i32(ptr %x) {
; CHECK-LABEL: sextload_nxv2i16_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i16>, ptr %x
  %z = sext <vscale x 2 x i16> %y to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %z
}

define <vscale x 2 x i32> @zextload_nxv2i16_nxv2i32(ptr %x) {
; CHECK-LABEL: zextload_nxv2i16_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i16>, ptr %x
  %z = zext <vscale x 2 x i16> %y to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %z
}

define <vscale x 2 x i64> @sextload_nxv2i16_nxv2i64(ptr %x) {
; CHECK-LABEL: sextload_nxv2i16_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i16>, ptr %x
  %z = sext <vscale x 2 x i16> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}

define <vscale x 2 x i64> @zextload_nxv2i16_nxv2i64(ptr %x) {
; CHECK-LABEL: zextload_nxv2i16_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i16>, ptr %x
  %z = zext <vscale x 2 x i16> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}

define void @truncstore_nxv4i16_nxv4i8(<vscale x 4 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i16_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i16> %x to <vscale x 4 x i8>
  store <vscale x 4 x i8> %y, ptr %z
  ret void
}

define <vscale x 4 x i32> @sextload_nxv4i16_nxv4i32(ptr %x) {
; CHECK-LABEL: sextload_nxv4i16_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i16>, ptr %x
  %z = sext <vscale x 4 x i16> %y to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %z
}

define <vscale x 4 x i32> @zextload_nxv4i16_nxv4i32(ptr %x) {
; CHECK-LABEL: zextload_nxv4i16_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i16>, ptr %x
  %z = zext <vscale x 4 x i16> %y to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %z
}

define <vscale x 4 x i64> @sextload_nxv4i16_nxv4i64(ptr %x) {
; CHECK-LABEL: sextload_nxv4i16_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i16>, ptr %x
  %z = sext <vscale x 4 x i16> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define <vscale x 4 x i64> @zextload_nxv4i16_nxv4i64(ptr %x) {
; CHECK-LABEL: zextload_nxv4i16_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vzext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i16>, ptr %x
  %z = zext <vscale x 4 x i16> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define void @truncstore_nxv8i16_nxv8i8(<vscale x 8 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i16_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i16> %x to <vscale x 8 x i8>
  store <vscale x 8 x i8> %y, ptr %z
  ret void
}

define <vscale x 8 x i32> @sextload_nxv8i16_nxv8i32(ptr %x) {
; CHECK-LABEL: sextload_nxv8i16_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i16>, ptr %x
  %z = sext <vscale x 8 x i16> %y to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %z
}

define <vscale x 8 x i32> @zextload_nxv8i16_nxv8i32(ptr %x) {
; CHECK-LABEL: zextload_nxv8i16_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i16>, ptr %x
  %z = zext <vscale x 8 x i16> %y to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %z
}

define <vscale x 8 x i64> @sextload_nxv8i16_nxv8i64(ptr %x) {
; CHECK-LABEL: sextload_nxv8i16_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsext.vf4 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i16>, ptr %x
  %z = sext <vscale x 8 x i16> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}

define <vscale x 8 x i64> @zextload_nxv8i16_nxv8i64(ptr %x) {
; CHECK-LABEL: zextload_nxv8i16_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vzext.vf4 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i16>, ptr %x
  %z = zext <vscale x 8 x i16> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}

define void @truncstore_nxv16i16_nxv16i8(<vscale x 16 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv16i16_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vs2r.v v12, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 16 x i16> %x to <vscale x 16 x i8>
  store <vscale x 16 x i8> %y, ptr %z
  ret void
}

define <vscale x 16 x i32> @sextload_nxv16i16_nxv16i32(ptr %x) {
; CHECK-LABEL: sextload_nxv16i16_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i16>, ptr %x
  %z = sext <vscale x 16 x i16> %y to <vscale x 16 x i32>
  ret <vscale x 16 x i32> %z
}

define <vscale x 16 x i32> @zextload_nxv16i16_nxv16i32(ptr %x) {
; CHECK-LABEL: zextload_nxv16i16_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x i16>, ptr %x
  %z = zext <vscale x 16 x i16> %y to <vscale x 16 x i32>
  ret <vscale x 16 x i32> %z
}

define void @truncstore_nxv32i16_nxv32i8(<vscale x 32 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv32i16_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vs4r.v v16, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 32 x i16> %x to <vscale x 32 x i8>
  store <vscale x 32 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv1i32_nxv1i8(<vscale x 1 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i32_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i8>
  store <vscale x 1 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv1i32_nxv1i16(<vscale x 1 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i32_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i16>
  store <vscale x 1 x i16> %y, ptr %z
  ret void
}

define <vscale x 1 x i64> @sextload_nxv1i32_nxv1i64(ptr %x) {
; CHECK-LABEL: sextload_nxv1i32_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i32>, ptr %x
  %z = sext <vscale x 1 x i32> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}

define <vscale x 1 x i64> @zextload_nxv1i32_nxv1i64(ptr %x) {
; CHECK-LABEL: zextload_nxv1i32_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x i32>, ptr %x
  %z = zext <vscale x 1 x i32> %y to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %z
}

define void @truncstore_nxv2i32_nxv2i8(<vscale x 2 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i32_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i8>
  store <vscale x 2 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv2i32_nxv2i16(<vscale x 2 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i32_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i16>
  store <vscale x 2 x i16> %y, ptr %z
  ret void
}

define <vscale x 2 x i64> @sextload_nxv2i32_nxv2i64(ptr %x) {
; CHECK-LABEL: sextload_nxv2i32_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re32.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i32>, ptr %x
  %z = sext <vscale x 2 x i32> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}

define <vscale x 2 x i64> @zextload_nxv2i32_nxv2i64(ptr %x) {
; CHECK-LABEL: zextload_nxv2i32_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re32.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x i32>, ptr %x
  %z = zext <vscale x 2 x i32> %y to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %z
}

define void @truncstore_nxv4i32_nxv4i8(<vscale x 4 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i32_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i8>
  store <vscale x 4 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv4i32_nxv4i16(<vscale x 4 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i32_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i16>
  store <vscale x 4 x i16> %y, ptr %z
  ret void
}

define <vscale x 4 x i64> @sextload_nxv4i32_nxv4i64(ptr %x) {
; CHECK-LABEL: sextload_nxv4i32_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re32.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i32>, ptr %x
  %z = sext <vscale x 4 x i32> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define <vscale x 4 x i64> @zextload_nxv4i32_nxv4i64(ptr %x) {
; CHECK-LABEL: zextload_nxv4i32_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re32.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x i32>, ptr %x
  %z = zext <vscale x 4 x i32> %y to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %z
}

define void @truncstore_nxv8i32_nxv8i8(<vscale x 8 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i32_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i8>
  store <vscale x 8 x i8> %y, ptr %z
  ret void
}

define void @truncstore_nxv8i32_nxv8i16(<vscale x 8 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i32_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vs2r.v v12, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i16>
  store <vscale x 8 x i16> %y, ptr %z
  ret void
}

define <vscale x 8 x i64> @sextload_nxv8i32_nxv8i64(ptr %x) {
; CHECK-LABEL: sextload_nxv8i32_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re32.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i32>, ptr %x
  %z = sext <vscale x 8 x i32> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}

define <vscale x 8 x i64> @zextload_nxv8i32_nxv8i64(ptr %x) {
; CHECK-LABEL: zextload_nxv8i32_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re32.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vzext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x i32>, ptr %x
  %z = zext <vscale x 8 x i32> %y to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %z
}

; trunc nxv16i32 -> nxv16i8: two vnsrl.wi steps (32->16->8), then vs2r store.
define void @truncstore_nxv16i32_nxv16i8(<vscale x 16 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv16i32_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i8>
  store <vscale x 16 x i8> %y, ptr %z
  ret void
}
892
; trunc nxv16i32 -> nxv16i16: single vnsrl.wi narrowing, then vs4r store.
define void @truncstore_nxv16i32_nxv16i16(<vscale x 16 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv16i32_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vs4r.v v16, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i16>
  store <vscale x 16 x i16> %y, ptr %z
  ret void
}
904
; trunc nxv1i64 -> nxv1i8: three in-place vnsrl.wi steps (64->32->16->8), then unit-stride vse8.
define void @truncstore_nxv1i64_nxv1i8(<vscale x 1 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i64_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i8>
  store <vscale x 1 x i8> %y, ptr %z
  ret void
}
920
; trunc nxv1i64 -> nxv1i16: two in-place vnsrl.wi steps (64->32->16), then vse16.
define void @truncstore_nxv1i64_nxv1i16(<vscale x 1 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i64_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i16>
  store <vscale x 1 x i16> %y, ptr %z
  ret void
}
934
; trunc nxv1i64 -> nxv1i32: single vnsrl.wi narrowing, then vse32.
define void @truncstore_nxv1i64_nxv1i32(<vscale x 1 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1i64_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
  store <vscale x 1 x i32> %y, ptr %z
  ret void
}
946
; trunc nxv2i64 -> nxv2i8: three vnsrl.wi steps (64->32->16->8), then vse8.
define void @truncstore_nxv2i64_nxv2i8(<vscale x 2 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i64_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i8>
  store <vscale x 2 x i8> %y, ptr %z
  ret void
}
962
; trunc nxv2i64 -> nxv2i16: two vnsrl.wi steps (64->32->16), then vse16.
define void @truncstore_nxv2i64_nxv2i16(<vscale x 2 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i64_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i16>
  store <vscale x 2 x i16> %y, ptr %z
  ret void
}
976
; trunc nxv2i64 -> nxv2i32: single vnsrl.wi narrowing, then whole-register vs1r store.
define void @truncstore_nxv2i64_nxv2i32(<vscale x 2 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2i64_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
  store <vscale x 2 x i32> %y, ptr %z
  ret void
}
988
; trunc nxv4i64 -> nxv4i8: three vnsrl.wi steps (64->32->16->8), then vse8.
define void @truncstore_nxv4i64_nxv4i8(<vscale x 4 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i64_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i8>
  store <vscale x 4 x i8> %y, ptr %z
  ret void
}
1004
; trunc nxv4i64 -> nxv4i16: two vnsrl.wi steps (64->32->16), then vs1r store.
define void @truncstore_nxv4i64_nxv4i16(<vscale x 4 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i64_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i16>
  store <vscale x 4 x i16> %y, ptr %z
  ret void
}
1018
; trunc nxv4i64 -> nxv4i32: single vnsrl.wi narrowing, then vs2r store.
define void @truncstore_nxv4i64_nxv4i32(<vscale x 4 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4i64_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vs2r.v v12, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
  store <vscale x 4 x i32> %y, ptr %z
  ret void
}
1030
; trunc nxv8i64 -> nxv8i8: three vnsrl.wi steps (64->32->16->8), then vs1r store.
define void @truncstore_nxv8i64_nxv8i8(<vscale x 8 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i64_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i8>
  store <vscale x 8 x i8> %y, ptr %z
  ret void
}
1046
; trunc nxv8i64 -> nxv8i16: two vnsrl.wi steps (64->32->16), then vs2r store.
define void @truncstore_nxv8i64_nxv8i16(<vscale x 8 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i64_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i16>
  store <vscale x 8 x i16> %y, ptr %z
  ret void
}
1060
; trunc nxv8i64 -> nxv8i32: single vnsrl.wi narrowing, then vs4r store.
define void @truncstore_nxv8i64_nxv8i32(<vscale x 8 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8i64_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vs4r.v v16, (a0)
; CHECK-NEXT:    ret
  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
  store <vscale x 8 x i32> %y, ptr %z
  ret void
}
1072
; fpext load nxv1f16 -> nxv1f32: vle16 load, then a single vfwcvt.f.f.v widening.
define <vscale x 1 x float> @extload_nxv1f16_nxv1f32(ptr %x) {
; CHECK-LABEL: extload_nxv1f16_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x half>, ptr %x
  %z = fpext <vscale x 1 x half> %y to <vscale x 1 x float>
  ret <vscale x 1 x float> %z
}
1084
; fpext load nxv1f16 -> nxv1f64: two vfwcvt.f.f.v widening steps (f16->f32->f64).
define <vscale x 1 x double> @extload_nxv1f16_nxv1f64(ptr %x) {
; CHECK-LABEL: extload_nxv1f16_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x half>, ptr %x
  %z = fpext <vscale x 1 x half> %y to <vscale x 1 x double>
  ret <vscale x 1 x double> %z
}
1098
; fpext load nxv2f16 -> nxv2f32: vle16 load, then a single vfwcvt.f.f.v widening.
define <vscale x 2 x float> @extload_nxv2f16_nxv2f32(ptr %x) {
; CHECK-LABEL: extload_nxv2f16_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x half>, ptr %x
  %z = fpext <vscale x 2 x half> %y to <vscale x 2 x float>
  ret <vscale x 2 x float> %z
}
1110
; fpext load nxv2f16 -> nxv2f64: two vfwcvt.f.f.v widening steps (f16->f32->f64).
define <vscale x 2 x double> @extload_nxv2f16_nxv2f64(ptr %x) {
; CHECK-LABEL: extload_nxv2f16_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x half>, ptr %x
  %z = fpext <vscale x 2 x half> %y to <vscale x 2 x double>
  ret <vscale x 2 x double> %z
}
1124
; fpext load nxv4f16 -> nxv4f32: whole-register vl1re16 load, then one vfwcvt widening.
define <vscale x 4 x float> @extload_nxv4f16_nxv4f32(ptr %x) {
; CHECK-LABEL: extload_nxv4f16_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x half>, ptr %x
  %z = fpext <vscale x 4 x half> %y to <vscale x 4 x float>
  ret <vscale x 4 x float> %z
}
1136
; fpext load nxv4f16 -> nxv4f64: two vfwcvt.f.f.v widening steps (f16->f32->f64).
define <vscale x 4 x double> @extload_nxv4f16_nxv4f64(ptr %x) {
; CHECK-LABEL: extload_nxv4f16_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re16.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x half>, ptr %x
  %z = fpext <vscale x 4 x half> %y to <vscale x 4 x double>
  ret <vscale x 4 x double> %z
}
1150
; fpext load nxv8f16 -> nxv8f32: whole-register vl2re16 load, then one vfwcvt widening.
define <vscale x 8 x float> @extload_nxv8f16_nxv8f32(ptr %x) {
; CHECK-LABEL: extload_nxv8f16_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x half>, ptr %x
  %z = fpext <vscale x 8 x half> %y to <vscale x 8 x float>
  ret <vscale x 8 x float> %z
}
1162
; fpext load nxv8f16 -> nxv8f64: two vfwcvt.f.f.v widening steps (f16->f32->f64).
define <vscale x 8 x double> @extload_nxv8f16_nxv8f64(ptr %x) {
; CHECK-LABEL: extload_nxv8f16_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re16.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x half>, ptr %x
  %z = fpext <vscale x 8 x half> %y to <vscale x 8 x double>
  ret <vscale x 8 x double> %z
}
1176
; fpext load nxv16f16 -> nxv16f32: whole-register vl4re16 load, then one vfwcvt widening.
define <vscale x 16 x float> @extload_nxv16f16_nxv16f32(ptr %x) {
; CHECK-LABEL: extload_nxv16f16_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 16 x half>, ptr %x
  %z = fpext <vscale x 16 x half> %y to <vscale x 16 x float>
  ret <vscale x 16 x float> %z
}
1188
; fptrunc nxv1f32 -> nxv1f16: single vfncvt.f.f.w narrowing, then vse16 store.
define void @truncstore_nxv1f32_nxv1f16(<vscale x 1 x float> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1f32_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 1 x float> %x to <vscale x 1 x half>
  store <vscale x 1 x half> %y, ptr %z
  ret void
}
1200
; fpext load nxv1f32 -> nxv1f64: vle32 load, then a single vfwcvt.f.f.v widening.
define <vscale x 1 x double> @extload_nxv1f32_nxv1f64(ptr %x) {
; CHECK-LABEL: extload_nxv1f32_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9
; CHECK-NEXT:    ret
  %y = load <vscale x 1 x float>, ptr %x
  %z = fpext <vscale x 1 x float> %y to <vscale x 1 x double>
  ret <vscale x 1 x double> %z
}
1212
; fptrunc nxv2f32 -> nxv2f16: single vfncvt.f.f.w narrowing, then vse16 store.
define void @truncstore_nxv2f32_nxv2f16(<vscale x 2 x float> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2f32_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 2 x float> %x to <vscale x 2 x half>
  store <vscale x 2 x half> %y, ptr %z
  ret void
}
1224
; fpext load nxv2f32 -> nxv2f64: whole-register vl1re32 load, then one vfwcvt widening.
define <vscale x 2 x double> @extload_nxv2f32_nxv2f64(ptr %x) {
; CHECK-LABEL: extload_nxv2f32_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl1re32.v v10, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v10
; CHECK-NEXT:    ret
  %y = load <vscale x 2 x float>, ptr %x
  %z = fpext <vscale x 2 x float> %y to <vscale x 2 x double>
  ret <vscale x 2 x double> %z
}
1236
; fptrunc nxv4f32 -> nxv4f16: single vfncvt.f.f.w narrowing, then vs1r store.
define void @truncstore_nxv4f32_nxv4f16(<vscale x 4 x float> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4f32_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 4 x float> %x to <vscale x 4 x half>
  store <vscale x 4 x half> %y, ptr %z
  ret void
}
1248
; fpext load nxv4f32 -> nxv4f64: whole-register vl2re32 load, then one vfwcvt widening.
define <vscale x 4 x double> @extload_nxv4f32_nxv4f64(ptr %x) {
; CHECK-LABEL: extload_nxv4f32_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl2re32.v v12, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v12
; CHECK-NEXT:    ret
  %y = load <vscale x 4 x float>, ptr %x
  %z = fpext <vscale x 4 x float> %y to <vscale x 4 x double>
  ret <vscale x 4 x double> %z
}
1260
; fptrunc nxv8f32 -> nxv8f16: single vfncvt.f.f.w narrowing, then vs2r store.
define void @truncstore_nxv8f32_nxv8f16(<vscale x 8 x float> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8f32_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v12, v8
; CHECK-NEXT:    vs2r.v v12, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 8 x float> %x to <vscale x 8 x half>
  store <vscale x 8 x half> %y, ptr %z
  ret void
}
1272
; fpext load nxv8f32 -> nxv8f64: whole-register vl4re32 load, then one vfwcvt widening.
define <vscale x 8 x double> @extload_nxv8f32_nxv8f64(ptr %x) {
; CHECK-LABEL: extload_nxv8f32_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl4re32.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwcvt.f.f.v v8, v16
; CHECK-NEXT:    ret
  %y = load <vscale x 8 x float>, ptr %x
  %z = fpext <vscale x 8 x float> %y to <vscale x 8 x double>
  ret <vscale x 8 x double> %z
}
1284
; fptrunc nxv16f32 -> nxv16f16: single vfncvt.f.f.w narrowing, then vs4r store.
define void @truncstore_nxv16f32_nxv16f16(<vscale x 16 x float> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv16f32_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v16, v8
; CHECK-NEXT:    vs4r.v v16, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 16 x float> %x to <vscale x 16 x half>
  store <vscale x 16 x half> %y, ptr %z
  ret void
}
1296
; fptrunc nxv1f64 -> nxv1f16: round-to-odd intermediate (vfncvt.rod) to f32, then
; vfncvt.f.f.w to f16, then vse16 store.
define void @truncstore_nxv1f64_nxv1f16(<vscale x 1 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1f64_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 1 x double> %x to <vscale x 1 x half>
  store <vscale x 1 x half> %y, ptr %z
  ret void
}
1310
; fptrunc nxv1f64 -> nxv1f32: single vfncvt.f.f.w narrowing, then vse32 store.
define void @truncstore_nxv1f64_nxv1f32(<vscale x 1 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv1f64_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 1 x double> %x to <vscale x 1 x float>
  store <vscale x 1 x float> %y, ptr %z
  ret void
}
1322
; fptrunc nxv2f64 -> nxv2f16: round-to-odd intermediate (vfncvt.rod) to f32, then
; vfncvt.f.f.w to f16, then vse16 store.
define void @truncstore_nxv2f64_nxv2f16(<vscale x 2 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2f64_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v10
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x half>
  store <vscale x 2 x half> %y, ptr %z
  ret void
}
1336
; fptrunc nxv2f64 -> nxv2f32: single vfncvt.f.f.w narrowing, then vs1r store.
define void @truncstore_nxv2f64_nxv2f32(<vscale x 2 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv2f64_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vs1r.v v10, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float>
  store <vscale x 2 x float> %y, ptr %z
  ret void
}
1348
; fptrunc nxv4f64 -> nxv4f16: round-to-odd intermediate (vfncvt.rod) to f32, then
; vfncvt.f.f.w to f16, then vs1r store.
define void @truncstore_nxv4f64_nxv4f16(<vscale x 4 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4f64_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v12
; CHECK-NEXT:    vs1r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x half>
  store <vscale x 4 x half> %y, ptr %z
  ret void
}
1362
; fptrunc nxv4f64 -> nxv4f32: single vfncvt.f.f.w narrowing, then vs2r store.
define void @truncstore_nxv4f64_nxv4f32(<vscale x 4 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv4f64_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v12, v8
; CHECK-NEXT:    vs2r.v v12, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x float>
  store <vscale x 4 x float> %y, ptr %z
  ret void
}
1374
; fptrunc nxv8f64 -> nxv8f16: round-to-odd intermediate (vfncvt.rod) to f32, then
; vfncvt.f.f.w to f16, then vs2r store.
define void @truncstore_nxv8f64_nxv8f16(<vscale x 8 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8f64_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.rod.f.f.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v8, v16
; CHECK-NEXT:    vs2r.v v8, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x half>
  store <vscale x 8 x half> %y, ptr %z
  ret void
}
1388
; fptrunc nxv8f64 -> nxv8f32: single vfncvt.f.f.w narrowing, then vs4r store.
define void @truncstore_nxv8f64_nxv8f32(<vscale x 8 x double> %x, ptr %z) {
; CHECK-LABEL: truncstore_nxv8f64_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.f.w v16, v8
; CHECK-NEXT:    vs4r.v v16, (a0)
; CHECK-NEXT:    ret
  %y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x float>
  store <vscale x 8 x float> %y, ptr %z
  ret void
}
1400