; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll (revision 26766a00ff946c281b7dd517b2ba8d594012c21e)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

; Sign-extending load of a <2 x i1> mask vector: the mask is loaded with
; vlm.v and expanded to 0/-1 i16 elements via vmerge.vim.
define <2 x i16> @sextload_v2i1_v2i16(ptr %x) {
; CHECK-LABEL: sextload_v2i1_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    ret
  %y = load <2 x i1>, ptr %x
  %z = sext <2 x i1> %y to <2 x i16>
  ret <2 x i16> %z
}

; Extending loads of <2 x i8> to i16/i32/i64 elements (vsext/vzext .vf2/.vf4/.vf8).
define <2 x i16> @sextload_v2i8_v2i16(ptr %x) {
; CHECK-LABEL: sextload_v2i8_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i8>, ptr %x
  %z = sext <2 x i8> %y to <2 x i16>
  ret <2 x i16> %z
}

define <2 x i16> @zextload_v2i8_v2i16(ptr %x) {
; CHECK-LABEL: zextload_v2i8_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i8>, ptr %x
  %z = zext <2 x i8> %y to <2 x i16>
  ret <2 x i16> %z
}

define <2 x i32> @sextload_v2i8_v2i32(ptr %x) {
; CHECK-LABEL: sextload_v2i8_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i8>, ptr %x
  %z = sext <2 x i8> %y to <2 x i32>
  ret <2 x i32> %z
}

define <2 x i32> @zextload_v2i8_v2i32(ptr %x) {
; CHECK-LABEL: zextload_v2i8_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i8>, ptr %x
  %z = zext <2 x i8> %y to <2 x i32>
  ret <2 x i32> %z
}

define <2 x i64> @sextload_v2i8_v2i64(ptr %x) {
; CHECK-LABEL: sextload_v2i8_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf8 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i8>, ptr %x
  %z = sext <2 x i8> %y to <2 x i64>
  ret <2 x i64> %z
}

define <2 x i64> @zextload_v2i8_v2i64(ptr %x) {
; CHECK-LABEL: zextload_v2i8_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf8 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i8>, ptr %x
  %z = zext <2 x i8> %y to <2 x i64>
  ret <2 x i64> %z
}

; Extending loads of <4 x i8> to i16/i32/i64 elements.
define <4 x i16> @sextload_v4i8_v4i16(ptr %x) {
; CHECK-LABEL: sextload_v4i8_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <4 x i8>, ptr %x
  %z = sext <4 x i8> %y to <4 x i16>
  ret <4 x i16> %z
}

define <4 x i16> @zextload_v4i8_v4i16(ptr %x) {
; CHECK-LABEL: zextload_v4i8_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <4 x i8>, ptr %x
  %z = zext <4 x i8> %y to <4 x i16>
  ret <4 x i16> %z
}

define <4 x i32> @sextload_v4i8_v4i32(ptr %x) {
; CHECK-LABEL: sextload_v4i8_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <4 x i8>, ptr %x
  %z = sext <4 x i8> %y to <4 x i32>
  ret <4 x i32> %z
}

define <4 x i32> @zextload_v4i8_v4i32(ptr %x) {
; CHECK-LABEL: zextload_v4i8_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <4 x i8>, ptr %x
  %z = zext <4 x i8> %y to <4 x i32>
  ret <4 x i32> %z
}

define <4 x i64> @sextload_v4i8_v4i64(ptr %x) {
; CHECK-LABEL: sextload_v4i8_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vsext.vf8 v8, v10
; CHECK-NEXT:    ret
  %y = load <4 x i8>, ptr %x
  %z = sext <4 x i8> %y to <4 x i64>
  ret <4 x i64> %z
}

define <4 x i64> @zextload_v4i8_v4i64(ptr %x) {
; CHECK-LABEL: zextload_v4i8_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vzext.vf8 v8, v10
; CHECK-NEXT:    ret
  %y = load <4 x i8>, ptr %x
  %z = zext <4 x i8> %y to <4 x i64>
  ret <4 x i64> %z
}

; Extending loads of <8 x i8> to i16/i32/i64 elements.
define <8 x i16> @sextload_v8i8_v8i16(ptr %x) {
; CHECK-LABEL: sextload_v8i8_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <8 x i8>, ptr %x
  %z = sext <8 x i8> %y to <8 x i16>
  ret <8 x i16> %z
}

define <8 x i16> @zextload_v8i8_v8i16(ptr %x) {
; CHECK-LABEL: zextload_v8i8_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle8.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <8 x i8>, ptr %x
  %z = zext <8 x i8> %y to <8 x i16>
  ret <8 x i16> %z
}

define <8 x i32> @sextload_v8i8_v8i32(ptr %x) {
; CHECK-LABEL: sextload_v8i8_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <8 x i8>, ptr %x
  %z = sext <8 x i8> %y to <8 x i32>
  ret <8 x i32> %z
}

define <8 x i32> @zextload_v8i8_v8i32(ptr %x) {
; CHECK-LABEL: zextload_v8i8_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <8 x i8>, ptr %x
  %z = zext <8 x i8> %y to <8 x i32>
  ret <8 x i32> %z
}

define <8 x i64> @sextload_v8i8_v8i64(ptr %x) {
; CHECK-LABEL: sextload_v8i8_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vsext.vf8 v8, v12
; CHECK-NEXT:    ret
  %y = load <8 x i8>, ptr %x
  %z = sext <8 x i8> %y to <8 x i64>
  ret <8 x i64> %z
}

define <8 x i64> @zextload_v8i8_v8i64(ptr %x) {
; CHECK-LABEL: zextload_v8i8_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vzext.vf8 v8, v12
; CHECK-NEXT:    ret
  %y = load <8 x i8>, ptr %x
  %z = zext <8 x i8> %y to <8 x i64>
  ret <8 x i64> %z
}

; Extending loads of <16 x i8> to i16/i32/i64 elements (up to LMUL=8).
define <16 x i16> @sextload_v16i8_v16i16(ptr %x) {
; CHECK-LABEL: sextload_v16i8_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <16 x i8>, ptr %x
  %z = sext <16 x i8> %y to <16 x i16>
  ret <16 x i16> %z
}

define <16 x i16> @zextload_v16i8_v16i16(ptr %x) {
; CHECK-LABEL: zextload_v16i8_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle8.v v10, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <16 x i8>, ptr %x
  %z = zext <16 x i8> %y to <16 x i16>
  ret <16 x i16> %z
}

define <16 x i32> @sextload_v16i8_v16i32(ptr %x) {
; CHECK-LABEL: sextload_v16i8_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <16 x i8>, ptr %x
  %z = sext <16 x i8> %y to <16 x i32>
  ret <16 x i32> %z
}

define <16 x i32> @zextload_v16i8_v16i32(ptr %x) {
; CHECK-LABEL: zextload_v16i8_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle8.v v12, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <16 x i8>, ptr %x
  %z = zext <16 x i8> %y to <16 x i32>
  ret <16 x i32> %z
}

define <16 x i64> @sextload_v16i8_v16i64(ptr %x) {
; CHECK-LABEL: sextload_v16i8_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vsext.vf8 v8, v16
; CHECK-NEXT:    ret
  %y = load <16 x i8>, ptr %x
  %z = sext <16 x i8> %y to <16 x i64>
  ret <16 x i64> %z
}

define <16 x i64> @zextload_v16i8_v16i64(ptr %x) {
; CHECK-LABEL: zextload_v16i8_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vzext.vf8 v8, v16
; CHECK-NEXT:    ret
  %y = load <16 x i8>, ptr %x
  %z = zext <16 x i8> %y to <16 x i64>
  ret <16 x i64> %z
}

; Truncating store of <2 x i8> to a <2 x i1> mask: truncation keeps bit 0
; (vand.vi ... 1), the mask is widened into a full mask-register byte layout,
; then stored with vsm.v.
define void @truncstore_v2i8_v2i1(<2 x i8> %x, ptr %z) {
; CHECK-LABEL: truncstore_v2i8_v2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 1
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <2 x i8> %x to <2 x i1>
  store <2 x i1> %y, ptr %z
  ret void
}

; i16-source tests (2- and 4-element): truncating stores via vnsrl.wi and
; extending loads to i32/i64.
define void @truncstore_v2i16_v2i8(<2 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_v2i16_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <2 x i16> %x to <2 x i8>
  store <2 x i8> %y, ptr %z
  ret void
}

define <2 x i32> @sextload_v2i16_v2i32(ptr %x) {
; CHECK-LABEL: sextload_v2i16_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i16>, ptr %x
  %z = sext <2 x i16> %y to <2 x i32>
  ret <2 x i32> %z
}

define <2 x i32> @zextload_v2i16_v2i32(ptr %x) {
; CHECK-LABEL: zextload_v2i16_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i16>, ptr %x
  %z = zext <2 x i16> %y to <2 x i32>
  ret <2 x i32> %z
}

define <2 x i64> @sextload_v2i16_v2i64(ptr %x) {
; CHECK-LABEL: sextload_v2i16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i16>, ptr %x
  %z = sext <2 x i16> %y to <2 x i64>
  ret <2 x i64> %z
}

define <2 x i64> @zextload_v2i16_v2i64(ptr %x) {
; CHECK-LABEL: zextload_v2i16_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i16>, ptr %x
  %z = zext <2 x i16> %y to <2 x i64>
  ret <2 x i64> %z
}

define void @truncstore_v4i16_v4i8(<4 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_v4i16_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <4 x i16> %x to <4 x i8>
  store <4 x i8> %y, ptr %z
  ret void
}

define <4 x i32> @sextload_v4i16_v4i32(ptr %x) {
; CHECK-LABEL: sextload_v4i16_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <4 x i16>, ptr %x
  %z = sext <4 x i16> %y to <4 x i32>
  ret <4 x i32> %z
}

define <4 x i32> @zextload_v4i16_v4i32(ptr %x) {
; CHECK-LABEL: zextload_v4i16_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle16.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <4 x i16>, ptr %x
  %z = zext <4 x i16> %y to <4 x i32>
  ret <4 x i32> %z
}

define <4 x i64> @sextload_v4i16_v4i64(ptr %x) {
; CHECK-LABEL: sextload_v4i16_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <4 x i16>, ptr %x
  %z = sext <4 x i16> %y to <4 x i64>
  ret <4 x i64> %z
}

define <4 x i64> @zextload_v4i16_v4i64(ptr %x) {
; CHECK-LABEL: zextload_v4i16_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v10
; CHECK-NEXT:    ret
  %y = load <4 x i16>, ptr %x
  %z = zext <4 x i16> %y to <4 x i64>
  ret <4 x i64> %z
}

; i16-source tests (8- and 16-element): truncating stores and extending loads.
define void @truncstore_v8i16_v8i8(<8 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_v8i16_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <8 x i16> %x to <8 x i8>
  store <8 x i8> %y, ptr %z
  ret void
}

define <8 x i32> @sextload_v8i16_v8i32(ptr %x) {
; CHECK-LABEL: sextload_v8i16_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <8 x i16>, ptr %x
  %z = sext <8 x i16> %y to <8 x i32>
  ret <8 x i32> %z
}

define <8 x i32> @zextload_v8i16_v8i32(ptr %x) {
; CHECK-LABEL: zextload_v8i16_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle16.v v10, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <8 x i16>, ptr %x
  %z = zext <8 x i16> %y to <8 x i32>
  ret <8 x i32> %z
}

define <8 x i64> @sextload_v8i16_v8i64(ptr %x) {
; CHECK-LABEL: sextload_v8i16_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle16.v v12, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <8 x i16>, ptr %x
  %z = sext <8 x i16> %y to <8 x i64>
  ret <8 x i64> %z
}

define <8 x i64> @zextload_v8i16_v8i64(ptr %x) {
; CHECK-LABEL: zextload_v8i16_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle16.v v12, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v12
; CHECK-NEXT:    ret
  %y = load <8 x i16>, ptr %x
  %z = zext <8 x i16> %y to <8 x i64>
  ret <8 x i64> %z
}

define void @truncstore_v16i16_v16i8(<16 x i16> %x, ptr %z) {
; CHECK-LABEL: truncstore_v16i16_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vse8.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <16 x i16> %x to <16 x i8>
  store <16 x i8> %y, ptr %z
  ret void
}

define <16 x i32> @sextload_v16i16_v16i32(ptr %x) {
; CHECK-LABEL: sextload_v16i16_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle16.v v12, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <16 x i16>, ptr %x
  %z = sext <16 x i16> %y to <16 x i32>
  ret <16 x i32> %z
}

define <16 x i32> @zextload_v16i16_v16i32(ptr %x) {
; CHECK-LABEL: zextload_v16i16_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle16.v v12, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <16 x i16>, ptr %x
  %z = zext <16 x i16> %y to <16 x i32>
  ret <16 x i32> %z
}

define <16 x i64> @sextload_v16i16_v16i64(ptr %x) {
; CHECK-LABEL: sextload_v16i16_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vsext.vf4 v8, v16
; CHECK-NEXT:    ret
  %y = load <16 x i16>, ptr %x
  %z = sext <16 x i16> %y to <16 x i64>
  ret <16 x i64> %z
}

define <16 x i64> @zextload_v16i16_v16i64(ptr %x) {
; CHECK-LABEL: zextload_v16i16_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vzext.vf4 v8, v16
; CHECK-NEXT:    ret
  %y = load <16 x i16>, ptr %x
  %z = zext <16 x i16> %y to <16 x i64>
  ret <16 x i64> %z
}

; i32-source tests (2- and 4-element): multi-step vnsrl truncations and
; extending loads to i64.
define void @truncstore_v2i32_v2i8(<2 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_v2i32_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <2 x i32> %x to <2 x i8>
  store <2 x i8> %y, ptr %z
  ret void
}

define void @truncstore_v2i32_v2i16(<2 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_v2i32_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <2 x i32> %x to <2 x i16>
  store <2 x i16> %y, ptr %z
  ret void
}

define <2 x i64> @sextload_v2i32_v2i64(ptr %x) {
; CHECK-LABEL: sextload_v2i32_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i32>, ptr %x
  %z = sext <2 x i32> %y to <2 x i64>
  ret <2 x i64> %z
}

define <2 x i64> @zextload_v2i32_v2i64(ptr %x) {
; CHECK-LABEL: zextload_v2i32_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle32.v v9, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v9
; CHECK-NEXT:    ret
  %y = load <2 x i32>, ptr %x
  %z = zext <2 x i32> %y to <2 x i64>
  ret <2 x i64> %z
}

define void @truncstore_v4i32_v4i8(<4 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_v4i32_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <4 x i32> %x to <4 x i8>
  store <4 x i8> %y, ptr %z
  ret void
}

define void @truncstore_v4i32_v4i16(<4 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_v4i32_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <4 x i32> %x to <4 x i16>
  store <4 x i16> %y, ptr %z
  ret void
}

define <4 x i64> @sextload_v4i32_v4i64(ptr %x) {
; CHECK-LABEL: sextload_v4i32_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle32.v v10, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <4 x i32>, ptr %x
  %z = sext <4 x i32> %y to <4 x i64>
  ret <4 x i64> %z
}

define <4 x i64> @zextload_v4i32_v4i64(ptr %x) {
; CHECK-LABEL: zextload_v4i32_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle32.v v10, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v10
; CHECK-NEXT:    ret
  %y = load <4 x i32>, ptr %x
  %z = zext <4 x i32> %y to <4 x i64>
  ret <4 x i64> %z
}

; i32-source tests (8- and 16-element): multi-step vnsrl truncations and
; extending loads to i64.
define void @truncstore_v8i32_v8i8(<8 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_v8i32_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <8 x i32> %x to <8 x i8>
  store <8 x i8> %y, ptr %z
  ret void
}

define void @truncstore_v8i32_v8i16(<8 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_v8i32_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vse16.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <8 x i32> %x to <8 x i16>
  store <8 x i16> %y, ptr %z
  ret void
}

define <8 x i64> @sextload_v8i32_v8i64(ptr %x) {
; CHECK-LABEL: sextload_v8i32_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle32.v v12, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <8 x i32>, ptr %x
  %z = sext <8 x i32> %y to <8 x i64>
  ret <8 x i64> %z
}

define <8 x i64> @zextload_v8i32_v8i64(ptr %x) {
; CHECK-LABEL: zextload_v8i32_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle32.v v12, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v12
; CHECK-NEXT:    ret
  %y = load <8 x i32>, ptr %x
  %z = zext <8 x i32> %y to <8 x i64>
  ret <8 x i64> %z
}

define void @truncstore_v16i32_v16i8(<16 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_v16i32_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <16 x i32> %x to <16 x i8>
  store <16 x i8> %y, ptr %z
  ret void
}

define void @truncstore_v16i32_v16i16(<16 x i32> %x, ptr %z) {
; CHECK-LABEL: truncstore_v16i32_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vse16.v v12, (a0)
; CHECK-NEXT:    ret
  %y = trunc <16 x i32> %x to <16 x i16>
  store <16 x i16> %y, ptr %z
  ret void
}

define <16 x i64> @sextload_v16i32_v16i64(ptr %x) {
; CHECK-LABEL: sextload_v16i32_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vsext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <16 x i32>, ptr %x
  %z = sext <16 x i32> %y to <16 x i64>
  ret <16 x i64> %z
}

define <16 x i64> @zextload_v16i32_v16i64(ptr %x) {
; CHECK-LABEL: zextload_v16i32_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vzext.vf2 v8, v16
; CHECK-NEXT:    ret
  %y = load <16 x i32>, ptr %x
  %z = zext <16 x i32> %y to <16 x i64>
  ret <16 x i64> %z
}

; i64-source truncating stores (2- and 4-element): each halving of the element
; width is one vnsrl.wi step.
define void @truncstore_v2i64_v2i8(<2 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v2i64_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <2 x i64> %x to <2 x i8>
  store <2 x i8> %y, ptr %z
  ret void
}

define void @truncstore_v2i64_v2i16(<2 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v2i64_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <2 x i64> %x to <2 x i16>
  store <2 x i16> %y, ptr %z
  ret void
}

define void @truncstore_v2i64_v2i32(<2 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v2i64_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <2 x i64> %x to <2 x i32>
  store <2 x i32> %y, ptr %z
  ret void
}

define void @truncstore_v4i64_v4i8(<4 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v4i64_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <4 x i64> %x to <4 x i8>
  store <4 x i8> %y, ptr %z
  ret void
}

define void @truncstore_v4i64_v4i16(<4 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v4i64_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <4 x i64> %x to <4 x i16>
  store <4 x i16> %y, ptr %z
  ret void
}

define void @truncstore_v4i64_v4i32(<4 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v4i64_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vse32.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <4 x i64> %x to <4 x i32>
  store <4 x i32> %y, ptr %z
  ret void
}

; i64-source truncating stores (8- and 16-element).
define void @truncstore_v8i64_v8i8(<8 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v8i64_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <8 x i64> %x to <8 x i8>
  store <8 x i8> %y, ptr %z
  ret void
}

define void @truncstore_v8i64_v8i16(<8 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v8i64_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <8 x i64> %x to <8 x i16>
  store <8 x i16> %y, ptr %z
  ret void
}

define void @truncstore_v8i64_v8i32(<8 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v8i64_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vse32.v v12, (a0)
; CHECK-NEXT:    ret
  %y = trunc <8 x i64> %x to <8 x i32>
  store <8 x i32> %y, ptr %z
  ret void
}

define void @truncstore_v16i64_v16i8(<16 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v16i64_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vse8.v v10, (a0)
; CHECK-NEXT:    ret
  %y = trunc <16 x i64> %x to <16 x i8>
  store <16 x i8> %y, ptr %z
  ret void
}

define void @truncstore_v16i64_v16i16(<16 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v16i64_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %y = trunc <16 x i64> %x to <16 x i16>
  store <16 x i16> %y, ptr %z
  ret void
}

define void @truncstore_v16i64_v16i32(<16 x i64> %x, ptr %z) {
; CHECK-LABEL: truncstore_v16i64_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vse32.v v16, (a0)
; CHECK-NEXT:    ret
  %y = trunc <16 x i64> %x to <16 x i32>
  store <16 x i32> %y, ptr %z
  ret void
}