; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

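; Integer truncation of scalable vectors has no dedicated RVV instruction; it
; is lowered as a narrowing shift-right by immediate 0 (vnsrl.wi), which keeps
; the low half of each source element. Each vnsrl.wi halves the element width,
; so truncating across more than one halving becomes a chain of them. The
; leading vsetvli (rd=a0, rs1=zero) requests VLMAX for the result type.
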
define <vscale x 1 x i8> @vtrunc_nxv1i16_nxv1i8(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i16> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %tvec
}

define <vscale x 2 x i8> @vtrunc_nxv2i16_nxv2i8(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i16> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %tvec
}

define <vscale x 4 x i8> @vtrunc_nxv4i16_nxv4i8(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i16> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %tvec
}

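; Once the source spans a group of two or more vector registers, the narrowed
; result is written to a fresh register group and then copied into v8 (the
; return-value register) with vmv.v.v, rather than narrowed in place as in the
; fractional-LMUL cases above. This apparently reflects the overlap
; (earlyclobber) constraints placed on narrowing-instruction destinations.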
define <vscale x 8 x i8> @vtrunc_nxv8i16_nxv8i8(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i16> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %tvec
}

define <vscale x 16 x i8> @vtrunc_nxv16i16_nxv16i8(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 16 x i16> %va to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %tvec
}

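; A single vnsrl.wi can only halve the element width, so i32 -> i8 takes two
; steps (i32 -> i16 -> i8). The vsetvli between the steps uses zero, zero to
; keep the current vl while switching SEW/LMUL for the next halving.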
define <vscale x 1 x i8> @vtrunc_nxv1i32_nxv1i8(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %tvec
}

define <vscale x 1 x i16> @vtrunc_nxv1i32_nxv1i16(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %tvec
}

define <vscale x 2 x i8> @vtrunc_nxv2i32_nxv2i8(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %tvec
}

define <vscale x 2 x i16> @vtrunc_nxv2i32_nxv2i16(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %tvec
}

define <vscale x 4 x i8> @vtrunc_nxv4i32_nxv4i8(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %tvec
}

define <vscale x 4 x i16> @vtrunc_nxv4i32_nxv4i16(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %tvec
}

define <vscale x 8 x i8> @vtrunc_nxv8i32_nxv8i8(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %tvec
}

define <vscale x 8 x i16> @vtrunc_nxv8i32_nxv8i16(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %tvec
}

define <vscale x 16 x i8> @vtrunc_nxv16i32_nxv16i8(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %tvec
}

define <vscale x 16 x i16> @vtrunc_nxv16i32_nxv16i16(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i16>
  ret <vscale x 16 x i16> %tvec
}

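; i64 sources follow the same pattern with up to three halvings:
; i64 -> i32 -> i16 -> i8. Note the first vsetvli already selects e32, the
; element width of the first step's result, since vnsrl reads its source
; operand at twice the configured SEW.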
define <vscale x 1 x i8> @vtrunc_nxv1i64_nxv1i8(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %tvec
}

define <vscale x 1 x i16> @vtrunc_nxv1i64_nxv1i16(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %tvec
}

define <vscale x 1 x i32> @vtrunc_nxv1i64_nxv1i32(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %tvec
}

define <vscale x 2 x i8> @vtrunc_nxv2i64_nxv2i8(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %tvec
}

define <vscale x 2 x i16> @vtrunc_nxv2i64_nxv2i16(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %tvec
}

define <vscale x 2 x i32> @vtrunc_nxv2i64_nxv2i32(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v8, 0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %tvec
}

define <vscale x 4 x i8> @vtrunc_nxv4i64_nxv4i8(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %tvec
}

define <vscale x 4 x i16> @vtrunc_nxv4i64_nxv4i16(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v12, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %tvec
}

define <vscale x 4 x i32> @vtrunc_nxv4i64_nxv4i32(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v12, v8, 0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %tvec
}

define <vscale x 8 x i8> @vtrunc_nxv8i64_nxv8i8(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v10, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v10, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %tvec
}

define <vscale x 8 x i16> @vtrunc_nxv8i64_nxv8i16(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v16, 0
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %tvec
}

define <vscale x 8 x i32> @vtrunc_nxv8i64_nxv8i32(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnsrl.wi v16, v8, 0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %tvec
}