; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=2 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX2
; RUN: llc -mtriple=riscv32 -mattr=+m,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
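; Test sign and zero extension from i8 to i32 and truncation from i32 to i8 on
; fixed-length vectors, with the fixed-length LMUL limit capped at 8, 2, and 1.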

define void @sext_v4i8_v4i32(<4 x i8>* %x, <4 x i32>* %z) {
; CHECK-LABEL: sext_v4i8_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vsext.vf4 v9, v8
; CHECK-NEXT:    vse32.v v9, (a1)
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = sext <4 x i8> %a to <4 x i32>
  store <4 x i32> %b, <4 x i32>* %z
  ret void
}

define void @zext_v4i8_v4i32(<4 x i8>* %x, <4 x i32>* %z) {
; CHECK-LABEL: zext_v4i8_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vzext.vf4 v9, v8
; CHECK-NEXT:    vse32.v v9, (a1)
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = zext <4 x i8> %a to <4 x i32>
  store <4 x i32> %b, <4 x i32>* %z
  ret void
}

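; With the LMUL limit at 1 the <8 x i32> result no longer fits in a single
; vector register, so the source is split with vslidedown and each half is
; extended and stored separately.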
define void @sext_v8i8_v8i32(<8 x i8>* %x, <8 x i32>* %z) {
; LMULMAX8-LABEL: sext_v8i8_v8i32:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX8-NEXT:    vle8.v v8, (a0)
; LMULMAX8-NEXT:    vsext.vf4 v10, v8
; LMULMAX8-NEXT:    vse32.v v10, (a1)
; LMULMAX8-NEXT:    ret
;
; LMULMAX2-LABEL: sext_v8i8_v8i32:
; LMULMAX2:       # %bb.0:
; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-NEXT:    vle8.v v8, (a0)
; LMULMAX2-NEXT:    vsext.vf4 v10, v8
; LMULMAX2-NEXT:    vse32.v v10, (a1)
; LMULMAX2-NEXT:    ret
;
; LMULMAX1-LABEL: sext_v8i8_v8i32:
; LMULMAX1:       # %bb.0:
; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; LMULMAX1-NEXT:    vle8.v v8, (a0)
; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT:    vslidedown.vi v9, v8, 4
; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT:    vsext.vf4 v10, v9
; LMULMAX1-NEXT:    vsext.vf4 v9, v8
; LMULMAX1-NEXT:    addi a0, a1, 16
; LMULMAX1-NEXT:    vse32.v v10, (a0)
; LMULMAX1-NEXT:    vse32.v v9, (a1)
; LMULMAX1-NEXT:    ret
  %a = load <8 x i8>, <8 x i8>* %x
  %b = sext <8 x i8> %a to <8 x i32>
  store <8 x i32> %b, <8 x i32>* %z
  ret void
}

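; The <32 x i32> result is produced by a single vsext.vf4 at LMUL=8, but is
; split into four pieces under the LMUL=2 limit and eight pieces under the
; LMUL=1 limit.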
define void @sext_v32i8_v32i32(<32 x i8>* %x, <32 x i32>* %z) {
; LMULMAX8-LABEL: sext_v32i8_v32i32:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    li a2, 32
; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; LMULMAX8-NEXT:    vle8.v v8, (a0)
; LMULMAX8-NEXT:    vsext.vf4 v16, v8
; LMULMAX8-NEXT:    vse32.v v16, (a1)
; LMULMAX8-NEXT:    ret
;
; LMULMAX2-LABEL: sext_v32i8_v32i32:
; LMULMAX2:       # %bb.0:
; LMULMAX2-NEXT:    li a2, 32
; LMULMAX2-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; LMULMAX2-NEXT:    vle8.v v8, (a0)
; LMULMAX2-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX2-NEXT:    vslidedown.vi v10, v8, 8
; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-NEXT:    vsext.vf4 v12, v10
; LMULMAX2-NEXT:    vsetivli zero, 16, e8, m2, ta, ma
; LMULMAX2-NEXT:    vslidedown.vi v10, v8, 16
; LMULMAX2-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX2-NEXT:    vslidedown.vi v14, v10, 8
; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-NEXT:    vsext.vf4 v16, v14
; LMULMAX2-NEXT:    vsext.vf4 v14, v8
; LMULMAX2-NEXT:    vsext.vf4 v8, v10
; LMULMAX2-NEXT:    addi a0, a1, 64
; LMULMAX2-NEXT:    vse32.v v8, (a0)
; LMULMAX2-NEXT:    vse32.v v14, (a1)
; LMULMAX2-NEXT:    addi a0, a1, 96
; LMULMAX2-NEXT:    vse32.v v16, (a0)
; LMULMAX2-NEXT:    addi a0, a1, 32
; LMULMAX2-NEXT:    vse32.v v12, (a0)
; LMULMAX2-NEXT:    ret
;
; LMULMAX1-LABEL: sext_v32i8_v32i32:
; LMULMAX1:       # %bb.0:
; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX1-NEXT:    addi a2, a0, 16
; LMULMAX1-NEXT:    vle8.v v8, (a2)
; LMULMAX1-NEXT:    vle8.v v9, (a0)
; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT:    vslidedown.vi v10, v8, 4
; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT:    vsext.vf4 v11, v10
; LMULMAX1-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT:    vslidedown.vi v10, v8, 8
; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT:    vslidedown.vi v12, v10, 4
; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT:    vsext.vf4 v13, v12
; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT:    vslidedown.vi v12, v9, 4
; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT:    vsext.vf4 v14, v12
; LMULMAX1-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT:    vslidedown.vi v12, v9, 8
; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT:    vslidedown.vi v15, v12, 4
; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT:    vsext.vf4 v16, v15
; LMULMAX1-NEXT:    vsext.vf4 v15, v10
; LMULMAX1-NEXT:    vsext.vf4 v10, v12
; LMULMAX1-NEXT:    vsext.vf4 v12, v8
; LMULMAX1-NEXT:    vsext.vf4 v8, v9
; LMULMAX1-NEXT:    addi a0, a1, 32
; LMULMAX1-NEXT:    vse32.v v10, (a0)
; LMULMAX1-NEXT:    vse32.v v8, (a1)
; LMULMAX1-NEXT:    addi a0, a1, 96
; LMULMAX1-NEXT:    vse32.v v15, (a0)
; LMULMAX1-NEXT:    addi a0, a1, 64
; LMULMAX1-NEXT:    vse32.v v12, (a0)
; LMULMAX1-NEXT:    addi a0, a1, 48
; LMULMAX1-NEXT:    vse32.v v16, (a0)
; LMULMAX1-NEXT:    addi a0, a1, 16
; LMULMAX1-NEXT:    vse32.v v14, (a0)
; LMULMAX1-NEXT:    addi a0, a1, 112
; LMULMAX1-NEXT:    vse32.v v13, (a0)
; LMULMAX1-NEXT:    addi a0, a1, 80
; LMULMAX1-NEXT:    vse32.v v11, (a0)
; LMULMAX1-NEXT:    ret
  %a = load <32 x i8>, <32 x i8>* %x
  %b = sext <32 x i8> %a to <32 x i32>
  store <32 x i32> %b, <32 x i32>* %z
  ret void
}

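; Truncation is lowered as two vnsrl.wi steps, since each narrowing shift only
; halves the element width (i32 -> i16 -> i8).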
define void @trunc_v4i8_v4i32(<4 x i32>* %x, <4 x i8>* %z) {
; CHECK-LABEL: trunc_v4i8_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vse8.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = trunc <4 x i32> %a to <4 x i8>
  store <4 x i8> %b, <4 x i8>* %z
  ret void
}

define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
; LMULMAX8-LABEL: trunc_v8i8_v8i32:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX8-NEXT:    vle32.v v8, (a0)
; LMULMAX8-NEXT:    vnsrl.wi v10, v8, 0
; LMULMAX8-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX8-NEXT:    vnsrl.wi v8, v10, 0
; LMULMAX8-NEXT:    vse8.v v8, (a1)
; LMULMAX8-NEXT:    ret
;
; LMULMAX2-LABEL: trunc_v8i8_v8i32:
; LMULMAX2:       # %bb.0:
; LMULMAX2-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX2-NEXT:    vle32.v v8, (a0)
; LMULMAX2-NEXT:    vnsrl.wi v10, v8, 0
; LMULMAX2-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX2-NEXT:    vnsrl.wi v8, v10, 0
; LMULMAX2-NEXT:    vse8.v v8, (a1)
; LMULMAX2-NEXT:    ret
;
; LMULMAX1-LABEL: trunc_v8i8_v8i32:
; LMULMAX1:       # %bb.0:
; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT:    vle32.v v8, (a0)
; LMULMAX1-NEXT:    addi a0, a0, 16
; LMULMAX1-NEXT:    vle32.v v9, (a0)
; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT:    vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT:    vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, ma
; LMULMAX1-NEXT:    vslideup.vi v8, v9, 4
; LMULMAX1-NEXT:    vse8.v v8, (a1)
; LMULMAX1-NEXT:    ret
  %a = load <8 x i32>, <8 x i32>* %x
  %b = trunc <8 x i32> %a to <8 x i8>
  store <8 x i8> %b, <8 x i8>* %z
  ret void
}