; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll (revision 169c32eb49fa9b559d388b9b8f4374ff9e1be9be)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

; ---------------------------------------------------------------------------
; i8 element tests: arithmetic shift right (ashr) lowering to vsra.vv (vector
; shift amount), vsra.vx (scalar splat shift amount), and vsra.vi (immediate
; shift amount) across all scalable i8 vector types nxv1i8..nxv64i8.
; (This section had code-viewer line numbers fused onto every line; they are
; stripped here so the file parses as LLVM IR again.)
; ---------------------------------------------------------------------------
define <vscale x 1 x i8> @vsra_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i8> %va, %vb
  ret <vscale x 1 x i8> %vc
}

; NOTE(review): the zext below operates on %va, not %vb, so %vb is unused
; (the CHECK lines — vmin.vx v9, v8 — match this IR, i.e. both shift operands
; come from %va). Likely a typo for %vb; fixing it would change codegen and
; requires rerunning update_llc_test_checks.py. Confirm intent before changing.
define <vscale x 1 x i8> @vsra_vv_nxv1i8_sext_zext(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i8_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i32>
  %zexted_vb = zext <vscale x 1 x i8> %va to <vscale x 1 x i32>
  %expand = ashr <vscale x 1 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 1 x i32> %expand to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vsra_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %vc = ashr <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vsra_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i8> %va, splat (i8 6)
  ret <vscale x 1 x i8> %vc
}

define <vscale x 2 x i8> @vsra_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i8> %va, %vb
  ret <vscale x 2 x i8> %vc
}

; NOTE(review): zext uses %va (not %vb); %vb is unused — likely typo, CHECKs match current IR.
define <vscale x 2 x i8> @vsra_vv_nxv2i8_sext_zext(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i8_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 2 x i8> %va to <vscale x 2 x i32>
  %zexted_vb = zext <vscale x 2 x i8> %va to <vscale x 2 x i32>
  %expand = ashr <vscale x 2 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 2 x i32> %expand to <vscale x 2 x i8>
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vsra_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %vc = ashr <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vsra_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i8> %va, splat (i8 6)
  ret <vscale x 2 x i8> %vc
}

define <vscale x 4 x i8> @vsra_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i8> %va, %vb
  ret <vscale x 4 x i8> %vc
}

; NOTE(review): zext uses %va (not %vb); %vb is unused — likely typo, CHECKs match current IR.
define <vscale x 4 x i8> @vsra_vv_nxv4i8_sext_zext(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i8_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 4 x i8> %va to <vscale x 4 x i32>
  %zexted_vb = zext <vscale x 4 x i8> %va to <vscale x 4 x i32>
  %expand = ashr <vscale x 4 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 4 x i32> %expand to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vsra_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %vc = ashr <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vsra_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i8> %va, splat (i8 6)
  ret <vscale x 4 x i8> %vc
}

define <vscale x 8 x i8> @vsra_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i8> %vc
}

; NOTE(review): zext uses %va (not %vb); %vb is unused — likely typo, CHECKs match current IR.
define <vscale x 8 x i8> @vsra_vv_nxv8i8_sext_zext(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i8_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 8 x i8> %va to <vscale x 8 x i32>
  %zexted_vb = zext <vscale x 8 x i8> %va to <vscale x 8 x i32>
  %expand = ashr <vscale x 8 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 8 x i32> %expand to <vscale x 8 x i8>
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vsra_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vsra_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i8> %va, splat (i8 6)
  ret <vscale x 8 x i8> %vc
}

define <vscale x 16 x i8> @vsra_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i8> %va, %vb
  ret <vscale x 16 x i8> %vc
}

; NOTE(review): zext uses %va (not %vb); %vb is unused — likely typo, CHECKs match current IR.
define <vscale x 16 x i8> @vsra_vv_nxv16i8_sext_zext(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i8_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 7
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmin.vx v10, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 16 x i8> %va to <vscale x 16 x i32>
  %zexted_vb = zext <vscale x 16 x i8> %va to <vscale x 16 x i32>
  %expand = ashr <vscale x 16 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 16 x i32> %expand to <vscale x 16 x i8>
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vsra_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %vc = ashr <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vsra_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i8> %va, splat (i8 6)
  ret <vscale x 16 x i8> %vc
}

define <vscale x 32 x i8> @vsra_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 32 x i8> %va, %vb
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vsra_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %vc = ashr <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vsra_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 32 x i8> %va, splat (i8 6)
  ret <vscale x 32 x i8> %vc
}

define <vscale x 64 x i8> @vsra_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vsra_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 64 x i8> %va, %vb
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vsra_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vsra_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %vc = ashr <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vsra_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vsra_vi_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 64 x i8> %va, splat (i8 6)
  ret <vscale x 64 x i8> %vc
}
303
; ---------------------------------------------------------------------------
; i16 element tests: ashr lowering to vsra.vv / vsra.vx / vsra.vi for
; nxv1i16..nxv32i16. (Fused code-viewer line numbers stripped from every line
; so the file parses as LLVM IR again.)
; ---------------------------------------------------------------------------
define <vscale x 1 x i16> @vsra_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i16> %va, %vb
  ret <vscale x 1 x i16> %vc
}

; NOTE(review): zext uses %va (not %vb); %vb is unused — likely typo, CHECKs match current IR.
define <vscale x 1 x i16> @vsra_vv_nxv1i16_sext_zext(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i16_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 15
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
  %zexted_vb = zext <vscale x 1 x i16> %va to <vscale x 1 x i32>
  %expand = ashr <vscale x 1 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 1 x i32> %expand to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vsra_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %vc = ashr <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vsra_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i16> %va, splat (i16 6)
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vsra_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i16> %va, %vb
  ret <vscale x 2 x i16> %vc
}

; NOTE(review): zext uses %va (not %vb); %vb is unused — likely typo, CHECKs match current IR.
define <vscale x 2 x i16> @vsra_vv_nxv2i16_sext_zext(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i16_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 15
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
  %zexted_vb = zext <vscale x 2 x i16> %va to <vscale x 2 x i32>
  %expand = ashr <vscale x 2 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 2 x i32> %expand to <vscale x 2 x i16>
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vsra_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %vc = ashr <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vsra_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i16> %va, splat (i16 6)
  ret <vscale x 2 x i16> %vc
}

define <vscale x 4 x i16> @vsra_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i16> %va, %vb
  ret <vscale x 4 x i16> %vc
}

; NOTE(review): zext uses %va (not %vb); %vb is unused — likely typo, CHECKs match current IR.
define <vscale x 4 x i16> @vsra_vv_nxv4i16_sext_zext(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i16_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 15
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmin.vx v9, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
  %zexted_vb = zext <vscale x 4 x i16> %va to <vscale x 4 x i32>
  %expand = ashr <vscale x 4 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 4 x i32> %expand to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vsra_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %vc = ashr <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vsra_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i16> %va, splat (i16 6)
  ret <vscale x 4 x i16> %vc
}

define <vscale x 8 x i16> @vsra_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i16> %vc
}

; NOTE(review): zext uses %va (not %vb); %vb is unused — likely typo, CHECKs match current IR.
define <vscale x 8 x i16> @vsra_vv_nxv8i16_sext_zext(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i16_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 15
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmin.vx v10, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
  %zexted_vb = zext <vscale x 8 x i16> %va to <vscale x 8 x i32>
  %expand = ashr <vscale x 8 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 8 x i32> %expand to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vsra_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vsra_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i16> %va, splat (i16 6)
  ret <vscale x 8 x i16> %vc
}

define <vscale x 16 x i16> @vsra_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i16> %va, %vb
  ret <vscale x 16 x i16> %vc
}

; NOTE(review): zext uses %va (not %vb); %vb is unused — likely typo, CHECKs match current IR.
define <vscale x 16 x i16> @vsra_vv_nxv16i16_sext_zext(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i16_sext_zext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 15
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmin.vx v12, v8, a0
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
  %zexted_vb = zext <vscale x 16 x i16> %va to <vscale x 16 x i32>
  %expand = ashr <vscale x 16 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 16 x i32> %expand to <vscale x 16 x i16>
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vsra_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %vc = ashr <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vsra_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i16> %va, splat (i16 6)
  ret <vscale x 16 x i16> %vc
}

define <vscale x 32 x i16> @vsra_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vsra_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 32 x i16> %va, %vb
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vsra_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vsra_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %vc = ashr <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vsra_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vsra_vi_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 6
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 32 x i16> %va, splat (i16 6)
  ret <vscale x 32 x i16> %vc
}
570
; ---------------------------------------------------------------------------
; i32 element tests: ashr lowering to vsra.vv / vsra.vx / vsra.vi for
; nxv1i32..nxv16i32; the immediate tests use 31, the largest in-range uimm5.
; (Fused code-viewer line numbers stripped from every line so the file parses
; as LLVM IR again.)
; ---------------------------------------------------------------------------
define <vscale x 1 x i32> @vsra_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i32> %va, %vb
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vsra_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vsra_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vc = ashr <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vsra_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 1 x i32> %va, splat (i32 31)
  ret <vscale x 1 x i32> %vc
}

define <vscale x 2 x i32> @vsra_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i32> %va, %vb
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vsra_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vsra_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vc = ashr <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vsra_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i32> %va, splat (i32 31)
  ret <vscale x 2 x i32> %vc
}

define <vscale x 4 x i32> @vsra_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i32> %va, %vb
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vsra_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vsra_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vc = ashr <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vsra_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i32> %va, splat (i32 31)
  ret <vscale x 4 x i32> %vc
}

define <vscale x 8 x i32> @vsra_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i32> %va, %vb
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vsra_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vsra_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vsra_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i32> %va, splat (i32 31)
  ret <vscale x 8 x i32> %vc
}

define <vscale x 16 x i32> @vsra_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vsra_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i32> %va, %vb
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vsra_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vsra_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %vc = ashr <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vsra_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vsra_vi_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 16 x i32> %va, splat (i32 31)
  ret <vscale x 16 x i32> %vc
}
730
731define <vscale x 1 x i64> @vsra_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
732; CHECK-LABEL: vsra_vv_nxv1i64:
733; CHECK:       # %bb.0:
734; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
735; CHECK-NEXT:    vsra.vv v8, v8, v9
736; CHECK-NEXT:    ret
737  %vc = ashr <vscale x 1 x i64> %va, %vb
738  ret <vscale x 1 x i64> %vc
739}
740
741define <vscale x 1 x i64> @vsra_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
742; CHECK-LABEL: vsra_vx_nxv1i64:
743; CHECK:       # %bb.0:
744; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
745; CHECK-NEXT:    vsra.vx v8, v8, a0
746; CHECK-NEXT:    ret
747  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
748  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
749  %vc = ashr <vscale x 1 x i64> %va, %splat
750  ret <vscale x 1 x i64> %vc
751}
752
753define <vscale x 1 x i64> @vsra_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
754; CHECK-LABEL: vsra_vi_nxv1i64_0:
755; CHECK:       # %bb.0:
756; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
757; CHECK-NEXT:    vsra.vi v8, v8, 31
758; CHECK-NEXT:    ret
759  %vc = ashr <vscale x 1 x i64> %va, splat (i64 31)
760  ret <vscale x 1 x i64> %vc
761}
762
763define <vscale x 1 x i64> @vsra_vi_nxv1i64_1(<vscale x 1 x i64> %va) {
764; CHECK-LABEL: vsra_vi_nxv1i64_1:
765; CHECK:       # %bb.0:
766; CHECK-NEXT:    li a0, 32
767; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
768; CHECK-NEXT:    vsra.vx v8, v8, a0
769; CHECK-NEXT:    ret
770  %vc = ashr <vscale x 1 x i64> %va, splat (i64 32)
771  ret <vscale x 1 x i64> %vc
772}
773
774define <vscale x 2 x i64> @vsra_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
775; CHECK-LABEL: vsra_vv_nxv2i64:
776; CHECK:       # %bb.0:
777; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
778; CHECK-NEXT:    vsra.vv v8, v8, v10
779; CHECK-NEXT:    ret
780  %vc = ashr <vscale x 2 x i64> %va, %vb
781  ret <vscale x 2 x i64> %vc
782}
783
784define <vscale x 2 x i64> @vsra_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
785; CHECK-LABEL: vsra_vx_nxv2i64:
786; CHECK:       # %bb.0:
787; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
788; CHECK-NEXT:    vsra.vx v8, v8, a0
789; CHECK-NEXT:    ret
790  %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
791  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
792  %vc = ashr <vscale x 2 x i64> %va, %splat
793  ret <vscale x 2 x i64> %vc
794}
795
; Immediate 31 fits the 5-bit uimm of vsra.vi.
define <vscale x 2 x i64> @vsra_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i64> %va, splat (i64 31)
  ret <vscale x 2 x i64> %vc
}
805
; Immediate 32 exceeds the 5-bit uimm range, so a scalar register + vsra.vx
; is used.
define <vscale x 2 x i64> @vsra_vi_nxv2i64_1(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv2i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 2 x i64> %va, splat (i64 32)
  ret <vscale x 2 x i64> %vc
}
816
; Vector-vector arithmetic shift right, e64 at LMUL=4.
define <vscale x 4 x i64> @vsra_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vsra_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i64> %va, %vb
  ret <vscale x 4 x i64> %vc
}
826
; Splatted scalar shift amount is matched to vsra.vx, e64 at LMUL=4.
define <vscale x 4 x i64> @vsra_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vsra_vx_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %vc = ashr <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}
838
; Immediate 31 fits the 5-bit uimm of vsra.vi.
define <vscale x 4 x i64> @vsra_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i64> %va, splat (i64 31)
  ret <vscale x 4 x i64> %vc
}
848
; Immediate 32 exceeds the 5-bit uimm range, so a scalar register + vsra.vx
; is used.
define <vscale x 4 x i64> @vsra_vi_nxv4i64_1(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv4i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 4 x i64> %va, splat (i64 32)
  ret <vscale x 4 x i64> %vc
}
859
; Vector-vector arithmetic shift right, e64 at LMUL=8.
define <vscale x 8 x i64> @vsra_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vsra_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i64> %va, %vb
  ret <vscale x 8 x i64> %vc
}
869
; Splatted scalar shift amount is matched to vsra.vx, e64 at LMUL=8.
define <vscale x 8 x i64> @vsra_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vsra_vx_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}
881
; Immediate 31 fits the 5-bit uimm of vsra.vi.
define <vscale x 8 x i64> @vsra_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 31
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i64> %va, splat (i64 31)
  ret <vscale x 8 x i64> %vc
}
891
; Immediate 32 exceeds the 5-bit uimm range, so a scalar register + vsra.vx
; is used.
define <vscale x 8 x i64> @vsra_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vsra_vi_nxv8i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vc = ashr <vscale x 8 x i64> %va, splat (i64 32)
  ret <vscale x 8 x i64> %vc
}
902
; A select of the shift amount against zero under %mask folds into a masked
; vsra.vv (tail agnostic, mask undisturbed: "ta, mu").
; Renamed from the misleading @vsra_vv_mask_nxv4i32: the function operates on
; <vscale x 8 x i32>, matching its vsra_vx/vsra_vi mask siblings below.
define <vscale x 8 x i32> @vsra_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vsra_vv_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}
913
; A select of a splatted scalar shift amount against zero under %mask folds
; into a masked vsra.vx.
define <vscale x 8 x i32> @vsra_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vsra_vx_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}
926
; A select of splat(31) against zero under %mask folds into a masked vsra.vi.
define <vscale x 8 x i32> @vsra_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vsra_vi_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vsra.vi v8, v8, 31, v0.t
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 31), <vscale x 8 x i32> zeroinitializer
  %vc = ashr <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}
937
; Negative test. We shouldn't look through the vp.trunc as it isn't vlmax like
; the rest of the code.
; NOTE(review): the zext operand is %va, not %vb, leaving %vb unused. This
; mirrors the non-VP vsra_vv_nxv1i8_sext_zext test above and is baked into the
; generated CHECK lines (both extends read v8) — confirm it is intentional
; before "fixing" it.
define <vscale x 1 x i8> @vsra_vv_nxv1i8_sext_zext_mixed_trunc(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i8_sext_zext_mixed_trunc:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsext.vf4 v9, v8
; CHECK-NEXT:    vzext.vf4 v10, v8
; CHECK-NEXT:    vsra.vv v8, v9, v10
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT:    ret
  %sexted_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i32>
  %zexted_vb = zext <vscale x 1 x i8> %va to <vscale x 1 x i32>
  %expand = ashr <vscale x 1 x i32> %sexted_va, %zexted_vb
  %vc = trunc <vscale x 1 x i32> %expand to <vscale x 1 x i16>
  %vd = call <vscale x 1 x i8> @llvm.vp.trunc.nxv1i8.nxvi16(<vscale x 1 x i16> %vc, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %vd
}
959declare <vscale x 1 x i8> @llvm.vp.trunc.nxv1i8.nxvi16(<vscale x 1 x i16>, <vscale x 1 x i1>, i32)
960