; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

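; This file checks lowering of the fixed-length @llvm.vp.ashr.* intrinsics to
; the RVV vsra.{vv,vx,vi} instructions: the EVL operand becomes the AVL of the
; vsetvli, and the mask operand (when not all-true) selects the v0.t form.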
declare <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)

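; i7 is not a legal element type, so the operation is promoted to e8: the
; shift amount is zero-extended by masking with 127, and the shifted value is
; sign-extended from bit 6 with a vsll.vi/vsra.vi-by-1 pair before the vsra.vv.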
define <8 x i7> @vsra_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 127
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vx v9, v9, a1, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsra.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i7> %v
}

declare <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)

define <2 x i8> @vsra_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vsra_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vsra_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vsra_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vsra_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> splat (i8 5), <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vsra_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> splat (i8 5), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

declare <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)

define <4 x i8> @vsra_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vsra_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vsra_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vsra_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vsra_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> splat (i8 5), <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vsra_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> splat (i8 5), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

declare <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)

define <7 x i8> @vsra_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v7i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
  ret <7 x i8> %v
}

declare <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)

define <8 x i8> @vsra_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vsra_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vsra_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vsra_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vsra_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> splat (i8 5), <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vsra_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> splat (i8 5), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

declare <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)

define <16 x i8> @vsra_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vsra_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vsra_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vsra_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vsra_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> splat (i8 5), <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vsra_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> splat (i8 5), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

declare <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)

define <2 x i16> @vsra_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vsra_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vsra_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vsra_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vsra_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> splat (i16 5), <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vsra_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> splat (i16 5), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

declare <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)

define <4 x i16> @vsra_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vsra_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vsra_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vsra_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vsra_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> splat (i16 5), <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vsra_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> splat (i16 5), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

declare <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)

define <8 x i16> @vsra_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vsra_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vsra_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vsra_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vsra_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> splat (i16 5), <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vsra_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> splat (i16 5), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

declare <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)

define <16 x i16> @vsra_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vsra_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vsra_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vsra_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vsra_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> splat (i16 5), <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vsra_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> splat (i16 5), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

declare <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)

define <2 x i32> @vsra_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vsra_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vsra_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vsra_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vsra_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> splat (i32 5), <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vsra_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> splat (i32 5), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

declare <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vsra_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vsra_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vsra_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vsra_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vsra_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> splat (i32 5), <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vsra_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> splat (i32 5), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

declare <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)

define <8 x i32> @vsra_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vsra_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vsra_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vsra_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vsra_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> splat (i32 5), <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vsra_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> splat (i32 5), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

declare <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)

define <16 x i32> @vsra_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vsra_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vsra_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vsra_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vsra_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> splat (i32 5), <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vsra_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> splat (i32 5), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

declare <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)

define <2 x i64> @vsra_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vsra_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

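; The .vx checks for i64 diverge between RV32 and RV64 only in which GPR holds
; %evl: on RV32 the i64 scalar %b occupies the a0/a1 register pair, so %evl
; arrives in a2, while on RV64 %b is in a0 and %evl in a1. Since vsra only
; consumes the low log2(SEW) bits of the shift amount, passing the low half of
; %b in a0 suffices on RV32 and no splat through memory is needed.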
define <2 x i64> @vsra_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vsra.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsra_vx_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsra.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vsra_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vsra.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsra_vx_v2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsra.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vsra_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> splat (i64 5), <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vsra_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> splat (i64 5), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

declare <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)

define <4 x i64> @vsra_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vsra_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vsra_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vsra.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsra_vx_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsra.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vsra_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vsra.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsra_vx_v4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsra.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vsra_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> splat (i64 5), <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vsra_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> splat (i64 5), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

declare <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)

define <8 x i64> @vsra_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vsra_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vsra_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vsra.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsra_vx_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsra.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vsra_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vsra.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsra_vx_v8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsra.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vsra_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> splat (i64 5), <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vsra_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> splat (i64 5), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

declare <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)

define <16 x i64> @vsra_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vsra_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vsra_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vsra.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsra_vx_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsra.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vsra_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v16i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vsra.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsra_vx_v16i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsra.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vsra_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> splat (i64 5), <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vsra_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsra.vi v8, v8, 5
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> splat (i64 5), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}