; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

declare <8 x i7> @llvm.vp.lshr.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)

; i7 is a non-byte element type: both operands are masked to 7 bits
; (vand.vx with 127) before the shift, per the CHECK lines below.
define <8 x i7> @vsrl_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 127
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vx v9, v9, a1, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i7> @llvm.vp.lshr.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i7> %v
}

; <2 x i8> tests.
declare <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)

define <2 x i8> @vsrl_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vsrl_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vsrl_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vsrl_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vsrl_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> splat (i8 4), <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vsrl_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> splat (i8 4), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

; <4 x i8> tests.
declare <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)

define <4 x i8> @vsrl_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vsrl_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vsrl_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vsrl_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vsrl_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> splat (i8 4), <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vsrl_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> splat (i8 4), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

; <7 x i8> (non-power-of-two element count) tests.
declare <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)

define <7 x i8> @vsrl_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v7i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
  ret <7 x i8> %v
}

; <8 x i8> tests.
declare <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)

define <8 x i8> @vsrl_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vsrl_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vsrl_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vsrl_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vsrl_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vsrl_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> splat (i8 4), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

; <16 x i8> tests.
declare <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)

define <16 x i8> @vsrl_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vsrl_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vsrl_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vsrl_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vsrl_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> splat (i8 4), <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vsrl_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> splat (i8 4), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

; <2 x i16> tests.
declare <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)

define <2 x i16> @vsrl_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vsrl_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vsrl_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vsrl_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vsrl_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> splat (i16 4), <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vsrl_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> splat (i16 4), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

; <4 x i16> tests.
declare <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)

define <4 x i16> @vsrl_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vsrl_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vsrl_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vsrl_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vsrl_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> splat (i16 4), <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vsrl_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> splat (i16 4), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

; <8 x i16> tests.
declare <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)

define <8 x i16> @vsrl_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vsrl_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vsrl_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vsrl_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vsrl_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> splat (i16 4), <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vsrl_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> splat (i16 4), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

; <16 x i16> tests (LMUL=2, shift operand arrives in v10).
declare <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)

define <16 x i16> @vsrl_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vsrl_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vsrl_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vsrl_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vsrl_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> splat (i16 4), <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vsrl_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> splat (i16 4), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

; <2 x i32> tests.
declare <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)

define <2 x i32> @vsrl_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vsrl_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vsrl_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vsrl_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vsrl_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> splat (i32 4), <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vsrl_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> splat (i32 4), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

; <4 x i32> tests.
declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vsrl_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vsrl_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vsrl_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vsrl_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vsrl_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> splat (i32 4), <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vsrl_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> splat (i32 4), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}
693
694declare <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
695
696define <8 x i32> @vsrl_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
697; CHECK-LABEL: vsrl_vv_v8i32:
698; CHECK:       # %bb.0:
699; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
700; CHECK-NEXT:    vsrl.vv v8, v8, v10, v0.t
701; CHECK-NEXT:    ret
702  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
703  ret <8 x i32> %v
704}
705
706define <8 x i32> @vsrl_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
707; CHECK-LABEL: vsrl_vv_v8i32_unmasked:
708; CHECK:       # %bb.0:
709; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
710; CHECK-NEXT:    vsrl.vv v8, v8, v10
711; CHECK-NEXT:    ret
712  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
713  ret <8 x i32> %v
714}
715
716define <8 x i32> @vsrl_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
717; CHECK-LABEL: vsrl_vx_v8i32:
718; CHECK:       # %bb.0:
719; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
720; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
721; CHECK-NEXT:    ret
722  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
723  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
724  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
725  ret <8 x i32> %v
726}
727
728define <8 x i32> @vsrl_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
729; CHECK-LABEL: vsrl_vx_v8i32_unmasked:
730; CHECK:       # %bb.0:
731; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
732; CHECK-NEXT:    vsrl.vx v8, v8, a0
733; CHECK-NEXT:    ret
734  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
735  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
736  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
737  ret <8 x i32> %v
738}
739
; Masked lshr by the constant splat 4: selects the immediate form vsrl.vi.
define <8 x i32> @vsrl_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}
749
; Unmasked constant-splat lshr: vsrl.vi without the v0.t mask suffix.
define <8 x i32> @vsrl_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> splat (i32 4), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}
759
760declare <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)
761
; Masked vector-vector lshr on <16 x i32>: one vsrl.vv under v0.t at e32/m4,
; with the EVL driving vsetvli.
define <16 x i32> @vsrl_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}
771
; Unmasked vector-vector lshr: the all-ones mask folds to plain vsrl.vv.
define <16 x i32> @vsrl_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}
781
; Masked lshr by a splatted scalar: the splat idiom folds into vsrl.vx.
define <16 x i32> @vsrl_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}
793
; Unmasked splatted-scalar lshr: vsrl.vx without the mask suffix.
define <16 x i32> @vsrl_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsrl.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}
805
; Masked lshr by the constant splat 4: immediate form vsrl.vi under v0.t.
define <16 x i32> @vsrl_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> splat (i32 4), <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}
815
; Unmasked constant-splat lshr: plain vsrl.vi.
define <16 x i32> @vsrl_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> splat (i32 4), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}
825
826declare <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)
827
; Masked vector-vector lshr on <2 x i64>: one vsrl.vv under v0.t at e64/m1.
define <2 x i64> @vsrl_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}
837
; Unmasked vector-vector lshr: the all-ones mask folds to plain vsrl.vv.
define <2 x i64> @vsrl_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}
847
; Masked lshr by a splatted i64 scalar. Per-target checks: on RV32 the i64 %b
; occupies the a0/a1 pair (so %evl lands in a2) and only a0 feeds vsrl.vx; on
; RV64 %b is a single GPR (a0) and %evl is in a1.
define <2 x i64> @vsrl_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsrl_vx_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}
865
; Unmasked variant of the splatted i64-scalar lshr; same RV32/RV64 register
; layout as the masked test, minus the v0.t suffix.
define <2 x i64> @vsrl_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsrl_vx_v2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsrl.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}
883
; Masked lshr by the constant splat 4: immediate form vsrl.vi under v0.t.
define <2 x i64> @vsrl_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> splat (i64 4), <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}
893
; Unmasked constant-splat lshr: plain vsrl.vi.
define <2 x i64> @vsrl_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> splat (i64 4), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}
903
904declare <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)
905
; Masked vector-vector lshr on <4 x i64>: one vsrl.vv under v0.t at e64/m2.
define <4 x i64> @vsrl_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}
915
; Unmasked vector-vector lshr: the all-ones mask folds to plain vsrl.vv.
define <4 x i64> @vsrl_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}
925
; Masked lshr by a splatted i64 scalar. RV32 passes %b in a0/a1 (%evl in a2)
; and only a0 feeds vsrl.vx; RV64 passes %b in a0 (%evl in a1).
define <4 x i64> @vsrl_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsrl_vx_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}
943
; Unmasked variant of the splatted i64-scalar lshr; same RV32/RV64 register
; layout as the masked test, minus the v0.t suffix.
define <4 x i64> @vsrl_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vsrl.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsrl_vx_v4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsrl.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}
961
; Masked lshr by the constant splat 4: immediate form vsrl.vi under v0.t.
define <4 x i64> @vsrl_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> splat (i64 4), <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}
971
; Unmasked constant-splat lshr: plain vsrl.vi.
define <4 x i64> @vsrl_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> splat (i64 4), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}
981
982declare <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)
983
; Masked vector-vector lshr on <8 x i64>: one vsrl.vv under v0.t at e64/m4.
define <8 x i64> @vsrl_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}
993
; Unmasked vector-vector lshr: the all-ones mask folds to plain vsrl.vv.
define <8 x i64> @vsrl_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}
1003
; Masked lshr by a splatted i64 scalar. RV32 passes %b in a0/a1 (%evl in a2)
; and only a0 feeds vsrl.vx; RV64 passes %b in a0 (%evl in a1).
define <8 x i64> @vsrl_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsrl_vx_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}
1021
; Unmasked variant of the splatted i64-scalar lshr; same RV32/RV64 register
; layout as the masked test, minus the v0.t suffix.
define <8 x i64> @vsrl_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vsrl.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsrl_vx_v8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsrl.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}
1039
; Masked lshr by the constant splat 4: immediate form vsrl.vi under v0.t.
define <8 x i64> @vsrl_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}
1049
; Unmasked constant-splat lshr: plain vsrl.vi.
define <8 x i64> @vsrl_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> splat (i64 4), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}
1059
1060declare <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
1061
; Masked vector-vector lshr on <16 x i64>: one vsrl.vv under v0.t at e64/m8.
define <16 x i64> @vsrl_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}
1071
; Unmasked vector-vector lshr: the all-ones mask folds to plain vsrl.vv.
define <16 x i64> @vsrl_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsrl.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}
1081
; Masked lshr by a splatted i64 scalar. RV32 passes %b in a0/a1 (%evl in a2)
; and only a0 feeds vsrl.vx; RV64 passes %b in a0 (%evl in a1).
define <16 x i64> @vsrl_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vsrl_vx_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}
1099
; Unmasked variant of the splatted i64-scalar lshr; same RV32/RV64 register
; layout as the masked test, minus the v0.t suffix.
define <16 x i64> @vsrl_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v16i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vsrl.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsrl_vx_v16i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsrl.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}
1117
; Masked lshr by the constant splat 4: immediate form vsrl.vi under v0.t.
define <16 x i64> @vsrl_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> splat (i64 4), <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}
1127
; Unmasked constant-splat lshr: plain vsrl.vi.
define <16 x i64> @vsrl_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> splat (i64 4), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}
1137