; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
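; Tests for the llvm.riscv.vssrl (vector single-width scaling shift right
; logical) intrinsics on riscv32, covering the .vv and .vx forms for every
; unsigned element width and LMUL. Each call passes i32 0 as the
; rounding-mode immediate, so every test expects "csrwi vxrm, 0" ahead of
; the vssrl instruction.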

define <vscale x 1 x i8> @test_vssrl_vv_u8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 0, i32 %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32 immarg, i32)

define <vscale x 1 x i8> @test_vssrl_vx_u8mf8(<vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i32.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i32.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, i32, i32 immarg, i32)

define <vscale x 2 x i8> @test_vssrl_vv_u8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 0, i32 %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32 immarg, i32)

define <vscale x 2 x i8> @test_vssrl_vx_u8mf4(<vscale x 2 x i8> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i32.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i32.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, i32, i32 immarg, i32)

define <vscale x 4 x i8> @test_vssrl_vv_u8mf2(<vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 0, i32 %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 immarg, i32)

define <vscale x 4 x i8> @test_vssrl_vx_u8mf2(<vscale x 4 x i8> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i32.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i32.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, i32, i32 immarg, i32)

define <vscale x 8 x i8> @test_vssrl_vv_u8m1(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 0, i32 %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 immarg, i32)

define <vscale x 8 x i8> @test_vssrl_vx_u8m1(<vscale x 8 x i8> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i32.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i32.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, i32, i32 immarg, i32)

define <vscale x 16 x i8> @test_vssrl_vv_u8m2(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 0, i32 %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32 immarg, i32)

define <vscale x 16 x i8> @test_vssrl_vx_u8m2(<vscale x 16 x i8> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i32.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i32.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, i32, i32 immarg, i32)

define <vscale x 32 x i8> @test_vssrl_vv_u8m4(<vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 0, i32 %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, i32 immarg, i32)

define <vscale x 32 x i8> @test_vssrl_vx_u8m4(<vscale x 32 x i8> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i32.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i32.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, i32, i32 immarg, i32)

define <vscale x 64 x i8> @test_vssrl_vv_u8m8(<vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u8m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 0, i32 %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, i32 immarg, i32)

define <vscale x 64 x i8> @test_vssrl_vx_u8m8(<vscale x 64 x i8> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u8m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i32.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i32.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, i32, i32 immarg, i32)

define <vscale x 1 x i16> @test_vssrl_vv_u16mf4(<vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 0, i32 %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 immarg, i32)

define <vscale x 1 x i16> @test_vssrl_vx_u16mf4(<vscale x 1 x i16> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i32.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i32.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, i32, i32 immarg, i32)

define <vscale x 2 x i16> @test_vssrl_vv_u16mf2(<vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 0, i32 %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32 immarg, i32)

define <vscale x 2 x i16> @test_vssrl_vx_u16mf2(<vscale x 2 x i16> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i32.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i32.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, i32, i32 immarg, i32)

define <vscale x 4 x i16> @test_vssrl_vv_u16m1(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 0, i32 %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32 immarg, i32)

define <vscale x 4 x i16> @test_vssrl_vx_u16m1(<vscale x 4 x i16> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i32.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i32.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, i32, i32 immarg, i32)

define <vscale x 8 x i16> @test_vssrl_vv_u16m2(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 0, i32 %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32 immarg, i32)

define <vscale x 8 x i16> @test_vssrl_vx_u16m2(<vscale x 8 x i16> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i32.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i32.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, i32, i32 immarg, i32)

define <vscale x 16 x i16> @test_vssrl_vv_u16m4(<vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 0, i32 %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, i32 immarg, i32)

define <vscale x 16 x i16> @test_vssrl_vx_u16m4(<vscale x 16 x i16> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i32.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i32.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, i32, i32 immarg, i32)

define <vscale x 32 x i16> @test_vssrl_vv_u16m8(<vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u16m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 0, i32 %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, i32 immarg, i32)

define <vscale x 32 x i16> @test_vssrl_vx_u16m8(<vscale x 32 x i16> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u16m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i32.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i32.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, i32, i32 immarg, i32)

define <vscale x 1 x i32> @test_vssrl_vv_u32mf2(<vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 0, i32 %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 immarg, i32)

define <vscale x 1 x i32> @test_vssrl_vx_u32mf2(<vscale x 1 x i32> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32, i32 immarg, i32)

define <vscale x 2 x i32> @test_vssrl_vv_u32m1(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 0, i32 %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32 immarg, i32)

define <vscale x 2 x i32> @test_vssrl_vx_u32m1(<vscale x 2 x i32> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, i32 immarg, i32)

define <vscale x 4 x i32> @test_vssrl_vv_u32m2(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 0, i32 %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32 immarg, i32)

define <vscale x 4 x i32> @test_vssrl_vx_u32m2(<vscale x 4 x i32> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32, i32 immarg, i32)

define <vscale x 8 x i32> @test_vssrl_vv_u32m4(<vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 0, i32 %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, i32 immarg, i32)

define <vscale x 8 x i32> @test_vssrl_vx_u32m4(<vscale x 8 x i32> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32, i32 immarg, i32)

define <vscale x 16 x i32> @test_vssrl_vv_u32m8(<vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 0, i32 %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, i32 immarg, i32)

define <vscale x 16 x i32> @test_vssrl_vx_u32m8(<vscale x 16 x i32> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, i32, i32 immarg, i32)

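; Note: on riscv32 the scalar shift amount stays i32 (XLEN) even for 64-bit
; elements, as the .nxv*i64.i32.i32 declarations below show, so the .vx
; forms still lower to a single vssrl.vx without splatting a 64-bit scalar.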
define <vscale x 1 x i64> @test_vssrl_vv_u64m1(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 0, i32 %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 immarg, i32)

define <vscale x 1 x i64> @test_vssrl_vx_u64m1(<vscale x 1 x i64> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i32.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i32.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, i32, i32 immarg, i32)

define <vscale x 2 x i64> @test_vssrl_vv_u64m2(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 0, i32 %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 immarg, i32)

define <vscale x 2 x i64> @test_vssrl_vx_u64m2(<vscale x 2 x i64> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i32.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i32.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, i32, i32 immarg, i32)

define <vscale x 4 x i64> @test_vssrl_vv_u64m4(<vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 0, i32 %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, i32 immarg, i32)

define <vscale x 4 x i64> @test_vssrl_vx_u64m4(<vscale x 4 x i64> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i32.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, i32, i32 immarg, i32)

define <vscale x 8 x i64> @test_vssrl_vv_u64m8(<vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 0, i32 %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, i32 immarg, i32)

define <vscale x 8 x i64> @test_vssrl_vx_u64m8(<vscale x 8 x i64> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i32.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i32 %shift, i32 0, i32 %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i32.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, i32, i32 immarg, i32)

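; Masked variants: the mask is passed in v0 and the policy operand is 3
; (tail agnostic, mask agnostic), which matches the "ta, ma" vsetvli and
; the ", v0.t" suffix on each vssrl below.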
621define <vscale x 1 x i8> @test_vssrl_vv_u8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
622; CHECK-LABEL: test_vssrl_vv_u8mf8_m:
623; CHECK:       # %bb.0: # %entry
624; CHECK-NEXT:    csrwi vxrm, 0
625; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
626; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
627; CHECK-NEXT:    ret
628entry:
629  %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
630  ret <vscale x 1 x i8> %0
631}
632
633declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
634
635define <vscale x 1 x i8> @test_vssrl_vx_u8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
636; CHECK-LABEL: test_vssrl_vx_u8mf8_m:
637; CHECK:       # %bb.0: # %entry
638; CHECK-NEXT:    csrwi vxrm, 0
639; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
640; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
641; CHECK-NEXT:    ret
642entry:
643  %0 = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8> poison, <vscale x 1 x i8> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
644  ret <vscale x 1 x i8> %0
645}
646
647declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8>, <vscale x 1 x i8>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
648
649define <vscale x 2 x i8> @test_vssrl_vv_u8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
650; CHECK-LABEL: test_vssrl_vv_u8mf4_m:
651; CHECK:       # %bb.0: # %entry
652; CHECK-NEXT:    csrwi vxrm, 0
653; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
654; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
655; CHECK-NEXT:    ret
656entry:
657  %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
658  ret <vscale x 2 x i8> %0
659}
660
661declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
662
663define <vscale x 2 x i8> @test_vssrl_vx_u8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, i32 %shift, i32 %vl) {
664; CHECK-LABEL: test_vssrl_vx_u8mf4_m:
665; CHECK:       # %bb.0: # %entry
666; CHECK-NEXT:    csrwi vxrm, 0
667; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
668; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
669; CHECK-NEXT:    ret
670entry:
671  %0 = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8> poison, <vscale x 2 x i8> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
672  ret <vscale x 2 x i8> %0
673}
674
675declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8>, <vscale x 2 x i8>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
676
677define <vscale x 4 x i8> @test_vssrl_vv_u8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 %vl) {
678; CHECK-LABEL: test_vssrl_vv_u8mf2_m:
679; CHECK:       # %bb.0: # %entry
680; CHECK-NEXT:    csrwi vxrm, 0
681; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
682; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
683; CHECK-NEXT:    ret
684entry:
685  %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
686  ret <vscale x 4 x i8> %0
687}
688
689declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
690
691define <vscale x 4 x i8> @test_vssrl_vx_u8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, i32 %shift, i32 %vl) {
692; CHECK-LABEL: test_vssrl_vx_u8mf2_m:
693; CHECK:       # %bb.0: # %entry
694; CHECK-NEXT:    csrwi vxrm, 0
695; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
696; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
697; CHECK-NEXT:    ret
698entry:
699  %0 = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8> poison, <vscale x 4 x i8> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
700  ret <vscale x 4 x i8> %0
701}
702
703declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8>, <vscale x 4 x i8>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
704
705define <vscale x 8 x i8> @test_vssrl_vv_u8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 %vl) {
706; CHECK-LABEL: test_vssrl_vv_u8m1_m:
707; CHECK:       # %bb.0: # %entry
708; CHECK-NEXT:    csrwi vxrm, 0
709; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
710; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
711; CHECK-NEXT:    ret
712entry:
713  %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
714  ret <vscale x 8 x i8> %0
715}
716
717declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
718
719define <vscale x 8 x i8> @test_vssrl_vx_u8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, i32 %shift, i32 %vl) {
720; CHECK-LABEL: test_vssrl_vx_u8m1_m:
721; CHECK:       # %bb.0: # %entry
722; CHECK-NEXT:    csrwi vxrm, 0
723; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
724; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
725; CHECK-NEXT:    ret
726entry:
727  %0 = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8> poison, <vscale x 8 x i8> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
728  ret <vscale x 8 x i8> %0
729}
730
731declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8>, <vscale x 8 x i8>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
732
733define <vscale x 16 x i8> @test_vssrl_vv_u8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 %vl) {
734; CHECK-LABEL: test_vssrl_vv_u8m2_m:
735; CHECK:       # %bb.0: # %entry
736; CHECK-NEXT:    csrwi vxrm, 0
737; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
738; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
739; CHECK-NEXT:    ret
740entry:
741  %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
742  ret <vscale x 16 x i8> %0
743}
744
745declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
746
747define <vscale x 16 x i8> @test_vssrl_vx_u8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, i32 %shift, i32 %vl) {
748; CHECK-LABEL: test_vssrl_vx_u8m2_m:
749; CHECK:       # %bb.0: # %entry
750; CHECK-NEXT:    csrwi vxrm, 0
751; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
752; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
753; CHECK-NEXT:    ret
754entry:
755  %0 = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i32.i32(<vscale x 16 x i8> poison, <vscale x 16 x i8> %op1, i32 %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
756  ret <vscale x 16 x i8> %0
757}
758
759declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i32.i32(<vscale x 16 x i8>, <vscale x 16 x i8>, i32, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
760
761define <vscale x 32 x i8> @test_vssrl_vv_u8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 %vl) {
762; CHECK-LABEL: test_vssrl_vv_u8m4_m:
763; CHECK:       # %bb.0: # %entry
764; CHECK-NEXT:    csrwi vxrm, 0
765; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
766; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
767; CHECK-NEXT:    ret
768entry:
769  %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
770  ret <vscale x 32 x i8> %0
771}
772
773declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
774
775define <vscale x 32 x i8> @test_vssrl_vx_u8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, i32 %shift, i32 %vl) {
776; CHECK-LABEL: test_vssrl_vx_u8m4_m:
777; CHECK:       # %bb.0: # %entry
778; CHECK-NEXT:    csrwi vxrm, 0
779; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
780; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
781; CHECK-NEXT:    ret
782entry:
783  %0 = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i32.i32(<vscale x 32 x i8> poison, <vscale x 32 x i8> %op1, i32 %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
784  ret <vscale x 32 x i8> %0
785}
786
787declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i32.i32(<vscale x 32 x i8>, <vscale x 32 x i8>, i32, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
788
789define <vscale x 64 x i8> @test_vssrl_vv_u8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 %vl) {
790; CHECK-LABEL: test_vssrl_vv_u8m8_m:
791; CHECK:       # %bb.0: # %entry
792; CHECK-NEXT:    csrwi vxrm, 0
793; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
794; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
795; CHECK-NEXT:    ret
796entry:
797  %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, <vscale x 64 x i1> %mask, i32 0, i32 %vl, i32 3)
798  ret <vscale x 64 x i8> %0
799}
800
801declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32 immarg, i32, i32 immarg)
802
803define <vscale x 64 x i8> @test_vssrl_vx_u8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, i32 %shift, i32 %vl) {
804; CHECK-LABEL: test_vssrl_vx_u8m8_m:
805; CHECK:       # %bb.0: # %entry
806; CHECK-NEXT:    csrwi vxrm, 0
807; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
808; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
809; CHECK-NEXT:    ret
810entry:
811  %0 = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i32.i32(<vscale x 64 x i8> poison, <vscale x 64 x i8> %op1, i32 %shift, <vscale x 64 x i1> %mask, i32 0, i32 %vl, i32 3)
812  ret <vscale x 64 x i8> %0
813}
814
815declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i32.i32(<vscale x 64 x i8>, <vscale x 64 x i8>, i32, <vscale x 64 x i1>, i32 immarg, i32, i32 immarg)
816
817define <vscale x 1 x i16> @test_vssrl_vv_u16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 %vl) {
818; CHECK-LABEL: test_vssrl_vv_u16mf4_m:
819; CHECK:       # %bb.0: # %entry
820; CHECK-NEXT:    csrwi vxrm, 0
821; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
822; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
823; CHECK-NEXT:    ret
824entry:
825  %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
826  ret <vscale x 1 x i16> %0
827}
828
829declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
830
831define <vscale x 1 x i16> @test_vssrl_vx_u16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, i32 %shift, i32 %vl) {
832; CHECK-LABEL: test_vssrl_vx_u16mf4_m:
833; CHECK:       # %bb.0: # %entry
834; CHECK-NEXT:    csrwi vxrm, 0
835; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
836; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
837; CHECK-NEXT:    ret
838entry:
839  %0 = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i32.i32(<vscale x 1 x i16> poison, <vscale x 1 x i16> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
840  ret <vscale x 1 x i16> %0
841}
842
843declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i32.i32(<vscale x 1 x i16>, <vscale x 1 x i16>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
844
845define <vscale x 2 x i16> @test_vssrl_vv_u16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 %vl) {
846; CHECK-LABEL: test_vssrl_vv_u16mf2_m:
847; CHECK:       # %bb.0: # %entry
848; CHECK-NEXT:    csrwi vxrm, 0
849; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
850; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
851; CHECK-NEXT:    ret
852entry:
853  %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
854  ret <vscale x 2 x i16> %0
855}
856
857declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
858
859define <vscale x 2 x i16> @test_vssrl_vx_u16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, i32 %shift, i32 %vl) {
860; CHECK-LABEL: test_vssrl_vx_u16mf2_m:
861; CHECK:       # %bb.0: # %entry
862; CHECK-NEXT:    csrwi vxrm, 0
863; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
864; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
865; CHECK-NEXT:    ret
866entry:
867  %0 = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i32.i32(<vscale x 2 x i16> poison, <vscale x 2 x i16> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
868  ret <vscale x 2 x i16> %0
869}
870
871declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i32.i32(<vscale x 2 x i16>, <vscale x 2 x i16>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
872
873define <vscale x 4 x i16> @test_vssrl_vv_u16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 %vl) {
874; CHECK-LABEL: test_vssrl_vv_u16m1_m:
875; CHECK:       # %bb.0: # %entry
876; CHECK-NEXT:    csrwi vxrm, 0
877; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
878; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
879; CHECK-NEXT:    ret
880entry:
881  %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
882  ret <vscale x 4 x i16> %0
883}
884
885declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
886
887define <vscale x 4 x i16> @test_vssrl_vx_u16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, i32 %shift, i32 %vl) {
888; CHECK-LABEL: test_vssrl_vx_u16m1_m:
889; CHECK:       # %bb.0: # %entry
890; CHECK-NEXT:    csrwi vxrm, 0
891; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
892; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
893; CHECK-NEXT:    ret
894entry:
895  %0 = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i32.i32(<vscale x 4 x i16> poison, <vscale x 4 x i16> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
896  ret <vscale x 4 x i16> %0
897}
898
899declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i32.i32(<vscale x 4 x i16>, <vscale x 4 x i16>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
900
901define <vscale x 8 x i16> @test_vssrl_vv_u16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 %vl) {
902; CHECK-LABEL: test_vssrl_vv_u16m2_m:
903; CHECK:       # %bb.0: # %entry
904; CHECK-NEXT:    csrwi vxrm, 0
905; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
906; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
907; CHECK-NEXT:    ret
908entry:
909  %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
910  ret <vscale x 8 x i16> %0
911}
912
913declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
914
915define <vscale x 8 x i16> @test_vssrl_vx_u16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, i32 %shift, i32 %vl) {
916; CHECK-LABEL: test_vssrl_vx_u16m2_m:
917; CHECK:       # %bb.0: # %entry
918; CHECK-NEXT:    csrwi vxrm, 0
919; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
920; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
921; CHECK-NEXT:    ret
922entry:
923  %0 = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i32.i32(<vscale x 8 x i16> poison, <vscale x 8 x i16> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
924  ret <vscale x 8 x i16> %0
925}
926
927declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i32.i32(<vscale x 8 x i16>, <vscale x 8 x i16>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
928
929define <vscale x 16 x i16> @test_vssrl_vv_u16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 %vl) {
930; CHECK-LABEL: test_vssrl_vv_u16m4_m:
931; CHECK:       # %bb.0: # %entry
932; CHECK-NEXT:    csrwi vxrm, 0
933; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
934; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
935; CHECK-NEXT:    ret
936entry:
937  %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
938  ret <vscale x 16 x i16> %0
939}
940
941declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
942
943define <vscale x 16 x i16> @test_vssrl_vx_u16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, i32 %shift, i32 %vl) {
944; CHECK-LABEL: test_vssrl_vx_u16m4_m:
945; CHECK:       # %bb.0: # %entry
946; CHECK-NEXT:    csrwi vxrm, 0
947; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
948; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
949; CHECK-NEXT:    ret
950entry:
951  %0 = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i32.i32(<vscale x 16 x i16> poison, <vscale x 16 x i16> %op1, i32 %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
952  ret <vscale x 16 x i16> %0
953}
954
955declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i32.i32(<vscale x 16 x i16>, <vscale x 16 x i16>, i32, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)
956
957define <vscale x 32 x i16> @test_vssrl_vv_u16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 %vl) {
958; CHECK-LABEL: test_vssrl_vv_u16m8_m:
959; CHECK:       # %bb.0: # %entry
960; CHECK-NEXT:    csrwi vxrm, 0
961; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
962; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
963; CHECK-NEXT:    ret
964entry:
965  %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
966  ret <vscale x 32 x i16> %0
967}
968
969declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
970
971define <vscale x 32 x i16> @test_vssrl_vx_u16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, i32 %shift, i32 %vl) {
972; CHECK-LABEL: test_vssrl_vx_u16m8_m:
973; CHECK:       # %bb.0: # %entry
974; CHECK-NEXT:    csrwi vxrm, 0
975; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
976; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
977; CHECK-NEXT:    ret
978entry:
979  %0 = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i32.i32(<vscale x 32 x i16> poison, <vscale x 32 x i16> %op1, i32 %shift, <vscale x 32 x i1> %mask, i32 0, i32 %vl, i32 3)
980  ret <vscale x 32 x i16> %0
981}
982
983declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i32.i32(<vscale x 32 x i16>, <vscale x 32 x i16>, i32, <vscale x 32 x i1>, i32 immarg, i32, i32 immarg)
984
985define <vscale x 1 x i32> @test_vssrl_vv_u32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 %vl) {
986; CHECK-LABEL: test_vssrl_vv_u32mf2_m:
987; CHECK:       # %bb.0: # %entry
988; CHECK-NEXT:    csrwi vxrm, 0
989; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
990; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
991; CHECK-NEXT:    ret
992entry:
993  %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
994  ret <vscale x 1 x i32> %0
995}
996
997declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
998
999define <vscale x 1 x i32> @test_vssrl_vx_u32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, i32 %shift, i32 %vl) {
1000; CHECK-LABEL: test_vssrl_vx_u32mf2_m:
1001; CHECK:       # %bb.0: # %entry
1002; CHECK-NEXT:    csrwi vxrm, 0
1003; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1004; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
1005; CHECK-NEXT:    ret
1006entry:
1007  %0 = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32.i32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
1008  ret <vscale x 1 x i32> %0
1009}
1010
1011declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32.i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)
1012
1013define <vscale x 2 x i32> @test_vssrl_vv_u32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 %vl) {
1014; CHECK-LABEL: test_vssrl_vv_u32m1_m:
1015; CHECK:       # %bb.0: # %entry
1016; CHECK-NEXT:    csrwi vxrm, 0
1017; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
1018; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
1019; CHECK-NEXT:    ret
1020entry:
1021  %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
1022  ret <vscale x 2 x i32> %0
1023}
1024
1025declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
1026
1027define <vscale x 2 x i32> @test_vssrl_vx_u32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, i32 %shift, i32 %vl) {
1028; CHECK-LABEL: test_vssrl_vx_u32m1_m:
1029; CHECK:       # %bb.0: # %entry
1030; CHECK-NEXT:    csrwi vxrm, 0
1031; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1032; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
1033; CHECK-NEXT:    ret
1034entry:
1035  %0 = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32.i32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
1036  ret <vscale x 2 x i32> %0
1037}
1038
1039declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)
1040
1041define <vscale x 4 x i32> @test_vssrl_vv_u32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 %vl) {
1042; CHECK-LABEL: test_vssrl_vv_u32m2_m:
1043; CHECK:       # %bb.0: # %entry
1044; CHECK-NEXT:    csrwi vxrm, 0
1045; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
1046; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
1047; CHECK-NEXT:    ret
1048entry:
1049  %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
1050  ret <vscale x 4 x i32> %0
1051}
1052
1053declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
1054
1055define <vscale x 4 x i32> @test_vssrl_vx_u32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, i32 %shift, i32 %vl) {
1056; CHECK-LABEL: test_vssrl_vx_u32m2_m:
1057; CHECK:       # %bb.0: # %entry
1058; CHECK-NEXT:    csrwi vxrm, 0
1059; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1060; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
1061; CHECK-NEXT:    ret
1062entry:
1063  %0 = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
1064  ret <vscale x 4 x i32> %0
1065}
1066
1067declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)
1068
1069define <vscale x 8 x i32> @test_vssrl_vv_u32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 %vl) {
1070; CHECK-LABEL: test_vssrl_vv_u32m4_m:
1071; CHECK:       # %bb.0: # %entry
1072; CHECK-NEXT:    csrwi vxrm, 0
1073; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
1074; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
1075; CHECK-NEXT:    ret
1076entry:
1077  %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
1078  ret <vscale x 8 x i32> %0
1079}
1080
1081declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)
1082
1083define <vscale x 8 x i32> @test_vssrl_vx_u32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u32m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32.i32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 16 x i32> @test_vssrl_vv_u32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u32m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 16 x i32> @test_vssrl_vx_u32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u32m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32.i32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %op1, i32 %shift, <vscale x 16 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>, i32, <vscale x 16 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 1 x i64> @test_vssrl_vv_u64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u64m1_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 1 x i64> @test_vssrl_vx_u64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u64m1_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i32.i32(<vscale x 1 x i64> poison, <vscale x 1 x i64> %op1, i32 %shift, <vscale x 1 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i32.i32(<vscale x 1 x i64>, <vscale x 1 x i64>, i32, <vscale x 1 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 2 x i64> @test_vssrl_vv_u64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u64m2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 2 x i64> @test_vssrl_vx_u64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u64m2_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i32.i32(<vscale x 2 x i64> poison, <vscale x 2 x i64> %op1, i32 %shift, <vscale x 2 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i32.i32(<vscale x 2 x i64>, <vscale x 2 x i64>, i32, <vscale x 2 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 4 x i64> @test_vssrl_vv_u64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u64m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 4 x i64> @test_vssrl_vx_u64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u64m4_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i32.i32(<vscale x 4 x i64> poison, <vscale x 4 x i64> %op1, i32 %shift, <vscale x 4 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i32.i32(<vscale x 4 x i64>, <vscale x 4 x i64>, i32, <vscale x 4 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 8 x i64> @test_vssrl_vv_u64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vv_u64m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)

define <vscale x 8 x i64> @test_vssrl_vx_u64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, i32 %shift, i32 %vl) {
; CHECK-LABEL: test_vssrl_vx_u64m8_m:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64> poison, <vscale x 8 x i64> %op1, i32 %shift, <vscale x 8 x i1> %mask, i32 0, i32 %vl, i32 3)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64>, <vscale x 8 x i64>, i32, <vscale x 8 x i1>, i32 immarg, i32, i32 immarg)