; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in.ll (revision 2967e5f8007d873a3e9d97870d2461d0827a3976)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

; vmsbc.vvm (vector-vector with borrow-in mask) tests for i8 element types,
; covering LMUL mf8 through m8.
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsbc.vvm v12, v8, v10, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmsbc.vvm v16, v8, v12, v0
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmsbc.vvm v24, v8, v16, v0
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i1> %2,
    iXLen %3)

  ret <vscale x 64 x i1> %a
}
167
; vmsbc.vvm (vector-vector with borrow-in mask) tests for i16 element types,
; covering LMUL mf4 through m8.
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmsbc.vvm v12, v8, v10, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmsbc.vvm v16, v8, v12, v0
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmsbc.vvm v24, v8, v16, v0
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i1> %a
}
305
; vmsbc.vvm (vector-vector with borrow-in mask) tests for i32 element types,
; covering LMUL mf2 through m8.
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmsbc.vvm v12, v8, v10, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmsbc.vvm v16, v8, v12, v0
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmsbc.vvm v24, v8, v16, v0
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}
420
; vmsbc.vvm (vector-vector with borrow-in mask) tests for i64 element types,
; covering LMUL m1 through m8.
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsbc.vvm v10, v8, v9, v0
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmsbc.vvm v12, v8, v10, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmsbc.vvm v16, v8, v12, v0
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmsbc.vvm v24, v8, v16, v0
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}
512
; vmsbc.vxm (vector-scalar with borrow-in mask) tests for i8 element types,
; covering LMUL mf8 through m8.
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
; CHECK-NEXT:    vmv.v.v v0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmsbc.vxm v10, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmsbc.vxm v12, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmsbc.vxm v16, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 %1,
    <vscale x 64 x i1> %2,
    iXLen %3)

  ret <vscale x 64 x i1> %a
}
673
; vmsbc.vxm (vector-scalar with borrow-in mask) tests for i16 element types,
; covering LMUL mf4 through m8.
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
; CHECK-NEXT:    vmv.v.v v0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmsbc.vxm v10, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmsbc.vxm v12, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmsbc.vxm v16, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 %1,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i1> %a
}
811
812declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
813  <vscale x 1 x i32>,
814  i32,
815  <vscale x 1 x i1>,
816  iXLen);
817
818define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
819; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32:
820; CHECK:       # %bb.0: # %entry
821; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
822; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
823; CHECK-NEXT:    vmv1r.v v0, v9
824; CHECK-NEXT:    ret
825entry:
826  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
827    <vscale x 1 x i32> %0,
828    i32 %1,
829    <vscale x 1 x i1> %2,
830    iXLen %3)
831
832  ret <vscale x 1 x i1> %a
833}
834
835declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
836  <vscale x 2 x i32>,
837  i32,
838  <vscale x 2 x i1>,
839  iXLen);
840
841define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
842; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32:
843; CHECK:       # %bb.0: # %entry
844; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
845; CHECK-NEXT:    vmsbc.vxm v9, v8, a0, v0
846; CHECK-NEXT:    vmv.v.v v0, v9
847; CHECK-NEXT:    ret
848entry:
849  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
850    <vscale x 2 x i32> %0,
851    i32 %1,
852    <vscale x 2 x i1> %2,
853    iXLen %3)
854
855  ret <vscale x 2 x i1> %a
856}
857
858declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
859  <vscale x 4 x i32>,
860  i32,
861  <vscale x 4 x i1>,
862  iXLen);
863
864define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
865; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32:
866; CHECK:       # %bb.0: # %entry
867; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
868; CHECK-NEXT:    vmsbc.vxm v10, v8, a0, v0
869; CHECK-NEXT:    vmv1r.v v0, v10
870; CHECK-NEXT:    ret
871entry:
872  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
873    <vscale x 4 x i32> %0,
874    i32 %1,
875    <vscale x 4 x i1> %2,
876    iXLen %3)
877
878  ret <vscale x 4 x i1> %a
879}
880
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen);

; SEW=32, LMUL=4. Source group is v8..v11, so the scratch mask destination
; is the next free register, v12.
define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmsbc.vxm v12, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}
903
declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen);

; SEW=32, LMUL=8 (largest i32 case). Source group occupies v8..v15; the
; mask result lands in v16 before being moved to v0.
define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmsbc.vxm v16, v8, a0, v0
; CHECK-NEXT:    vmv1r.v v0, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 %1,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}
926
declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen);

; i64 scalar operand: codegen diverges by XLEN, hence separate RV32/RV64
; check prefixes. On RV32 the i64 scalar arrives split across a0/a1, so it
; is stored to the stack and splatted into a vector via a zero-stride
; vlse64.v, and a vector-vector vmsbc.vvm is used instead of vmsbc.vxm.
; RV64 can pass the i64 in a single GPR and use vmsbc.vxm directly.
define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmsbc.vvm v9, v8, v10, v0
; RV32-NEXT:    vmv.v.v v0, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmsbc.vxm v9, v8, a0, v0
; RV64-NEXT:    vmv.v.v v0, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}
962
declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen);

; SEW=64, LMUL=2. Same RV32 stack-splat + vmsbc.vvm fallback as the m1
; case above; RV64 keeps the direct vmsbc.vxm form.
define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmsbc.vvm v10, v8, v12, v0
; RV32-NEXT:    vmv1r.v v0, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmsbc.vxm v10, v8, a0, v0
; RV64-NEXT:    vmv1r.v v0, v10
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}
998
declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen);

; SEW=64, LMUL=4. RV32 splats the split i64 scalar through the stack into
; v16 (past the v8..v11 source group); RV64 uses vmsbc.vxm directly.
define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmsbc.vvm v12, v8, v16, v0
; RV32-NEXT:    vmv1r.v v0, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmsbc.vxm v12, v8, a0, v0
; RV64-NEXT:    vmv1r.v v0, v12
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}
1034
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen);

; SEW=64, LMUL=8 (largest i64 case). RV32 loads the splatted scalar into
; the v24 group since v8..v15 hold the source; the borrow-out mask goes to
; v16 and is then copied to v0. RV64 stays on the vmsbc.vxm form.
define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v24, (a0), zero
; RV32-NEXT:    vmsbc.vvm v16, v8, v24, v0
; RV32-NEXT:    vmv1r.v v0, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmsbc.vxm v16, v8, a0, v0
; RV64-NEXT:    vmv1r.v v0, v16
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 %1,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}
1070