; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vmsif.ll (revision b6c0f1bfa79a3a32d841ac5ab1f94c3aee3b5d90)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsif_m_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmsif.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(
    <vscale x 1 x i1> %0,
    iXLen %1)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsif.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsif_m_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmsif.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1(
    <vscale x 2 x i1> %0,
    iXLen %1)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsif.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsif_m_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsif.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1(
    <vscale x 4 x i1> %0,
    iXLen %1)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsif.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsif_m_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmsif.m v8, v0
; CHECK-NEXT:    vmv.v.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1(
    <vscale x 8 x i1> %0,
    iXLen %1)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsif.m v10, v8, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsif_m_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsif.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1(
    <vscale x 16 x i1> %0,
    iXLen %1)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsif.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i1> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsif_m_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmsif.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1(
    <vscale x 32 x i1> %0,
    iXLen %1)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsif.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i1> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsif_m_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmsif.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1(
    <vscale x 64 x i1> %0,
    iXLen %1)
  ret <vscale x 64 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsif.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
    <vscale x 64 x i1> %0,
    <vscale x 64 x i1> %1,
    <vscale x 64 x i1> %2,
    iXLen %3)
  ret <vscale x 64 x i1> %a
}