; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll (revision d8d131dfa99762ccdd2116661980b7d0493cd7b5)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

; AND of two scalable i1 mask vectors lowers to a single vmand.mm; the vsetvli
; LMUL (mf8..m2) tracks the element count of the mask type.

define <vscale x 1 x i1> @vmand_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmand_vv_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = and <vscale x 1 x i1> %va, %vb
  ret <vscale x 1 x i1> %vc
}

define <vscale x 2 x i1> @vmand_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmand_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = and <vscale x 2 x i1> %va, %vb
  ret <vscale x 2 x i1> %vc
}

define <vscale x 4 x i1> @vmand_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmand_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = and <vscale x 4 x i1> %va, %vb
  ret <vscale x 4 x i1> %vc
}

define <vscale x 8 x i1> @vmand_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmand_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = and <vscale x 8 x i1> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 16 x i1> @vmand_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmand_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = and <vscale x 16 x i1> %va, %vb
  ret <vscale x 16 x i1> %vc
}

; OR of two scalable i1 mask vectors lowers to a single vmor.mm.

define <vscale x 1 x i1> @vmor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmor_vv_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = or <vscale x 1 x i1> %va, %vb
  ret <vscale x 1 x i1> %vc
}

define <vscale x 2 x i1> @vmor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmor_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = or <vscale x 2 x i1> %va, %vb
  ret <vscale x 2 x i1> %vc
}

define <vscale x 4 x i1> @vmor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmor_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = or <vscale x 4 x i1> %va, %vb
  ret <vscale x 4 x i1> %vc
}

define <vscale x 8 x i1> @vmor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmor_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = or <vscale x 8 x i1> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 16 x i1> @vmor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmor_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = or <vscale x 16 x i1> %va, %vb
  ret <vscale x 16 x i1> %vc
}

; XOR of two scalable i1 mask vectors lowers to a single vmxor.mm.

define <vscale x 1 x i1> @vmxor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmxor_vv_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = xor <vscale x 1 x i1> %va, %vb
  ret <vscale x 1 x i1> %vc
}

define <vscale x 2 x i1> @vmxor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmxor_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = xor <vscale x 2 x i1> %va, %vb
  ret <vscale x 2 x i1> %vc
}

define <vscale x 4 x i1> @vmxor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmxor_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = xor <vscale x 4 x i1> %va, %vb
  ret <vscale x 4 x i1> %vc
}

define <vscale x 8 x i1> @vmxor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmxor_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = xor <vscale x 8 x i1> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 16 x i1> @vmxor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmxor_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = xor <vscale x 16 x i1> %va, %vb
  ret <vscale x 16 x i1> %vc
}

; AND followed by a NOT (xor with all-ones splat) folds to a single vmnand.mm.

define <vscale x 1 x i1> @vmnand_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmnand_vv_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmnand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = and <vscale x 1 x i1> %va, %vb
  %not = xor <vscale x 1 x i1> %vc, splat (i1 1)
  ret <vscale x 1 x i1> %not
}

define <vscale x 2 x i1> @vmnand_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmnand_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmnand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = and <vscale x 2 x i1> %va, %vb
  %not = xor <vscale x 2 x i1> %vc, splat (i1 1)
  ret <vscale x 2 x i1> %not
}

define <vscale x 4 x i1> @vmnand_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmnand_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmnand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = and <vscale x 4 x i1> %va, %vb
  %not = xor <vscale x 4 x i1> %vc, splat (i1 1)
  ret <vscale x 4 x i1> %not
}

define <vscale x 8 x i1> @vmnand_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmnand_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmnand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = and <vscale x 8 x i1> %va, %vb
  %not = xor <vscale x 8 x i1> %vc, splat (i1 1)
  ret <vscale x 8 x i1> %not
}

define <vscale x 16 x i1> @vmnand_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmnand_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmnand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = and <vscale x 16 x i1> %va, %vb
  %not = xor <vscale x 16 x i1> %vc, splat (i1 1)
  ret <vscale x 16 x i1> %not
}

; OR followed by a NOT (xor with all-ones splat) folds to a single vmnor.mm.

define <vscale x 1 x i1> @vmnor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmnor_vv_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmnor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = or <vscale x 1 x i1> %va, %vb
  %not = xor <vscale x 1 x i1> %vc, splat (i1 1)
  ret <vscale x 1 x i1> %not
}

define <vscale x 2 x i1> @vmnor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmnor_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmnor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = or <vscale x 2 x i1> %va, %vb
  %not = xor <vscale x 2 x i1> %vc, splat (i1 1)
  ret <vscale x 2 x i1> %not
}

define <vscale x 4 x i1> @vmnor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmnor_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmnor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = or <vscale x 4 x i1> %va, %vb
  %not = xor <vscale x 4 x i1> %vc, splat (i1 1)
  ret <vscale x 4 x i1> %not
}

define <vscale x 8 x i1> @vmnor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmnor_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmnor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = or <vscale x 8 x i1> %va, %vb
  %not = xor <vscale x 8 x i1> %vc, splat (i1 1)
  ret <vscale x 8 x i1> %not
}

define <vscale x 16 x i1> @vmnor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmnor_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmnor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = or <vscale x 16 x i1> %va, %vb
  %not = xor <vscale x 16 x i1> %vc, splat (i1 1)
  ret <vscale x 16 x i1> %not
}

; XOR followed by a NOT (xor with all-ones splat) folds to a single vmxnor.mm.

define <vscale x 1 x i1> @vmxnor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmxnor_vv_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmxnor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = xor <vscale x 1 x i1> %va, %vb
  %not = xor <vscale x 1 x i1> %vc, splat (i1 1)
  ret <vscale x 1 x i1> %not
}

define <vscale x 2 x i1> @vmxnor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmxnor_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmxnor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = xor <vscale x 2 x i1> %va, %vb
  %not = xor <vscale x 2 x i1> %vc, splat (i1 1)
  ret <vscale x 2 x i1> %not
}

define <vscale x 4 x i1> @vmxnor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmxnor_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmxnor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = xor <vscale x 4 x i1> %va, %vb
  %not = xor <vscale x 4 x i1> %vc, splat (i1 1)
  ret <vscale x 4 x i1> %not
}

define <vscale x 8 x i1> @vmxnor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmxnor_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmxnor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = xor <vscale x 8 x i1> %va, %vb
  %not = xor <vscale x 8 x i1> %vc, splat (i1 1)
  ret <vscale x 8 x i1> %not
}

define <vscale x 16 x i1> @vmxnor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmxnor_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmxnor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %vc = xor <vscale x 16 x i1> %va, %vb
  %not = xor <vscale x 16 x i1> %vc, splat (i1 1)
  ret <vscale x 16 x i1> %not
}

; AND with a negated second operand (va & ~vb) folds to a single vmandn.mm.

define <vscale x 1 x i1> @vmandn_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmandn_vv_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmandn.mm v0, v0, v8
; CHECK-NEXT:    ret
  %not = xor <vscale x 1 x i1> %vb, splat (i1 1)
  %vc = and <vscale x 1 x i1> %va, %not
  ret <vscale x 1 x i1> %vc
}

define <vscale x 2 x i1> @vmandn_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmandn_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmandn.mm v0, v0, v8
; CHECK-NEXT:    ret
  %not = xor <vscale x 2 x i1> %vb, splat (i1 1)
  %vc = and <vscale x 2 x i1> %va, %not
  ret <vscale x 2 x i1> %vc
}

define <vscale x 4 x i1> @vmandn_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmandn_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmandn.mm v0, v0, v8
; CHECK-NEXT:    ret
  %not = xor <vscale x 4 x i1> %vb, splat (i1 1)
  %vc = and <vscale x 4 x i1> %va, %not
  ret <vscale x 4 x i1> %vc
}

define <vscale x 8 x i1> @vmandn_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmandn_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmandn.mm v0, v0, v8
; CHECK-NEXT:    ret
  %not = xor <vscale x 8 x i1> %vb, splat (i1 1)
  %vc = and <vscale x 8 x i1> %va, %not
  ret <vscale x 8 x i1> %vc
}

define <vscale x 16 x i1> @vmandn_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmandn_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmandn.mm v0, v0, v8
; CHECK-NEXT:    ret
  %not = xor <vscale x 16 x i1> %vb, splat (i1 1)
  %vc = and <vscale x 16 x i1> %va, %not
  ret <vscale x 16 x i1> %vc
}

; OR with a negated second operand (va | ~vb) folds to a single vmorn.mm.

define <vscale x 1 x i1> @vmorn_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
; CHECK-LABEL: vmorn_vv_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmorn.mm v0, v0, v8
; CHECK-NEXT:    ret
  %not = xor <vscale x 1 x i1> %vb, splat (i1 1)
  %vc = or <vscale x 1 x i1> %va, %not
  ret <vscale x 1 x i1> %vc
}

define <vscale x 2 x i1> @vmorn_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
; CHECK-LABEL: vmorn_vv_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmorn.mm v0, v0, v8
; CHECK-NEXT:    ret
  %not = xor <vscale x 2 x i1> %vb, splat (i1 1)
  %vc = or <vscale x 2 x i1> %va, %not
  ret <vscale x 2 x i1> %vc
}

define <vscale x 4 x i1> @vmorn_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
; CHECK-LABEL: vmorn_vv_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmorn.mm v0, v0, v8
; CHECK-NEXT:    ret
  %not = xor <vscale x 4 x i1> %vb, splat (i1 1)
  %vc = or <vscale x 4 x i1> %va, %not
  ret <vscale x 4 x i1> %vc
}

define <vscale x 8 x i1> @vmorn_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
; CHECK-LABEL: vmorn_vv_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmorn.mm v0, v0, v8
; CHECK-NEXT:    ret
  %not = xor <vscale x 8 x i1> %vb, splat (i1 1)
  %vc = or <vscale x 8 x i1> %va, %not
  ret <vscale x 8 x i1> %vc
}

define <vscale x 16 x i1> @vmorn_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
; CHECK-LABEL: vmorn_vv_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmorn.mm v0, v0, v8
; CHECK-NEXT:    ret
  %not = xor <vscale x 16 x i1> %vb, splat (i1 1)
  %vc = or <vscale x 16 x i1> %va, %not
  ret <vscale x 16 x i1> %vc
}
