; Source: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll (revision ebb27ccb08e0579825a53b218ff5b2ddc492626a)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB32
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB64

declare <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 1 x i8> @vandn_vv_vp_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv1i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %not.a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i8> %x
}

; Commuted form: (and %b, (xor %a, -1)) also folds to vandn.vv under Zvkb.
define <vscale x 1 x i8> @vandn_vv_vp_swapped_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv1i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i8> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i8> %x
}

; Scalar splat form: (and %b, (splat (xor %a, -1))) folds to vandn.vx under Zvkb.
define <vscale x 1 x i8> @vandn_vx_vp_nxv1i8(i8 %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv1i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 1 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 1 x i8> %head.not.a, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i8> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i8> %x
}

declare <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 2 x i8> @vandn_vv_vp_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv2i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %not.a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i8> %x
}

; Commuted form: (and %b, (xor %a, -1)) also folds to vandn.vv under Zvkb.
define <vscale x 2 x i8> @vandn_vv_vp_swapped_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv2i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %b, <vscale x 2 x i8> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i8> %x
}

; Scalar splat form: (and %b, (splat (xor %a, -1))) folds to vandn.vx under Zvkb.
define <vscale x 2 x i8> @vandn_vx_vp_nxv2i8(i8 %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv2i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 2 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 2 x i8> %head.not.a, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %b, <vscale x 2 x i8> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i8> %x
}

declare <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 4 x i8> @vandn_vv_vp_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv4i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %not.a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i8> %x
}

; Commuted form: (and %b, (xor %a, -1)) also folds to vandn.vv under Zvkb.
define <vscale x 4 x i8> @vandn_vv_vp_swapped_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv4i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %b, <vscale x 4 x i8> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i8> %x
}

; Scalar splat form: (and %b, (splat (xor %a, -1))) folds to vandn.vx under Zvkb.
define <vscale x 4 x i8> @vandn_vx_vp_nxv4i8(i8 %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv4i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 4 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 4 x i8> %head.not.a, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %b, <vscale x 4 x i8> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i8> %x
}

declare <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 8 x i8> @vandn_vv_vp_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv8i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %not.a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i8> %x
}

; Commuted form: (and %b, (xor %a, -1)) also folds to vandn.vv under Zvkb.
define <vscale x 8 x i8> @vandn_vv_vp_swapped_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv8i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %b, <vscale x 8 x i8> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i8> %x
}

; Scalar splat form: (and %b, (splat (xor %a, -1))) folds to vandn.vx under Zvkb.
define <vscale x 8 x i8> @vandn_vx_vp_nxv8i8(i8 %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv8i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 8 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 8 x i8> %head.not.a, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %b, <vscale x 8 x i8> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i8> %x
}

declare <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 16 x i8> @vandn_vv_vp_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv16i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %not.a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i8> %x
}

; Commuted form: (and %b, (xor %a, -1)) also folds to vandn.vv under Zvkb.
define <vscale x 16 x i8> @vandn_vv_vp_swapped_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv16i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %b, <vscale x 16 x i8> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i8> %x
}

; Scalar splat form: (and %b, (splat (xor %a, -1))) folds to vandn.vx under Zvkb.
define <vscale x 16 x i8> @vandn_vx_vp_nxv16i8(i8 %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv16i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 16 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 16 x i8> %head.not.a, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %b, <vscale x 16 x i8> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i8> %x
}

declare <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
declare <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 32 x i8> @vandn_vv_vp_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv32i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %mask, i32 %evl)
  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %not.a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i8> %x
}

; Commuted form: (and %b, (xor %a, -1)) also folds to vandn.vv under Zvkb.
define <vscale x 32 x i8> @vandn_vv_vp_swapped_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv32i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %mask, i32 %evl)
  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %b, <vscale x 32 x i8> %not.a, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i8> %x
}

; Scalar splat form: (and %b, (splat (xor %a, -1))) folds to vandn.vx under Zvkb.
define <vscale x 32 x i8> @vandn_vx_vp_nxv32i8(i8 %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv32i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 32 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 32 x i8> %head.not.a, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %b, <vscale x 32 x i8> %splat.not.a, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i8> %x
}

declare <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
declare <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 64 x i8> @vandn_vv_vp_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv64i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %mask, i32 %evl)
  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %not.a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 %evl)
  ret <vscale x 64 x i8> %x
}

; Commuted form: (and %b, (xor %a, -1)) also folds to vandn.vv under Zvkb.
define <vscale x 64 x i8> @vandn_vv_vp_swapped_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv64i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %mask, i32 %evl)
  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %b, <vscale x 64 x i8> %not.a, <vscale x 64 x i1> %mask, i32 %evl)
  ret <vscale x 64 x i8> %x
}

; Scalar splat form: (and %b, (splat (xor %a, -1))) folds to vandn.vx under Zvkb.
define <vscale x 64 x i8> @vandn_vx_vp_nxv64i8(i8 %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv64i8:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 64 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 64 x i8> %head.not.a, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %b, <vscale x 64 x i8> %splat.not.a, <vscale x 64 x i1> %mask, i32 %evl)
  ret <vscale x 64 x i8> %x
}

declare <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 1 x i16> @vandn_vv_vp_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv1i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %not.a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i16> %x
}

; Commuted form: (and %b, (xor %a, -1)) also folds to vandn.vv under Zvkb.
define <vscale x 1 x i16> @vandn_vv_vp_swapped_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv1i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i16> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i16> %x
}

; Scalar splat form: (and %b, (splat (xor %a, -1))) folds to vandn.vx under Zvkb.
define <vscale x 1 x i16> @vandn_vx_vp_nxv1i16(i16 %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv1i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 1 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 1 x i16> %head.not.a, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i16> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i16> %x
}

declare <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 2 x i16> @vandn_vv_vp_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv2i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %not.a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i16> %x
}

; Commuted form: (and %b, (xor %a, -1)) also folds to vandn.vv under Zvkb.
define <vscale x 2 x i16> @vandn_vv_vp_swapped_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv2i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %b, <vscale x 2 x i16> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i16> %x
}

; Scalar splat form: (and %b, (splat (xor %a, -1))) folds to vandn.vx under Zvkb.
define <vscale x 2 x i16> @vandn_vx_vp_nxv2i16(i16 %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv2i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 2 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 2 x i16> %head.not.a, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %b, <vscale x 2 x i16> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i16> %x
}

declare <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 4 x i16> @vandn_vv_vp_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv4i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %not.a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i16> %x
}

; Commuted form: (and %b, (xor %a, -1)) also folds to vandn.vv under Zvkb.
define <vscale x 4 x i16> @vandn_vv_vp_swapped_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv4i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %b, <vscale x 4 x i16> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i16> %x
}

; Scalar splat form: (and %b, (splat (xor %a, -1))) folds to vandn.vx under Zvkb.
define <vscale x 4 x i16> @vandn_vx_vp_nxv4i16(i16 %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv4i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 4 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 4 x i16> %head.not.a, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %b, <vscale x 4 x i16> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i16> %x
}

declare <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

; (and (xor %a, -1), %b) folds to vandn.vv when Zvkb is available.
define <vscale x 8 x i16> @vandn_vv_vp_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv8i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %not.a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i16> %x
}

618define <vscale x 8 x i16> @vandn_vv_vp_swapped_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
619; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i16:
620; CHECK:       # %bb.0:
621; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
622; CHECK-NEXT:    vnot.v v8, v8, v0.t
623; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
624; CHECK-NEXT:    ret
625;
626; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv8i16:
627; CHECK-ZVKB:       # %bb.0:
628; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
629; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8, v0.t
630; CHECK-ZVKB-NEXT:    ret
631  %not.a = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %mask, i32 %evl)
632  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %b, <vscale x 8 x i16> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
633  ret <vscale x 8 x i16> %x
634}
635
636define <vscale x 8 x i16> @vandn_vx_vp_nxv8i16(i16 %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
637; CHECK-LABEL: vandn_vx_vp_nxv8i16:
638; CHECK:       # %bb.0:
639; CHECK-NEXT:    not a0, a0
640; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
641; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
642; CHECK-NEXT:    ret
643;
644; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv8i16:
645; CHECK-ZVKB:       # %bb.0:
646; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
647; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
648; CHECK-ZVKB-NEXT:    ret
649  %not.a = xor i16 %a, -1
650  %head.not.a = insertelement <vscale x 8 x i16> poison, i16 %not.a, i32 0
651  %splat.not.a = shufflevector <vscale x 8 x i16> %head.not.a, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
652  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %b, <vscale x 8 x i16> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
653  ret <vscale x 8 x i16> %x
654}
655
declare <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

; Masked (vp.xor %a, splat -1) feeding vp.and: Zvkb combines not+and into vandn.vv.
define <vscale x 16 x i16> @vandn_vv_vp_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv16i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %not.a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i16> %x
}

; Same pattern with the vp.and operands commuted; must still select vandn.vv.
define <vscale x 16 x i16> @vandn_vv_vp_swapped_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv16i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %b, <vscale x 16 x i16> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i16> %x
}

; Scalar-splat case: base ISA uses not+vand.vx, Zvkb folds into vandn.vx.
define <vscale x 16 x i16> @vandn_vx_vp_nxv16i16(i16 %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv16i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 16 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 16 x i16> %head.not.a, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %b, <vscale x 16 x i16> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i16> %x
}
714
declare <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
declare <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)

; Masked (vp.xor %a, splat -1) feeding vp.and: Zvkb combines not+and into vandn.vv.
define <vscale x 32 x i16> @vandn_vv_vp_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv32i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %mask, i32 %evl)
  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %not.a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i16> %x
}

; Same pattern with the vp.and operands commuted; must still select vandn.vv.
define <vscale x 32 x i16> @vandn_vv_vp_swapped_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv32i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %mask, i32 %evl)
  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %b, <vscale x 32 x i16> %not.a, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i16> %x
}

; Scalar-splat case: base ISA uses not+vand.vx, Zvkb folds into vandn.vx.
define <vscale x 32 x i16> @vandn_vx_vp_nxv32i16(i16 %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv32i16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 32 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 32 x i16> %head.not.a, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %b, <vscale x 32 x i16> %splat.not.a, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i16> %x
}
773
declare <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)

; Masked (vp.xor %a, splat -1) feeding vp.and: Zvkb combines not+and into vandn.vv.
define <vscale x 1 x i32> @vandn_vv_vp_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv1i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %not.a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i32> %x
}

; Same pattern with the vp.and operands commuted; must still select vandn.vv.
define <vscale x 1 x i32> @vandn_vv_vp_swapped_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv1i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i32> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i32> %x
}

; Scalar-splat case: base ISA uses not+vand.vx, Zvkb folds into vandn.vx.
define <vscale x 1 x i32> @vandn_vx_vp_nxv1i32(i32 %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv1i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i32 %a, -1
  %head.not.a = insertelement <vscale x 1 x i32> poison, i32 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 1 x i32> %head.not.a, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i32> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i32> %x
}
832
declare <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)

; Masked (vp.xor %a, splat -1) feeding vp.and: Zvkb combines not+and into vandn.vv.
define <vscale x 2 x i32> @vandn_vv_vp_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv2i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %not.a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i32> %x
}

; Same pattern with the vp.and operands commuted; must still select vandn.vv.
define <vscale x 2 x i32> @vandn_vv_vp_swapped_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv2i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %b, <vscale x 2 x i32> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i32> %x
}

; Scalar-splat case: base ISA uses not+vand.vx, Zvkb folds into vandn.vx.
define <vscale x 2 x i32> @vandn_vx_vp_nxv2i32(i32 %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv2i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i32 %a, -1
  %head.not.a = insertelement <vscale x 2 x i32> poison, i32 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 2 x i32> %head.not.a, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %b, <vscale x 2 x i32> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i32> %x
}
891
declare <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)

; Masked (vp.xor %a, splat -1) feeding vp.and: Zvkb combines not+and into vandn.vv.
define <vscale x 4 x i32> @vandn_vv_vp_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv4i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %not.a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i32> %x
}

; Same pattern with the vp.and operands commuted; must still select vandn.vv.
define <vscale x 4 x i32> @vandn_vv_vp_swapped_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv4i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %b, <vscale x 4 x i32> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i32> %x
}

; Scalar-splat case: base ISA uses not+vand.vx, Zvkb folds into vandn.vx.
define <vscale x 4 x i32> @vandn_vx_vp_nxv4i32(i32 %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv4i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i32 %a, -1
  %head.not.a = insertelement <vscale x 4 x i32> poison, i32 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 4 x i32> %head.not.a, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %b, <vscale x 4 x i32> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i32> %x
}
950
declare <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)

; Masked (vp.xor %a, splat -1) feeding vp.and: Zvkb combines not+and into vandn.vv.
define <vscale x 8 x i32> @vandn_vv_vp_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv8i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %not.a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i32> %x
}

; Same pattern with the vp.and operands commuted; must still select vandn.vv.
define <vscale x 8 x i32> @vandn_vv_vp_swapped_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv8i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %b, <vscale x 8 x i32> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i32> %x
}

; Scalar-splat case: base ISA uses not+vand.vx, Zvkb folds into vandn.vx.
define <vscale x 8 x i32> @vandn_vx_vp_nxv8i32(i32 %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv8i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i32 %a, -1
  %head.not.a = insertelement <vscale x 8 x i32> poison, i32 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 8 x i32> %head.not.a, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %b, <vscale x 8 x i32> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i32> %x
}
1009
declare <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)

; Masked (vp.xor %a, splat -1) feeding vp.and: Zvkb combines not+and into vandn.vv.
define <vscale x 16 x i32> @vandn_vv_vp_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv16i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %not.a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i32> %x
}

; Same pattern with the vp.and operands commuted; must still select vandn.vv.
define <vscale x 16 x i32> @vandn_vv_vp_swapped_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv16i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %b, <vscale x 16 x i32> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i32> %x
}

; Scalar-splat case: base ISA uses not+vand.vx, Zvkb folds into vandn.vx.
define <vscale x 16 x i32> @vandn_vx_vp_nxv16i32(i32 %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    not a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv16i32:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = xor i32 %a, -1
  %head.not.a = insertelement <vscale x 16 x i32> poison, i32 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 16 x i32> %head.not.a, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %b, <vscale x 16 x i32> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i32> %x
}
1068
declare <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

; Masked (vp.xor %a, splat -1) feeding vp.and: Zvkb combines not+and into vandn.vv.
define <vscale x 1 x i64> @vandn_vv_vp_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv1i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %not.a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i64> %x
}

; Same pattern with the vp.and operands commuted; must still select vandn.vv.
define <vscale x 1 x i64> @vandn_vv_vp_swapped_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv1i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %b, <vscale x 1 x i64> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i64> %x
}

; i64 scalar splat: on RV32 the scalar lives in a0/a1, is inverted piecewise,
; stored to a stack slot and splatted with vlse64.v, so even Zvkb cannot form
; vandn.vx there; RV64 (with Zvkb) selects vandn.vx directly.
define <vscale x 1 x i64> @vandn_vx_vp_nxv1i64(i64 %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: vandn_vx_vp_nxv1i64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    addi sp, sp, -16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT:    not a0, a0
; CHECK-RV32-NEXT:    not a1, a1
; CHECK-RV32-NEXT:    sw a0, 8(sp)
; CHECK-RV32-NEXT:    sw a1, 12(sp)
; CHECK-RV32-NEXT:    addi a0, sp, 8
; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v9, (a0), zero
; CHECK-RV32-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-RV32-NEXT:    addi sp, sp, 16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: vandn_vx_vp_nxv1i64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    not a0, a0
; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-RV64-NEXT:    ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_vp_nxv1i64:
; CHECK-ZVKB32:       # %bb.0:
; CHECK-ZVKB32-NEXT:    addi sp, sp, -16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT:    not a0, a0
; CHECK-ZVKB32-NEXT:    not a1, a1
; CHECK-ZVKB32-NEXT:    sw a0, 8(sp)
; CHECK-ZVKB32-NEXT:    sw a1, 12(sp)
; CHECK-ZVKB32-NEXT:    addi a0, sp, 8
; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-ZVKB32-NEXT:    vlse64.v v9, (a0), zero
; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT:    ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv1i64:
; CHECK-ZVKB64:       # %bb.0:
; CHECK-ZVKB64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-ZVKB64-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB64-NEXT:    ret
  %not.a = xor i64 %a, -1
  %head.not.a = insertelement <vscale x 1 x i64> poison, i64 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 1 x i64> %head.not.a, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %b, <vscale x 1 x i64> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i64> %x
}
1159
declare <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)

; Masked (vp.xor %a, splat -1) feeding vp.and: Zvkb combines not+and into vandn.vv.
define <vscale x 2 x i64> @vandn_vv_vp_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv2i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %not.a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i64> %x
}

; Same pattern with the vp.and operands commuted; must still select vandn.vv.
define <vscale x 2 x i64> @vandn_vv_vp_swapped_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv2i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %b, <vscale x 2 x i64> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i64> %x
}

; i64 scalar splat: on RV32 the scalar lives in a0/a1, is inverted piecewise,
; stored to a stack slot and splatted with vlse64.v, so even Zvkb cannot form
; vandn.vx there; RV64 (with Zvkb) selects vandn.vx directly.
define <vscale x 2 x i64> @vandn_vx_vp_nxv2i64(i64 %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: vandn_vx_vp_nxv2i64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    addi sp, sp, -16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT:    not a0, a0
; CHECK-RV32-NEXT:    not a1, a1
; CHECK-RV32-NEXT:    sw a0, 8(sp)
; CHECK-RV32-NEXT:    sw a1, 12(sp)
; CHECK-RV32-NEXT:    addi a0, sp, 8
; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v10, (a0), zero
; CHECK-RV32-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-RV32-NEXT:    addi sp, sp, 16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: vandn_vx_vp_nxv2i64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    not a0, a0
; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-RV64-NEXT:    ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_vp_nxv2i64:
; CHECK-ZVKB32:       # %bb.0:
; CHECK-ZVKB32-NEXT:    addi sp, sp, -16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT:    not a0, a0
; CHECK-ZVKB32-NEXT:    not a1, a1
; CHECK-ZVKB32-NEXT:    sw a0, 8(sp)
; CHECK-ZVKB32-NEXT:    sw a1, 12(sp)
; CHECK-ZVKB32-NEXT:    addi a0, sp, 8
; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-ZVKB32-NEXT:    vlse64.v v10, (a0), zero
; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT:    ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv2i64:
; CHECK-ZVKB64:       # %bb.0:
; CHECK-ZVKB64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-ZVKB64-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB64-NEXT:    ret
  %not.a = xor i64 %a, -1
  %head.not.a = insertelement <vscale x 2 x i64> poison, i64 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 2 x i64> %head.not.a, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %b, <vscale x 2 x i64> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i64> %x
}
1250
1251declare <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
1252declare <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
1253
; Masked VP (~a & b): vp.xor of %a with all-ones, then vp.and with %b.
; Base V emits vnot.v + vand.vv; with +zvkb this folds to one vandn.vv.
define <vscale x 4 x i64> @vandn_vv_vp_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv4i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %not.a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i64> %x
}
1271
; Same as vandn_vv_vp_nxv4i64 but with the vp.and operands swapped
; (b & ~a); the vandn.vv fold with +zvkb must apply in either order.
define <vscale x 4 x i64> @vandn_vv_vp_swapped_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv4i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %b, <vscale x 4 x i64> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i64> %x
}
1289
; Masked (b & splat(~a)) with a scalar i64 operand at LMUL=4.  RV32 goes
; through the stack + vlse64.v broadcast for the inverted 64-bit scalar;
; RV64 uses not + vand.vx, and RV64+zvkb selects a single vandn.vx.
define <vscale x 4 x i64> @vandn_vx_vp_nxv4i64(i64 %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: vandn_vx_vp_nxv4i64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    addi sp, sp, -16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT:    not a0, a0
; CHECK-RV32-NEXT:    not a1, a1
; CHECK-RV32-NEXT:    sw a0, 8(sp)
; CHECK-RV32-NEXT:    sw a1, 12(sp)
; CHECK-RV32-NEXT:    addi a0, sp, 8
; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
; CHECK-RV32-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-RV32-NEXT:    addi sp, sp, 16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: vandn_vx_vp_nxv4i64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    not a0, a0
; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-RV64-NEXT:    ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_vp_nxv4i64:
; CHECK-ZVKB32:       # %bb.0:
; CHECK-ZVKB32-NEXT:    addi sp, sp, -16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT:    not a0, a0
; CHECK-ZVKB32-NEXT:    not a1, a1
; CHECK-ZVKB32-NEXT:    sw a0, 8(sp)
; CHECK-ZVKB32-NEXT:    sw a1, 12(sp)
; CHECK-ZVKB32-NEXT:    addi a0, sp, 8
; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-ZVKB32-NEXT:    vlse64.v v12, (a0), zero
; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT:    ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv4i64:
; CHECK-ZVKB64:       # %bb.0:
; CHECK-ZVKB64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-ZVKB64-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB64-NEXT:    ret
  %not.a = xor i64 %a, -1
  %head.not.a = insertelement <vscale x 4 x i64> poison, i64 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 4 x i64> %head.not.a, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %b, <vscale x 4 x i64> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i64> %x
}
1341
1342declare <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1343declare <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1344
; Masked VP (~a & b) at LMUL=8.  Base V emits vnot.v + vand.vv; with
; +zvkb the pair folds into one masked vandn.vv.
define <vscale x 8 x i64> @vandn_vv_vp_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv8i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %not.a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i64> %x
}
1362
; Same as vandn_vv_vp_nxv8i64 but with the vp.and operands swapped
; (b & ~a); the +zvkb vandn.vv fold must apply in either order.
define <vscale x 8 x i64> @vandn_vv_vp_swapped_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv8i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %b, <vscale x 8 x i64> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i64> %x
}
1380
; Masked (b & splat(~a)) with a scalar i64 operand at LMUL=8.  RV32 goes
; through the stack + vlse64.v broadcast for the inverted 64-bit scalar;
; RV64 uses not + vand.vx, and RV64+zvkb selects a single vandn.vx.
define <vscale x 8 x i64> @vandn_vx_vp_nxv8i64(i64 %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: vandn_vx_vp_nxv8i64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    addi sp, sp, -16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT:    not a0, a0
; CHECK-RV32-NEXT:    not a1, a1
; CHECK-RV32-NEXT:    sw a0, 8(sp)
; CHECK-RV32-NEXT:    sw a1, 12(sp)
; CHECK-RV32-NEXT:    addi a0, sp, 8
; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
; CHECK-RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-RV32-NEXT:    addi sp, sp, 16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: vandn_vx_vp_nxv8i64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    not a0, a0
; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-RV64-NEXT:    ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_vp_nxv8i64:
; CHECK-ZVKB32:       # %bb.0:
; CHECK-ZVKB32-NEXT:    addi sp, sp, -16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT:    not a0, a0
; CHECK-ZVKB32-NEXT:    not a1, a1
; CHECK-ZVKB32-NEXT:    sw a0, 8(sp)
; CHECK-ZVKB32-NEXT:    sw a1, 12(sp)
; CHECK-ZVKB32-NEXT:    addi a0, sp, 8
; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-ZVKB32-NEXT:    vlse64.v v16, (a0), zero
; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT:    ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv8i64:
; CHECK-ZVKB64:       # %bb.0:
; CHECK-ZVKB64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-ZVKB64-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB64-NEXT:    ret
  %not.a = xor i64 %a, -1
  %head.not.a = insertelement <vscale x 8 x i64> poison, i64 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 8 x i64> %head.not.a, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %b, <vscale x 8 x i64> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i64> %x
}
1432
; Masked AND with the immediate splat 32767 (0x7fff) as the first vp.and
; operand.  Base V materializes 0x7fff with lui+addi and uses vand.vx;
; +zvkb instead materializes the inverted constant (a single lui) and
; selects vandn.vx, saving one scalar instruction.
define <vscale x 1 x i16> @vandn_vx_vp_imm16(<vscale x 1 x i16> %x, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_imm16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 8
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a1, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_imm16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    lui a1, 1048568
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a1, v0.t
; CHECK-ZVKB-NEXT:    ret
  %a = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> splat (i16 32767), <vscale x 1 x i16> %x, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i16> %a
}
1451
; Same as vandn_vx_vp_imm16 but with the splat constant as the second
; vp.and operand; codegen must be identical in either operand order.
define <vscale x 1 x i16> @vandn_vx_vp_swapped_imm16(<vscale x 1 x i16> %x, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_swapped_imm16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 8
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a1, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_swapped_imm16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    lui a1, 1048568
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vx v8, v8, a1, v0.t
; CHECK-ZVKB-NEXT:    ret
  %a = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> splat (i16 32767), <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i16> %a
}
1470
; Masked AND with a 64-bit immediate splat (0xff000000000000ff).  RV32
; (with or without zvkb) builds the constant on the stack and broadcasts
; it with vlse64.v; plain RV64 materializes it with li+slli+addi and uses
; vand.vx; RV64+zvkb instead materializes the inverted constant
; (lui+srli) and selects vandn.vx.
define <vscale x 1 x i64> @vandn_vx_vp_imm64(<vscale x 1 x i64> %x, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: vandn_vx_vp_imm64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    addi sp, sp, -16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT:    lui a1, 1044480
; CHECK-RV32-NEXT:    li a2, 255
; CHECK-RV32-NEXT:    sw a2, 8(sp)
; CHECK-RV32-NEXT:    sw a1, 12(sp)
; CHECK-RV32-NEXT:    addi a1, sp, 8
; CHECK-RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v9, (a1), zero
; CHECK-RV32-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-RV32-NEXT:    addi sp, sp, 16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 0
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: vandn_vx_vp_imm64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    li a1, -1
; CHECK-RV64-NEXT:    slli a1, a1, 56
; CHECK-RV64-NEXT:    addi a1, a1, 255
; CHECK-RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-RV64-NEXT:    vand.vx v8, v8, a1, v0.t
; CHECK-RV64-NEXT:    ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_vp_imm64:
; CHECK-ZVKB32:       # %bb.0:
; CHECK-ZVKB32-NEXT:    addi sp, sp, -16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT:    lui a1, 1044480
; CHECK-ZVKB32-NEXT:    li a2, 255
; CHECK-ZVKB32-NEXT:    sw a2, 8(sp)
; CHECK-ZVKB32-NEXT:    sw a1, 12(sp)
; CHECK-ZVKB32-NEXT:    addi a1, sp, 8
; CHECK-ZVKB32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-ZVKB32-NEXT:    vlse64.v v9, (a1), zero
; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 0
; CHECK-ZVKB32-NEXT:    ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_vp_imm64:
; CHECK-ZVKB64:       # %bb.0:
; CHECK-ZVKB64-NEXT:    lui a1, 1048560
; CHECK-ZVKB64-NEXT:    srli a1, a1, 8
; CHECK-ZVKB64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-ZVKB64-NEXT:    vandn.vx v8, v8, a1, v0.t
; CHECK-ZVKB64-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> splat (i64 -72057594037927681), <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i64> %a
}
1523
; Negative test: the splat constant 32767 has a second user (the vp.add),
; so even with +zvkb the materialized non-inverted constant is reused and
; plain vand.vx is kept instead of forming vandn.vx.
define <vscale x 1 x i16> @vand_vadd_vx_vp_imm16(<vscale x 1 x i16> %x, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vand_vadd_vx_vp_imm16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, 8
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a1, v0.t
; CHECK-NEXT:    vadd.vx v8, v8, a1, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vand_vadd_vx_vp_imm16:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    lui a1, 8
; CHECK-ZVKB-NEXT:    addi a1, a1, -1
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT:    vand.vx v8, v8, a1, v0.t
; CHECK-ZVKB-NEXT:    vadd.vx v8, v8, a1, v0.t
; CHECK-ZVKB-NEXT:    ret
  %a = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> splat (i16 32767), <vscale x 1 x i16> %x, <vscale x 1 x i1> %mask, i32 %evl)
  %b = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> splat (i16 32767), <vscale x 1 x i16> %a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i16> %b
}
1546