; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

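; i7 is not a legal element type; judging by the CHECK lines below, the
; operation is carried out at SEW=8 (e8, m1).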
declare <vscale x 8 x i7> @llvm.vp.and.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i7> @vand_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.and.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}

declare <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vand_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vand_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vand_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vand_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vand_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vand_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vand_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vand_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vand_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vand_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vand_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 4), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vand_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vand_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vand_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vand_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vand_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vand_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 4), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vand_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

declare <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vand_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vand_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vand_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vand_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vand_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vand_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

declare <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vand_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vand_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vand_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vand_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vand_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 4), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vand_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vand_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vand_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vand_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vand_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vand_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 4), <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vand_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 4), <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

declare <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vand_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vand_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vand_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vand_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vand_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 4), <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vand_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 4), <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

declare <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vand_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vand_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vand_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vand_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vand_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 4), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vand_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vand_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vand_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vand_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vand_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vand_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 4), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vand_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vand_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vand_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vand_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vand_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vand_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 4), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vand_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

declare <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vand_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vand_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vand_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vand_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vand_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 4), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vand_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

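; nxv14i16 is not a power-of-two element count; the CHECK lines below use the
; same e16, m4 configuration as nxv16i16, suggesting the type is widened.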
declare <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16>, <vscale x 14 x i16>, <vscale x 14 x i1>, i32)

define <vscale x 14 x i16> @vand_vv_nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv14i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 %evl)
  ret <vscale x 14 x i16> %v
}

define <vscale x 14 x i16> @vand_vv_nxv14i16_unmasked(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv14i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 14 x i16> %v
}

define <vscale x 14 x i16> @vand_vx_nxv14i16(<vscale x 14 x i16> %va, i16 %b, <vscale x 14 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv14i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 14 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> poison, <vscale x 14 x i32> zeroinitializer
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
  ret <vscale x 14 x i16> %v
}

define <vscale x 14 x i16> @vand_vx_nxv14i16_unmasked(<vscale x 14 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv14i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 14 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> poison, <vscale x 14 x i32> zeroinitializer
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 14 x i16> %v
}

define <vscale x 14 x i16> @vand_vi_nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv14i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> splat (i16 4), <vscale x 14 x i1> %m, i32 %evl)
  ret <vscale x 14 x i16> %v
}

define <vscale x 14 x i16> @vand_vi_nxv14i16_unmasked(<vscale x 14 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv14i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> splat (i16 4), <vscale x 14 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 14 x i16> %v
}

declare <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vand_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vand_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vand_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vand_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vand_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 4), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vand_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

declare <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vand_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vand_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vand_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

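; Same as vand_vx_nxv32i16, but with the splat as the first operand; AND is
; commutative, so the same vand.vx is expected.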
913define <vscale x 32 x i16> @vand_vx_nxv32i16_commute(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
914; CHECK-LABEL: vand_vx_nxv32i16_commute:
915; CHECK:       # %bb.0:
916; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
917; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
918; CHECK-NEXT:    ret
919  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
920  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
921  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %vb, <vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
922  ret <vscale x 32 x i16> %v
923}
924
925define <vscale x 32 x i16> @vand_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
926; CHECK-LABEL: vand_vx_nxv32i16_unmasked:
927; CHECK:       # %bb.0:
928; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
929; CHECK-NEXT:    vand.vx v8, v8, a0
930; CHECK-NEXT:    ret
931  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
932  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
933  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
934  ret <vscale x 32 x i16> %v
935}
936
937define <vscale x 32 x i16> @vand_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
938; CHECK-LABEL: vand_vi_nxv32i16:
939; CHECK:       # %bb.0:
940; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
941; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
942; CHECK-NEXT:    ret
943  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 4), <vscale x 32 x i1> %m, i32 %evl)
944  ret <vscale x 32 x i16> %v
945}
946
947define <vscale x 32 x i16> @vand_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
948; CHECK-LABEL: vand_vi_nxv32i16_unmasked:
949; CHECK:       # %bb.0:
950; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
951; CHECK-NEXT:    vand.vi v8, v8, 4
952; CHECK-NEXT:    ret
953  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 4), <vscale x 32 x i1> splat (i1 true), i32 %evl)
954  ret <vscale x 32 x i16> %v
955}
956
957declare <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
958
959define <vscale x 1 x i32> @vand_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
960; CHECK-LABEL: vand_vv_nxv1i32:
961; CHECK:       # %bb.0:
962; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
963; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
964; CHECK-NEXT:    ret
965  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
966  ret <vscale x 1 x i32> %v
967}
968
969define <vscale x 1 x i32> @vand_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
970; CHECK-LABEL: vand_vv_nxv1i32_unmasked:
971; CHECK:       # %bb.0:
972; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
973; CHECK-NEXT:    vand.vv v8, v8, v9
974; CHECK-NEXT:    ret
975  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
976  ret <vscale x 1 x i32> %v
977}
978
979define <vscale x 1 x i32> @vand_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
980; CHECK-LABEL: vand_vx_nxv1i32:
981; CHECK:       # %bb.0:
982; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
983; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
984; CHECK-NEXT:    ret
985  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
986  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
987  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
988  ret <vscale x 1 x i32> %v
989}
990
991define <vscale x 1 x i32> @vand_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
992; CHECK-LABEL: vand_vx_nxv1i32_unmasked:
993; CHECK:       # %bb.0:
994; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
995; CHECK-NEXT:    vand.vx v8, v8, a0
996; CHECK-NEXT:    ret
997  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
998  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
999  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1000  ret <vscale x 1 x i32> %v
1001}
1002
1003define <vscale x 1 x i32> @vand_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1004; CHECK-LABEL: vand_vi_nxv1i32:
1005; CHECK:       # %bb.0:
1006; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
1007; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
1008; CHECK-NEXT:    ret
1009  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), <vscale x 1 x i1> %m, i32 %evl)
1010  ret <vscale x 1 x i32> %v
1011}
1012
1013define <vscale x 1 x i32> @vand_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
1014; CHECK-LABEL: vand_vi_nxv1i32_unmasked:
1015; CHECK:       # %bb.0:
1016; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
1017; CHECK-NEXT:    vand.vi v8, v8, 4
1018; CHECK-NEXT:    ret
1019  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
1020  ret <vscale x 1 x i32> %v
1021}
1022
1023declare <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
1024
1025define <vscale x 2 x i32> @vand_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1026; CHECK-LABEL: vand_vv_nxv2i32:
1027; CHECK:       # %bb.0:
1028; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
1029; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
1030; CHECK-NEXT:    ret
1031  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
1032  ret <vscale x 2 x i32> %v
1033}
1034
1035define <vscale x 2 x i32> @vand_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
1036; CHECK-LABEL: vand_vv_nxv2i32_unmasked:
1037; CHECK:       # %bb.0:
1038; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
1039; CHECK-NEXT:    vand.vv v8, v8, v9
1040; CHECK-NEXT:    ret
1041  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1042  ret <vscale x 2 x i32> %v
1043}
1044
1045define <vscale x 2 x i32> @vand_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1046; CHECK-LABEL: vand_vx_nxv2i32:
1047; CHECK:       # %bb.0:
1048; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1049; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
1050; CHECK-NEXT:    ret
1051  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
1052  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
1053  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
1054  ret <vscale x 2 x i32> %v
1055}
1056
1057define <vscale x 2 x i32> @vand_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
1058; CHECK-LABEL: vand_vx_nxv2i32_unmasked:
1059; CHECK:       # %bb.0:
1060; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1061; CHECK-NEXT:    vand.vx v8, v8, a0
1062; CHECK-NEXT:    ret
1063  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
1064  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
1065  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1066  ret <vscale x 2 x i32> %v
1067}
1068
define <vscale x 2 x i32> @vand_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 4), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vand_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

declare <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @vand_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vand_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vand_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vand_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vand_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 4), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vand_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

declare <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vand_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vand_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vand_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vand_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vand_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vand_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

declare <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i32> @vand_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vand_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vand_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vand_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vand_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 4), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vand_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

declare <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vand_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vand_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

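; NOTE: for the i64 vand_vx tests below, RV32 has no 64-bit scalar registers:
; the i64 splat operand arrives split across a0/a1, so it is stored to a
; stack slot and broadcast with a zero-stride vlse64.v before a vand.vv,
; while RV64 can feed the scalar to vand.vx directly.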
define <vscale x 1 x i64> @vand_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vand_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv1i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_nxv1i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vand_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vand_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

declare <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vand_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vand_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vand_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vand_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_nxv2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vand_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 4), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vand_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

declare <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vand_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vand_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vand_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vand_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_nxv4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vand_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 4), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vand_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

declare <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @vand_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vand_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vand_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vand_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vand.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vand_vx_nxv8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vand_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vand_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}