; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

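; A select between two scalable predicate vectors is expected to lower to the
; SEL predicate instruction.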
define <vscale x 16 x i1> @vselect_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: vselect_16:
; CHECK: sel p0.b, p0, p1.b, p2.b
; CHECK-NEXT: ret
  %res = select <vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd
  ret <vscale x 16 x i1> %res;
}

define <vscale x 8 x i1> @vselect_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: vselect_8:
; CHECK: sel p0.b, p0, p1.b, p2.b
; CHECK-NEXT: ret
  %res = select <vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd
  ret <vscale x 8 x i1> %res;
}

define <vscale x 4 x i1> @vselect_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: vselect_4:
; CHECK: sel p0.b, p0, p1.b, p2.b
; CHECK-NEXT: ret
  %res = select <vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd
  ret <vscale x 4 x i1> %res;
}

define <vscale x 2 x i1> @vselect_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: vselect_2:
; CHECK: sel p0.b, p0, p1.b, p2.b
; CHECK-NEXT: ret
  %res = select <vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd
  ret <vscale x 2 x i1> %res;
}

define <vscale x 1 x i1> @vselect_1(<vscale x 1 x i1> %Pg, <vscale x 1 x i1> %Pn, <vscale x 1 x i1> %Pd) {
; CHECK-LABEL: vselect_1:
; CHECK: sel p0.b, p0, p1.b, p2.b
; CHECK-NEXT: ret
  %res = select <vscale x 1 x i1> %Pg, <vscale x 1 x i1> %Pn, <vscale x 1 x i1> %Pd
  ret <vscale x 1 x i1> %res;
}

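; llvm.aarch64.sve.and.z (zeroing predication) is expected to select the AND
; predicate instruction.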
define <vscale x 16 x i1> @and_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: and_16:
; CHECK: and p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
  ret <vscale x 16 x i1> %res;
}

define <vscale x 8 x i1> @and_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: and_8:
; CHECK: and p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i1> @llvm.aarch64.sve.and.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
  ret <vscale x 8 x i1> %res;
}

define <vscale x 4 x i1> @and_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: and_4:
; CHECK: and p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.and.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
  ret <vscale x 4 x i1> %res;
}

define <vscale x 2 x i1> @and_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: and_2:
; CHECK: and p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i1> @llvm.aarch64.sve.and.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
  ret <vscale x 2 x i1> %res;
}

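; llvm.aarch64.sve.bic.z is expected to select the BIC predicate instruction.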
define <vscale x 16 x i1> @bic_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: bic_16:
; CHECK: bic p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.bic.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
  ret <vscale x 16 x i1> %res;
}

define <vscale x 8 x i1> @bic_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: bic_8:
; CHECK: bic p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i1> @llvm.aarch64.sve.bic.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
  ret <vscale x 8 x i1> %res;
}

define <vscale x 4 x i1> @bic_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: bic_4:
; CHECK: bic p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.bic.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
  ret <vscale x 4 x i1> %res;
}

define <vscale x 2 x i1> @bic_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: bic_2:
; CHECK: bic p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i1> @llvm.aarch64.sve.bic.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
  ret <vscale x 2 x i1> %res;
}

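; llvm.aarch64.sve.eor.z is expected to select the EOR predicate instruction.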
define <vscale x 16 x i1> @eor_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: eor_16:
; CHECK: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.eor.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
  ret <vscale x 16 x i1> %res;
}

define <vscale x 8 x i1> @eor_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: eor_8:
; CHECK: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i1> @llvm.aarch64.sve.eor.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
  ret <vscale x 8 x i1> %res;
}

define <vscale x 4 x i1> @eor_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: eor_4:
; CHECK: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.eor.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
  ret <vscale x 4 x i1> %res;
}

define <vscale x 2 x i1> @eor_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: eor_2:
; CHECK: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i1> @llvm.aarch64.sve.eor.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
  ret <vscale x 2 x i1> %res;
}

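; llvm.aarch64.sve.orr.z is expected to select the ORR predicate instruction.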
define <vscale x 16 x i1> @orr_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: orr_16:
; CHECK: orr p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.orr.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
  ret <vscale x 16 x i1> %res;
}

define <vscale x 8 x i1> @orr_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: orr_8:
; CHECK: orr p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i1> @llvm.aarch64.sve.orr.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
  ret <vscale x 8 x i1> %res;
}

define <vscale x 4 x i1> @orr_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: orr_4:
; CHECK: orr p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.orr.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
  ret <vscale x 4 x i1> %res;
}

define <vscale x 2 x i1> @orr_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: orr_2:
; CHECK: orr p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i1> @llvm.aarch64.sve.orr.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
  ret <vscale x 2 x i1> %res;
}

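; llvm.aarch64.sve.orn.z is expected to select the ORN predicate instruction.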
define <vscale x 16 x i1> @orn_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: orn_16:
; CHECK: orn p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.orn.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
  ret <vscale x 16 x i1> %res;
}

define <vscale x 8 x i1> @orn_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: orn_8:
; CHECK: orn p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i1> @llvm.aarch64.sve.orn.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
  ret <vscale x 8 x i1> %res;
}

define <vscale x 4 x i1> @orn_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: orn_4:
; CHECK: orn p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.orn.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
  ret <vscale x 4 x i1> %res;
}

define <vscale x 2 x i1> @orn_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: orn_2:
; CHECK: orn p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i1> @llvm.aarch64.sve.orn.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
  ret <vscale x 2 x i1> %res;
}

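; llvm.aarch64.sve.nor.z is expected to select the NOR predicate instruction.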
define <vscale x 16 x i1> @nor_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: nor_16:
; CHECK: nor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.nor.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
  ret <vscale x 16 x i1> %res;
}

define <vscale x 8 x i1> @nor_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: nor_8:
; CHECK: nor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i1> @llvm.aarch64.sve.nor.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
  ret <vscale x 8 x i1> %res;
}

define <vscale x 4 x i1> @nor_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: nor_4:
; CHECK: nor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.nor.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
  ret <vscale x 4 x i1> %res;
}

define <vscale x 2 x i1> @nor_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: nor_2:
; CHECK: nor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i1> @llvm.aarch64.sve.nor.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
  ret <vscale x 2 x i1> %res;
}

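; llvm.aarch64.sve.nand.z is expected to select the NAND predicate instruction.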
define <vscale x 16 x i1> @nand_16(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd) {
; CHECK-LABEL: nand_16:
; CHECK: nand p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i1> @llvm.aarch64.sve.nand.z.nxv16i1(<vscale x 16 x i1> %Pg, <vscale x 16 x i1> %Pn, <vscale x 16 x i1> %Pd)
  ret <vscale x 16 x i1> %res;
}

define <vscale x 8 x i1> @nand_8(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd) {
; CHECK-LABEL: nand_8:
; CHECK: nand p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i1> @llvm.aarch64.sve.nand.z.nxv8i1(<vscale x 8 x i1> %Pg, <vscale x 8 x i1> %Pn, <vscale x 8 x i1> %Pd)
  ret <vscale x 8 x i1> %res;
}

define <vscale x 4 x i1> @nand_4(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd) {
; CHECK-LABEL: nand_4:
; CHECK: nand p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i1> @llvm.aarch64.sve.nand.z.nxv4i1(<vscale x 4 x i1> %Pg, <vscale x 4 x i1> %Pn, <vscale x 4 x i1> %Pd)
  ret <vscale x 4 x i1> %res;
}

define <vscale x 2 x i1> @nand_2(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd) {
; CHECK-LABEL: nand_2:
; CHECK: nand p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i1> @llvm.aarch64.sve.nand.z.nxv2i1(<vscale x 2 x i1> %Pg, <vscale x 2 x i1> %Pn, <vscale x 2 x i1> %Pd)
  ret <vscale x 2 x i1> %res;
}

declare <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.and.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.and.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.and.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.bic.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.bic.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.bic.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.bic.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.eor.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.eor.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.eor.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.eor.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.orr.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.orr.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.orr.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.orr.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.orn.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.orn.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.orn.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.orn.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.nor.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.nor.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.nor.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.nor.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.nand.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.nand.z.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.nand.z.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.nand.z.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>)